hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From apurt...@apache.org
Subject [3/5] hbase git commit: HBASE-14475 Region split requests are always audited with hbase user rather than request user (Ted Yu)
Date Thu, 01 Oct 2015 22:13:04 GMT
HBASE-14475 Region split requests are always audited with hbase user rather than request user
(Ted Yu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1ae5c045
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1ae5c045
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1ae5c045

Branch: refs/heads/0.98
Commit: 1ae5c0450bd84e3e593e3451e2642c5ba03369ee
Parents: dd0a601
Author: Andrew Purtell <apurtell@apache.org>
Authored: Thu Oct 1 12:10:10 2015 -0700
Committer: Andrew Purtell <apurtell@apache.org>
Committed: Thu Oct 1 12:10:10 2015 -0700

----------------------------------------------------------------------
 .../hbase/regionserver/CompactSplitThread.java  | 75 ++++++++++++++------
 .../hbase/regionserver/CompactionRequestor.java |  8 ++-
 .../hbase/regionserver/HRegionServer.java       |  8 +--
 .../hadoop/hbase/regionserver/SplitRequest.java | 39 +++++++---
 .../hbase/regionserver/TestCompaction.java      |  4 +-
 5 files changed, 96 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/1ae5c045/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index 6596aac..2a155dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputControllerFactory;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -225,6 +227,13 @@ public class CompactSplitThread implements CompactionRequestor {
   }
 
   public synchronized void requestSplit(final HRegion r, byte[] midKey) {
+    requestSplit(r, midKey, null);
+  }
+
+  /*
+   * The User parameter allows the split thread to assume the correct user identity
+   */
+  public synchronized void requestSplit(final HRegion r, byte[] midKey, User user) {
     if (midKey == null) {
       LOG.debug("Region " + r.getRegionNameAsString() +
         " not splittable because midkey=null");
@@ -234,7 +243,7 @@ public class CompactSplitThread implements CompactionRequestor {
       return;
     }
     try {
-      this.splits.execute(new SplitRequest(r, midKey, this.server));
+      this.splits.execute(new SplitRequest(r, midKey, this.server, user));
       if (LOG.isDebugEnabled()) {
         LOG.debug("Split requested for " + r + ".  " + this);
       }
@@ -252,54 +261,55 @@ public class CompactSplitThread implements CompactionRequestor {
   @Override
   public synchronized List<CompactionRequest> requestCompaction(final HRegion r, final
String why,
       List<Pair<CompactionRequest, Store>> requests) throws IOException {
-    return requestCompaction(r, why, Store.NO_PRIORITY, requests);
+    return requestCompaction(r, why, Store.NO_PRIORITY, requests, null);
   }
 
   @Override
   public synchronized CompactionRequest requestCompaction(final HRegion r, final Store s,
       final String why, CompactionRequest request) throws IOException {
-    return requestCompaction(r, s, why, Store.NO_PRIORITY, request);
+    return requestCompaction(r, s, why, Store.NO_PRIORITY, request, null);
   }
 
   @Override
   public synchronized List<CompactionRequest> requestCompaction(final HRegion r, final
String why,
-      int p, List<Pair<CompactionRequest, Store>> requests) throws IOException
{
-    return requestCompactionInternal(r, why, p, requests, true);
+      int p, List<Pair<CompactionRequest, Store>> requests, User user) throws
IOException {
+    return requestCompactionInternal(r, why, p, requests, true, user);
   }
 
   private List<CompactionRequest> requestCompactionInternal(final HRegion r, final
String why,
-      int p, List<Pair<CompactionRequest, Store>> requests, boolean selectNow)
throws IOException {
+      int p, List<Pair<CompactionRequest, Store>> requests, boolean selectNow,
User user)
+          throws IOException {
     // not a special compaction request, so make our own list
     List<CompactionRequest> ret = null;
     if (requests == null) {
       ret = selectNow ? new ArrayList<CompactionRequest>(r.getStores().size()) : null;
       for (Store s : r.getStores().values()) {
-        CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow);
+        CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow, user);
         if (selectNow) ret.add(cr);
       }
     } else {
       Preconditions.checkArgument(selectNow); // only system requests have selectNow == false
       ret = new ArrayList<CompactionRequest>(requests.size());
       for (Pair<CompactionRequest, Store> pair : requests) {
-        ret.add(requestCompaction(r, pair.getSecond(), why, p, pair.getFirst()));
+        ret.add(requestCompaction(r, pair.getSecond(), why, p, pair.getFirst(), user));
       }
     }
     return ret;
   }
 
   public CompactionRequest requestCompaction(final HRegion r, final Store s,
-      final String why, int priority, CompactionRequest request) throws IOException {
-    return requestCompactionInternal(r, s, why, priority, request, true);
+      final String why, int priority, CompactionRequest request, User user) throws IOException
{
+    return requestCompactionInternal(r, s, why, priority, request, true, user);
   }
 
   public synchronized void requestSystemCompaction(
       final HRegion r, final String why) throws IOException {
-    requestCompactionInternal(r, why, Store.NO_PRIORITY, null, false);
+    requestCompactionInternal(r, why, Store.NO_PRIORITY, null, false, null);
   }
 
   public void requestSystemCompaction(
       final HRegion r, final Store s, final String why) throws IOException {
-    requestCompactionInternal(r, s, why, Store.NO_PRIORITY, null, false);
+    requestCompactionInternal(r, s, why, Store.NO_PRIORITY, null, false, null);
   }
 
   /**
@@ -311,7 +321,7 @@ public class CompactSplitThread implements CompactionRequestor {
    *          compaction will be used.
    */
   private synchronized CompactionRequest requestCompactionInternal(final HRegion r, final
Store s,
-      final String why, int priority, CompactionRequest request, boolean selectNow)
+      final String why, int priority, CompactionRequest request, boolean selectNow, User
user)
           throws IOException {
     if (this.server.isStopped()
         || (r.getTableDesc() != null && !r.getTableDesc().isCompactionEnabled()))
{
@@ -328,7 +338,7 @@ public class CompactSplitThread implements CompactionRequestor {
     // pool; we will do selection there, and move to large pool if necessary.
     ThreadPoolExecutor pool = (selectNow && s.throttleCompaction(compaction.getRequest().getSize()))
       ? largeCompactions : smallCompactions;
-    pool.execute(new CompactionRunner(s, r, compaction, pool));
+    pool.execute(new CompactionRunner(s, r, compaction, pool, user));
     if (LOG.isDebugEnabled()) {
       String type = (pool == smallCompactions) ? "Small " : "Large ";
       LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system")
@@ -430,9 +440,10 @@ public class CompactSplitThread implements CompactionRequestor {
     private CompactionContext compaction;
     private int queuedPriority;
     private ThreadPoolExecutor parent;
+    private User user;
 
     public CompactionRunner(Store store, HRegion region,
-        CompactionContext compaction, ThreadPoolExecutor parent) {
+        CompactionContext compaction, ThreadPoolExecutor parent, User user) {
       super();
       this.store = store;
       this.region = region;
@@ -440,6 +451,7 @@ public class CompactSplitThread implements CompactionRequestor {
       this.queuedPriority = (this.compaction == null)
           ? store.getCompactPriority() : compaction.getRequest().getPriority();
       this.parent = parent;
+      this.user = user;
     }
 
     @Override
@@ -448,13 +460,7 @@ public class CompactSplitThread implements CompactionRequestor {
           : ("Store = " + store.toString() + ", pri = " + queuedPriority);
     }
 
-    @Override
-    public void run() {
-      Preconditions.checkNotNull(server);
-      if (server.isStopped()
-          || (region.getTableDesc() != null && !region.getTableDesc().isCompactionEnabled()))
{
-        return;
-      }
+    private void doCompaction() {
       // Common case - system compaction without a file selection. Select now.
       if (this.compaction == null) {
         int oldPriority = this.queuedPriority;
@@ -523,6 +529,31 @@ public class CompactSplitThread implements CompactionRequestor {
       this.compaction.getRequest().afterExecute();
     }
 
+    @Override
+    public void run() {
+      Preconditions.checkNotNull(server);
+      if (server.isStopped()
+          || (region.getTableDesc() != null && !region.getTableDesc().isCompactionEnabled()))
{
+        return;
+      }
+      if (this.user == null) doCompaction();
+      else {
+        try {
+          user.getUGI().doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+              doCompaction();
+              return null;
+            }
+          });
+        } catch (InterruptedException ie) {
+          Thread.currentThread().interrupt();
+        } catch (IOException ioe) {
+          LOG.error("Encountered exception while compacting", ioe);
+        }
+      }
+    }
+
     private String formatStackTrace(Exception ex) {
       StringWriter sw = new StringWriter();
       PrintWriter pw = new PrintWriter(sw);

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ae5c045/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
index 93a73e9..1fa4b8e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
@@ -23,6 +23,7 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
 
 @InterfaceAudience.Private
@@ -72,12 +73,14 @@ public interface CompactionRequestor {
    * @param requests custom compaction requests. Each compaction must specify the store on
which it
    *          is acting. Can be <tt>null</tt> in which case a compaction will
be attempted on all
    *          stores for the region.
+   * @param user the effective user
    * @return The created {@link CompactionRequest CompactionRequests} or an empty list if
no
    *         compactions were started.
    * @throws IOException
    */
   List<CompactionRequest> requestCompaction(
-    final HRegion r, final String why, int pri, List<Pair<CompactionRequest, Store>>
requests
+    final HRegion r, final String why, int pri, List<Pair<CompactionRequest, Store>>
requests,
+    User user
   ) throws IOException;
 
   /**
@@ -87,10 +90,11 @@ public interface CompactionRequestor {
    * @param pri Priority of this compaction. minHeap. <=0 is critical
    * @param request custom compaction request to run. {@link Store} and {@link HRegion} for
the
    *          request must match the region and store specified here.
+   * @param user the effective user
    * @return The created {@link CompactionRequest} or <tt>null</tt> if no compaction
was started
    * @throws IOException
    */
   CompactionRequest requestCompaction(
-    final HRegion r, final Store s, final String why, int pri, CompactionRequest request
+    final HRegion r, final Store s, final String why, int pri, CompactionRequest request,
User user
   ) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ae5c045/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 95aa124..fe006ef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1534,7 +1534,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
               } else {
                 this.instance.compactSplitThread.requestCompaction(r, s, getName()
                     + " requests major compaction; use configured priority",
-                  this.majorCompactPriority, null);
+                  this.majorCompactPriority, null, null);
               }
             }
           } catch (IOException e) {
@@ -4161,7 +4161,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
         splitPoint = request.getSplitPoint().toByteArray();
       }
       region.forceSplit(splitPoint);
-      compactSplitThread.requestSplit(region, region.checkSplit());
+      compactSplitThread.requestSplit(region, region.checkSplit(), RpcServer.getRequestUser());
       return SplitRegionResponse.newBuilder().build();
     } catch (DroppedSnapshotException ex) {
       abort("Replay of WAL required. Forcing server shutdown", ex);
@@ -4261,10 +4261,10 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
       String log = "User-triggered " + (major ? "major " : "") + "compaction" + familyLogMsg;
       if(family != null) {
         compactSplitThread.requestCompaction(region, store, log,
-          Store.PRIORITY_USER, null);
+          Store.PRIORITY_USER, null, RpcServer.getRequestUser());
       } else {
         compactSplitThread.requestCompaction(region, log,
-          Store.PRIORITY_USER, null);
+          Store.PRIORITY_USER, null, RpcServer.getRequestUser());
       }
       return CompactRegionResponse.newBuilder().build();
     } catch (IOException ie) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ae5c045/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
index cd3787d..9ec5f60 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -26,6 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Strings;
@@ -42,13 +44,15 @@ class SplitRequest implements Runnable {
   private final HRegion parent;
   private final byte[] midKey;
   private final HRegionServer server;
+  private final User user;
   private TableLock tableLock;
 
-  SplitRequest(HRegion region, byte[] midKey, HRegionServer hrs) {
+  SplitRequest(HRegion region, byte[] midKey, HRegionServer hrs, User user) {
     Preconditions.checkNotNull(hrs);
     this.parent = region;
     this.midKey = midKey;
     this.server = hrs;
+    this.user = user;
   }
 
   @Override
@@ -56,13 +60,7 @@ class SplitRequest implements Runnable {
     return "regionName=" + parent + ", midKey=" + Bytes.toStringBinary(midKey);
   }
 
-  @Override
-  public void run() {
-    if (this.server.isStopping() || this.server.isStopped()) {
-      LOG.debug("Skipping split because server is stopping=" +
-        this.server.isStopping() + " or stopped=" + this.server.isStopped());
-      return;
-    }
+  private void doSplitting() {
     boolean success = false;
     server.getMetrics().incrSplitRequest();
     long startTime = EnvironmentEdgeManager.currentTimeMillis();
@@ -148,6 +146,31 @@ class SplitRequest implements Runnable {
     }
   }
 
+  @Override
+  public void run() {
+    if (this.server.isStopping() || this.server.isStopped()) {
+      LOG.debug("Skipping split because server is stopping=" +
+        this.server.isStopping() + " or stopped=" + this.server.isStopped());
+      return;
+    }
+    if (this.user == null) doSplitting();
+    else {
+      try {
+        user.getUGI().doAs(new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            doSplitting();
+            return null;
+          }
+        });
+      } catch (InterruptedException ie) {
+        Thread.currentThread().interrupt();
+      } catch (IOException ioe) {
+        LOG.error("Encountered exception while splitting", ioe);
+      }
+    }
+  }
+
   protected void releaseTableLock() {
     if (this.tableLock != null) {
       try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ae5c045/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index 6bd227f..d19e428 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -289,7 +289,7 @@ public class TestCompaction {
 
     CountDownLatch latch = new CountDownLatch(1);
     TrackableCompactionRequest request = new TrackableCompactionRequest(latch);
-    thread.requestCompaction(r, store, "test custom comapction", Store.PRIORITY_USER, request);
+    thread.requestCompaction(r, store, "test custom comapction", Store.PRIORITY_USER, request,null);
     // wait for the latch to complete.
     latch.await();
 
@@ -325,7 +325,7 @@ public class TestCompaction {
     }
 
     thread.requestCompaction(r, "test mulitple custom comapctions", Store.PRIORITY_USER,
-      Collections.unmodifiableList(requests));
+      Collections.unmodifiableList(requests), null);
 
     // wait for the latch to complete.
     latch.await();


Mime
View raw message