hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jmhs...@apache.org
Subject svn commit: r1375229 - in /hbase/branches/0.92: CHANGES.txt src/main/java/org/apache/hadoop/hbase/util/FSUtils.java src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
Date Mon, 20 Aug 2012 21:30:42 GMT
Author: jmhsieh
Date: Mon Aug 20 21:30:41 2012
New Revision: 1375229

URL: http://svn.apache.org/viewvc?rev=1375229&view=rev
Log:
HBASE-5714 Add write permissions check before any hbck run that modifies hdfs (Liang Xie)

Modified:
    hbase/branches/0.92/CHANGES.txt
    hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
    hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java

Modified: hbase/branches/0.92/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/CHANGES.txt?rev=1375229&r1=1375228&r2=1375229&view=diff
==============================================================================
--- hbase/branches/0.92/CHANGES.txt (original)
+++ hbase/branches/0.92/CHANGES.txt Mon Aug 20 21:30:41 2012
@@ -130,6 +130,7 @@ Release 0.92.2 - Unreleased
    HBASE-6173  hbck check specified tables only
    HBASE-5360  [uberhbck] Add options for how to handle offline split parents. 
    HBASE-6283  [region_mover.rb] Add option to exclude list of hosts on unload instead of just assuming the source node
+   HBASE-5714  Add write permissions check before any hbck run that modifies hdfs (Liang Xie)
 
   NEW FEATURE
    HBASE-5128  [uber hbck] Online automated repair of table integrity and region consistency problems

Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=1375229&r1=1375228&r2=1375229&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java Mon Aug 20 21:30:41 2012
@@ -40,6 +40,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -48,6 +50,8 @@ import org.apache.hadoop.hbase.master.HM
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 
@@ -852,6 +856,42 @@ public abstract class FSUtils {
   }
 
   /**
+   * Throw an exception if an action is not permitted by a user on a file.
+   * 
+   * @param ugi
+   *          the user
+   * @param file
+   *          the file
+   * @param action
+   *          the action
+   */
+  public static void checkAccess(UserGroupInformation ugi, FileStatus file,
+      FsAction action) throws AccessControlException {
+    if (ugi.getUserName().equals(file.getOwner())) {
+      if (file.getPermission().getUserAction().implies(action)) {
+        return;
+      }
+    } else if (contains(ugi.getGroupNames(), file.getGroup())) {
+      if (file.getPermission().getGroupAction().implies(action)) {
+        return;
+      }
+    } else if (file.getPermission().getOtherAction().implies(action)) {
+      return;
+    }
+    throw new AccessControlException("Permission denied:" + " action=" + action
+        + " path=" + file.getPath() + " user=" + ugi.getUserName());
+  }
+
+  private static boolean contains(String[] groups, String user) {
+    for (String group : groups) {
+      if (group.equals(user)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
    * @param conf
    * @return Returns the filesystem of the hbase rootdir.
    * @throws IOException

Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java?rev=1375229&r1=1375228&r2=1375229&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java Mon Aug 20 21:30:41 2012
@@ -47,6 +47,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -77,12 +78,15 @@ import org.apache.hadoop.hbase.ipc.HRegi
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
 import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKTable;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.zookeeper.KeeperException;
 
 import com.google.common.base.Joiner;
@@ -180,6 +184,7 @@ public class HBaseFsck {
   private boolean rerun = false; // if we tried to fix something, rerun hbck
   private static boolean summary = false; // if we want to print less output
   private boolean checkMetaOnly = false;
+  private boolean ignorePreCheckPermission = false; // if pre-check permission
 
   /*********
    * State
@@ -1186,6 +1191,27 @@ public class HBaseFsck {
     }
   }
 
+  private void preCheckPermission() throws IOException, AccessControlException {
+    if (shouldIgnorePreCheckPermission()) {
+      return;
+    }
+
+    Path hbaseDir = new Path(conf.get(HConstants.HBASE_DIR));
+    FileSystem fs = hbaseDir.getFileSystem(conf);
+    UserGroupInformation ugi = User.getCurrent().getUGI();
+    FileStatus[] files = fs.listStatus(hbaseDir);
+    for (FileStatus file : files) {
+      try {
+        FSUtils.checkAccess(ugi, file, FsAction.WRITE);
+      } catch (AccessControlException ace) {
+        LOG.warn("Got AccessControlException when preCheckPermission ", ace);
+        System.err.println("Current user " + ugi.getUserName() + " does not have write perms to " + file.getPath()
+            + ". Please rerun hbck as hdfs user " + file.getOwner());
+        throw new AccessControlException(ace);
+      }
+    }
+  }
+
   /**
    * Deletes region from meta table
    */
@@ -3009,6 +3035,14 @@ public class HBaseFsck {
     return fixSplitParents;
   }
 
+  public boolean shouldIgnorePreCheckPermission() {
+    return ignorePreCheckPermission;
+  }
+
+  public void setIgnorePreCheckPermission(boolean ignorePreCheckPermission) {
+    this.ignorePreCheckPermission = ignorePreCheckPermission;
+  }
+
   /**
    * @param mm maximum number of regions to merge into a single region.
    */
@@ -3074,6 +3108,7 @@ public class HBaseFsck {
     System.err.println("   -sidelineBigOverlaps  When fixing region overlaps, allow to sideline big overlaps");
     System.err.println("   -maxOverlapsToSideline <n>  When fixing region overlaps, allow at most <n> regions to sideline per group. (n=" + DEFAULT_OVERLAPS_TO_SIDELINE + " by default)");
     System.err.println("   -fixSplitParents  Try to force offline split parents to be online.");
+    System.err.println("   -ignorePreCheckPermission  ignore filesystem permission pre-check");
     System.err.println("");
     System.err.println("   -repair           Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " +
         "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps");
@@ -3150,6 +3185,8 @@ public class HBaseFsck {
         fsck.setSidelineBigOverlaps(true);
       } else if (cmd.equals("-fixSplitParents")) {
         fsck.setFixSplitParents(true);
+      } else if (cmd.equals("-ignorePreCheckPermission")) {
+        fsck.setIgnorePreCheckPermission(true);
       } else if (cmd.equals("-repair")) {
         // this attempts to merge overlapping hdfs regions, needs testing
         // under load
@@ -3208,6 +3245,15 @@ public class HBaseFsck {
         System.out.println("Allow checking/fixes for table: " + cmd);
       }
     }
+
+    // pre-check current user has FS write permission or not
+    try {
+      fsck.preCheckPermission();
+    } catch (AccessControlException ace) {
+      Runtime.getRuntime().exit(-1);
+    } catch (IOException ioe) {
+      Runtime.getRuntime().exit(-1);
+    }
     // do the real work of fsck
     fsck.connect();
     int code = fsck.onlineHbck();



Mime
View raw message