incubator-hcatalog-commits mailing list archives

From hashut...@apache.org
Subject svn commit: r1245309 - in /incubator/hcatalog/trunk: ./ conf/ src/java/org/apache/hcatalog/cli/SemanticAnalysis/ src/java/org/apache/hcatalog/common/ src/java/org/apache/hcatalog/security/ src/test/ src/test/org/apache/hcatalog/ src/test/org/apache/hca...
Date Fri, 17 Feb 2012 03:19:11 GMT
Author: hashutosh
Date: Fri Feb 17 03:19:10 2012
New Revision: 1245309

URL: http://svn.apache.org/viewvc?rev=1245309&view=rev
Log:
HCATALOG-245 : StorageHandler authorization providers (enis via hashutosh)

Added:
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java
    incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java
    incubator/hcatalog/trunk/src/test/org/apache/hcatalog/security/
    incubator/hcatalog/trunk/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java
Removed:
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/AuthUtils.java
Modified:
    incubator/hcatalog/trunk/CHANGES.txt
    incubator/hcatalog/trunk/conf/proto-hive-site.xml
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/AddPartitionHook.java
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
    incubator/hcatalog/trunk/src/test/excluded-tests

Modified: incubator/hcatalog/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/CHANGES.txt?rev=1245309&r1=1245308&r2=1245309&view=diff
==============================================================================
--- incubator/hcatalog/trunk/CHANGES.txt (original)
+++ incubator/hcatalog/trunk/CHANGES.txt Fri Feb 17 03:19:10 2012
@@ -21,6 +21,8 @@ Apache HCatalog Change Log
 Trunk (unreleased changes)
 
   INCOMPATIBLE CHANGES
+  HCAT-245. StorageHandler authorization providers (enis via hashutosh) 
+
   HCAT-241. Changes to HCatRecord to support switch from StorageDriver to SerDe (khorgath)
 
   HCAT-240. Changes to HCatOutputFormat to make it use SerDes instead of StorageDriver (toffer)

Modified: incubator/hcatalog/trunk/conf/proto-hive-site.xml
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/conf/proto-hive-site.xml?rev=1245309&r1=1245308&r2=1245309&view=diff
==============================================================================
--- incubator/hcatalog/trunk/conf/proto-hive-site.xml (original)
+++ incubator/hcatalog/trunk/conf/proto-hive-site.xml Fri Feb 17 03:19:10 2012
@@ -109,4 +109,19 @@
   <description>HCatalog sets this property value to hcat.isd, hcat.osd so that there is no need to do alter table set file format after adding partitions to the table.</description>
 </property>
 
+<property>
+  <name>hive.security.authorization.enabled</name>
+  <value>true</value>
+  <description>enable or disable the hive client authorization</description>
+</property>
+
+<property>
+  <name>hive.security.authorization.manager</name>
+  <value>org.apache.hcatalog.security.StorageDelegationAuthorizationProvider</value>
+  <description>The Hive client authorization manager class name.
+  The user-defined authorization class should implement the interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
+  HCatalog uses a model in which authorization checks are delegated to the storage layer (HDFS, HBase, ...).
+  </description>
+</property>
+
 </configuration>
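
For reference, the same two settings can also be applied programmatically on a client's
HiveConf before running DDL. A minimal sketch, assuming the client builds its own HiveConf
(only the two property values come from the diff above; the helper class is illustrative):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class AuthorizationConfExample {
      // Builds a HiveConf with client-side authorization enabled and delegated to the
      // storage layer, mirroring the two proto-hive-site.xml entries above.
      public static HiveConf newAuthorizingConf() {
        HiveConf conf = new HiveConf();
        conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
        conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
            "org.apache.hcatalog.security.StorageDelegationAuthorizationProvider");
        return conf;
      }
    }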

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/AddPartitionHook.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/AddPartitionHook.java?rev=1245309&r1=1245308&r2=1245309&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/AddPartitionHook.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/AddPartitionHook.java Fri Feb 17 03:19:10 2012
@@ -17,16 +17,25 @@
  */
 package org.apache.hcatalog.cli.SemanticAnalysis;
 
+import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
-import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
+import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 import org.apache.hcatalog.common.HCatConstants;
 
-public class AddPartitionHook extends AbstractSemanticAnalyzerHook{
+public class AddPartitionHook extends HCatSemanticAnalyzerBase {
 
   private String tblName, inDriver, outDriver;
 
@@ -54,7 +63,7 @@ public class AddPartitionHook extends Ab
 //  @Override
 //  public void postAnalyze(HiveSemanticAnalyzerHookContext context,
 //      List<Task<? extends Serializable>> rootTasks) throws SemanticException {
-//
+//    authorizeDDL(context, rootTasks);
 //    try {
 //      Hive db = context.getHive();
 //      Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
@@ -77,7 +86,76 @@ public class AddPartitionHook extends Ab
 //      throw new SemanticException(e);
 //    }
 //  }
-}
-
-
+  
+  @Override
+  protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context,
+      Hive hive, DDLWork work) throws HiveException {
+    AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
+    if (addPartitionDesc != null) {
+      String dbName = getDbName(hive, addPartitionDesc.getDbName());
+      Table table = hive.getTable(dbName, addPartitionDesc.getTableName());
+      Path partPath = null;
+      if (addPartitionDesc.getLocation() != null) {
+        partPath = new Path(table.getPath(), addPartitionDesc.getLocation());
+      }
+      
+      Partition part = newPartition(
+          table, addPartitionDesc.getPartSpec(), partPath,
+          addPartitionDesc.getPartParams(),
+          addPartitionDesc.getInputFormat(),
+          addPartitionDesc.getOutputFormat(),
+          addPartitionDesc.getNumBuckets(),
+          addPartitionDesc.getCols(),
+          addPartitionDesc.getSerializationLib(),
+          addPartitionDesc.getSerdeParams(),
+          addPartitionDesc.getBucketCols(),
+          addPartitionDesc.getSortCols());
+      
+      authorize(part, Privilege.CREATE);
+    }
+  }
+  
+  protected Partition newPartition(Table tbl, Map<String, String> partSpec,
+      Path location, Map<String, String> partParams, String inputFormat, String outputFormat,
+      int numBuckets, List<FieldSchema> cols,
+      String serializationLib, Map<String, String> serdeParams,
+      List<String> bucketCols, List<Order> sortCols) throws HiveException {
 
+    try {
+      Partition tmpPart = new Partition(tbl, partSpec, location);
+      org.apache.hadoop.hive.metastore.api.Partition inPart
+        = tmpPart.getTPartition();
+      if (partParams != null) {
+        inPart.setParameters(partParams);
+      }
+      if (inputFormat != null) {
+        inPart.getSd().setInputFormat(inputFormat);
+      }
+      if (outputFormat != null) {
+        inPart.getSd().setOutputFormat(outputFormat);
+      }
+      if (numBuckets != -1) {
+        inPart.getSd().setNumBuckets(numBuckets);
+      }
+      if (cols != null) {
+        inPart.getSd().setCols(cols);
+      }
+      if (serializationLib != null) {
+          inPart.getSd().getSerdeInfo().setSerializationLib(serializationLib);
+      }
+      if (serdeParams != null) {
+        inPart.getSd().getSerdeInfo().setParameters(serdeParams);
+      }
+      if (bucketCols != null) {
+        inPart.getSd().setBucketCols(bucketCols);
+      }
+      if (sortCols != null) {
+        inPart.getSd().setSortCols(sortCols);
+      }
+      
+      return new Partition(tbl, inPart);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+}
\ No newline at end of file

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java?rev=1245309&r1=1245308&r2=1245309&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java Fri Feb 17 03:19:10 2012
@@ -20,18 +20,21 @@ package org.apache.hcatalog.cli.Semantic
 import java.io.Serializable;
 import java.util.List;
 
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
-import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
+import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 import org.apache.hcatalog.common.HCatConstants;
 
-final class CreateDatabaseHook  extends AbstractSemanticAnalyzerHook{
+final class CreateDatabaseHook  extends HCatSemanticAnalyzerBase {
 
   String databaseName;
 
@@ -79,5 +82,17 @@ final class CreateDatabaseHook  extends 
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
       List<Task<? extends Serializable>> rootTasks) throws SemanticException {
     context.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, databaseName);
+    super.postAnalyze(context, rootTasks);
+  }
+  
+  @Override
+  protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context,
+      Hive hive, DDLWork work) throws HiveException {
+    CreateDatabaseDesc createDb = work.getCreateDatabaseDesc();
+    if (createDb != null) {
+      Database db = new Database(createDb.getName(), createDb.getComment(), 
+          createDb.getLocationUri(), createDb.getDatabaseProperties());
+      authorize(db, Privilege.CREATE);
+    }
   }
 }

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java?rev=1245309&r1=1245308&r2=1245309&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java Fri Feb 17 03:19:10 2012
@@ -27,6 +27,7 @@ import org.apache.commons.lang.StringUti
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -34,8 +35,10 @@ import org.apache.hadoop.hive.ql.exec.DD
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
+import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
@@ -44,7 +47,7 @@ import org.apache.hadoop.hive.ql.parse.H
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
-import org.apache.hcatalog.common.AuthUtils;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 import org.apache.hcatalog.common.HCatConstants;
 import org.apache.hcatalog.common.HCatException;
 import org.apache.hcatalog.common.HCatUtil;
@@ -52,7 +55,7 @@ import org.apache.hcatalog.mapreduce.HCa
 import org.apache.hcatalog.rcfile.RCFileInputDriver;
 import org.apache.hcatalog.rcfile.RCFileOutputDriver;
 
-final class CreateTableHook extends AbstractSemanticAnalyzerHook {
+final class CreateTableHook extends HCatSemanticAnalyzerBase {
 
     private String inStorageDriver, outStorageDriver, tableName, loader, storer;
 
@@ -219,6 +222,12 @@ final class CreateTableHook extends Abst
         }
         CreateTableDesc desc = ((DDLTask) rootTasks.get(rootTasks.size() - 1))
                 .getWork().getCreateTblDesc();
+        if (desc == null) {
+          // Desc will be null if it's a CREATE TABLE LIKE statement; the desc is
+          // then contained in CreateTableLikeDesc. Currently, HCat disallows CTLT
+          // in the pre-hook, so desc should never actually be null here.
+          return;
+        }
         Map<String, String> tblProps = desc.getTblProps();
         if (tblProps == null) {
             // tblProps will be null if user didnt use tblprops in his CREATE
@@ -230,15 +239,10 @@ final class CreateTableHook extends Abst
         // first check if we will allow the user to create table.
         String storageHandler = desc.getStorageHandler();
         if (StringUtils.isEmpty(storageHandler)) {
-
-            authorize(context, desc.getLocation());
             tblProps.put(HCatConstants.HCAT_ISD_CLASS, inStorageDriver);
             tblProps.put(HCatConstants.HCAT_OSD_CLASS, outStorageDriver);
 
         } else {
-            // Create instance of HCatStorageHandler and obtain the
-            // HiveAuthorizationprovider for the handler and use it
-            // to authorize.
             try {
                 HCatStorageHandler storageHandlerInst = HCatUtil
                         .getStorageHandler(context.getConf(),
@@ -246,18 +250,11 @@ final class CreateTableHook extends Abst
                                                      desc.getSerName(),
                                                      desc.getInputFormat(),
                                                      desc.getOutputFormat());
-                HiveAuthorizationProvider auth = storageHandlerInst
-                        .getAuthorizationProvider();
-
-                // TBD: To pass in the exact read and write privileges.
-                String databaseName = context.getHive().newTable(desc.getTableName()).getDbName();
-                auth.authorize(context.getHive().getDatabase(databaseName), null, null);
+                //Authorization checks are performed by the storageHandler.getAuthorizationProvider(), if  
+                //StorageDelegationAuthorizationProvider is used.
             } catch (IOException e) {
                 throw new SemanticException(e);
-            } catch (HiveException e) {
-                throw new SemanticException(e);
             }
-
         }
         if (loader!=null) {
             tblProps.put(HCatConstants.HCAT_PIG_LOADER, loader);
@@ -266,44 +263,37 @@ final class CreateTableHook extends Abst
             tblProps.put(HCatConstants.HCAT_PIG_STORER, storer);
         }
 
-        if (desc == null) {
-            // Desc will be null if its CREATE TABLE LIKE. Desc will be
-            // contained
-            // in CreateTableLikeDesc. Currently, HCat disallows CTLT in
-            // pre-hook.
-            // So, desc can never be null.
-            return;
+        if (desc != null) {
+          try {
+            Table table = context.getHive().newTable(desc.getTableName());
+            if (desc.getLocation() != null) {
+              table.setDataLocation(new Path(desc.getLocation()).toUri());
+            }
+            if (desc.getStorageHandler() != null) {
+              table.setProperty(
+                org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE,
+                desc.getStorageHandler());
+            }
+            for (Map.Entry<String, String> prop : tblProps.entrySet()) {
+              table.setProperty(prop.getKey(), prop.getValue());
+            }
+            for (Map.Entry<String, String> prop : desc.getSerdeProps().entrySet()) {
+              table.setSerdeParam(prop.getKey(), prop.getValue());
+            }
+            //TODO: set other Table properties as needed
+  
+            //authorize against the table operation so that location permissions can be checked if any
+            
+            if (HiveConf.getBoolVar(context.getConf(),
+                HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
+              authorize(table, Privilege.CREATE);
+            }
+          } catch (HiveException ex) {
+            throw new SemanticException(ex);
+          }
         }
 
         desc.setTblProps(tblProps);
         context.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, tableName);
     }
-
-    private void authorize(HiveSemanticAnalyzerHookContext context, String loc)
-            throws SemanticException {
-
-        Path tblDir;
-        Configuration conf = context.getConf();
-        try {
-            Warehouse wh = new Warehouse(conf);
-            if (loc == null || loc.isEmpty()) {
-                Hive hive = context.getHive();
-                tblDir = wh.getTablePath(
-                        hive.getDatabase(hive.getCurrentDatabase()), tableName)
-                        .getParent();
-            } else {
-                tblDir = wh.getDnsPath(new Path(loc));
-            }
-
-            try {
-                AuthUtils.authorize(tblDir, FsAction.WRITE, conf);
-            } catch (HCatException e) {
-                throw new SemanticException(e);
-            }
-        } catch (MetaException e) {
-            throw new SemanticException(e);
-        } catch (HiveException e) {
-            throw new SemanticException(e);
-        }
-    }
 }

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java?rev=1245309&r1=1245308&r2=1245309&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java Fri Feb 17 03:19:10 2012
@@ -19,29 +19,38 @@ package org.apache.hcatalog.cli.Semantic
 
 import java.io.Serializable;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hcatalog.common.AuthUtils;
+import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
+import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc;
+import org.apache.hadoop.hive.ql.plan.DescTableDesc;
+import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
+import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.HiveOperation;
+import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
+import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
+import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
+import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
+import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 import org.apache.hcatalog.common.ErrorType;
 import org.apache.hcatalog.common.HCatException;
 
-public class HCatSemanticAnalyzer extends AbstractSemanticAnalyzerHook {
+public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase {
 
   private AbstractSemanticAnalyzerHook hook;
   private ASTNode ast;
@@ -116,13 +125,7 @@ public class HCatSemanticAnalyzer extend
       switch (ast.getToken().getType()) {
 
       case HiveParser.TOK_DESCTABLE:
-        authorize(getFullyQualifiedName((ASTNode) ast.getChild(0).getChild(0)), context, FsAction.READ, false);
-        break;
-
       case HiveParser.TOK_SHOWPARTITIONS:
-        authorize(BaseSemanticAnalyzer.getUnescapedName((ASTNode)ast.getChild(0)), context, FsAction.READ, false);
-        break;
-
       case HiveParser.TOK_ALTERTABLE_ADDPARTS:
       case HiveParser.TOK_DROPTABLE:
       case HiveParser.TOK_ALTERTABLE_ADDCOLS:
@@ -131,105 +134,141 @@ public class HCatSemanticAnalyzer extend
       case HiveParser.TOK_ALTERTABLE_PROPERTIES:
       case HiveParser.TOK_ALTERTABLE_SERIALIZER:
       case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES:
-        authorize(BaseSemanticAnalyzer.getUnescapedName((ASTNode)ast.getChild(0)), context, FsAction.WRITE, false);
-        break;
-
       case HiveParser.TOK_ALTERTABLE_PARTITION:
-        authorize(BaseSemanticAnalyzer.unescapeIdentifier(((ASTNode)ast.getChild(0)).getChild(0).getText()), context, FsAction.WRITE, false);
-        break;
-
       case HiveParser.TOK_DESCDATABASE:
-      case HiveParser.TOK_SWITCHDATABASE:
-        authorize(BaseSemanticAnalyzer.getUnescapedName((ASTNode)ast.getChild(0)), context, FsAction.READ, true);
-        break;
-
+      case HiveParser.TOK_SWITCHDATABASE: 
       case HiveParser.TOK_DROPDATABASE:
-        authorize(BaseSemanticAnalyzer.getUnescapedName((ASTNode)ast.getChild(0)), context, FsAction.WRITE, true);
-        break;
-
       case HiveParser.TOK_CREATEDATABASE:
       case HiveParser.TOK_SHOWDATABASES:
       case HiveParser.TOK_SHOW_TABLESTATUS:
       case HiveParser.TOK_SHOWTABLES:
-        // We do no checks for show tables/db , create db. Its always allowed.
-
-      case HiveParser.TOK_CREATETABLE:
-        // No checks for Create Table, since its not possible to compute location
-        // here easily. So, it is especially handled in CreateTable post hook.
+      case HiveParser.TOK_CREATETABLE: 
         break;
 
       case HiveParser.TOK_EXPORT:
-        String tableName = BaseSemanticAnalyzer.getUnescapedName(((ASTNode) ast.getChild(0).getChild(0)));
-        LOG.debug("Export for table " + tableName);
-        authorize(tableName, context, FsAction.READ, false);
-        break;
-
       case HiveParser.TOK_IMPORT:
-        LOG.debug("Import into location " + context.getConf().get("import.destination.dir"));
-        AuthUtils.authorize(new Path(context.getConf().get("import.destination.dir")),
-                    FsAction.WRITE, context.getConf());
         break;
 
-
       default:
         throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, "Unexpected token: "+ast.getToken());
       }
+      
+      authorizeDDL(context, rootTasks);
+      
     } catch(HCatException e){
       throw new SemanticException(e);
-    } catch (MetaException e) {
-      throw new SemanticException(e);
     } catch (HiveException e) {
       throw new SemanticException(e);
-  }
+    }
 
     if(hook != null){
       hook.postAnalyze(context, rootTasks);
     }
   }
 
-  private void authorize(String name, HiveSemanticAnalyzerHookContext cntxt, FsAction action, boolean isDBOp)
-                                                      throws MetaException, HiveException, HCatException{
+  @Override
+  protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive, DDLWork work)
+      throws HiveException {
+    // DB operations; none of them are enforced by Hive right now.
+
+    ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
+    if (showDatabases != null) {
+      authorize(HiveOperation.SHOWDATABASES.getInputRequiredPrivileges(),
+          HiveOperation.SHOWDATABASES.getOutputRequiredPrivileges());
+    }
+
+    DropDatabaseDesc dropDb = work.getDropDatabaseDesc();
+    if (dropDb != null) {
+      Database db = cntxt.getHive().getDatabase(dropDb.getDatabaseName());
+      authorize(db, Privilege.DROP);
+    }
+
+    DescDatabaseDesc descDb = work.getDescDatabaseDesc();
+    if (descDb != null) {
+      Database db = cntxt.getHive().getDatabase(descDb.getDatabaseName());
+      authorize(db, Privilege.SELECT);
+    }
 
+    SwitchDatabaseDesc switchDb = work.getSwitchDatabaseDesc();
+    if (switchDb != null) {
+      Database db = cntxt.getHive().getDatabase(switchDb.getDatabaseName());
+      authorize(db, Privilege.SELECT);
+    }
+
+    ShowTablesDesc showTables = work.getShowTblsDesc();
+    if (showTables != null) {
+      String dbName = showTables.getDbName() == null ? cntxt.getHive().getCurrentDatabase()
+          : showTables.getDbName();
+      authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
+    }
+
+    ShowTableStatusDesc showTableStatus = work.getShowTblStatusDesc();
+    if (showTableStatus != null) {
+      String dbName = showTableStatus.getDbName() == null ? cntxt.getHive().getCurrentDatabase()
+          : showTableStatus.getDbName();
+      authorize(cntxt.getHive().getDatabase(dbName), Privilege.SELECT);
+    }
 
-    Warehouse wh = new Warehouse(cntxt.getConf());
-    if(!isDBOp){
-      // Do validations for table path.
-      Table tbl;
-      try{
-        tbl = cntxt.getHive().getTable(name);
+    // TODO: add alter database support in HCat
+
+    // Table operations.
+
+    DropTableDesc dropTable = work.getDropTblDesc();
+    if (dropTable != null) {
+      if (dropTable.getPartSpecs() == null) {
+        // Plain DROP TABLE is already enforced by Hive. We only check the table-level
+        // location, even if the table is partitioned.
+      } else {
+        //this is actually an ALTER TABLE DROP PARTITION statement
+        for (Map<String, String> partSpec : dropTable.getPartSpecs()) {
+          // partitions are not added as write entries in drop partitions in Hive
+          Table table = hive.getTable(hive.getCurrentDatabase(), dropTable.getTableName());
+          List<Partition> partitions = hive.getPartitions(table, partSpec);
+          for (Partition part : partitions) {
+            authorize(part, Privilege.DROP);
+          }
+        }
       }
-      catch(InvalidTableException ite){
-        // Table itself doesn't exist in metastore, nothing to validate.
-        return;
+    }
+
+    AlterTableDesc alterTable = work.getAlterTblDesc();
+    if (alterTable != null) {
+      Table table = hive.getTable(hive.getCurrentDatabase(), alterTable.getOldName(), false);
+
+      Partition part = null;
+      if (alterTable.getPartSpec() != null) {
+        part = hive.getPartition(table, alterTable.getPartSpec(), false);
       }
-      Path path = tbl.getPath();
-      if(path != null){
-        AuthUtils.authorize(wh.getDnsPath(path), action, cntxt.getConf());
-      } else{
-        // This will happen, if table exists in metastore for a given
-        // tablename, but has no path associated with it, so there is nothing to check.
-        // In such cases, do no checks and allow whatever hive behavior is for it.
-        return;
+
+      String newLocation = alterTable.getNewLocation();
+      
+      /* HCat requires ALTER_DATA privileges for ALTER TABLE LOCATION statements 
+       * for the old table/partition location and the new location.  
+       */
+      if (alterTable.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
+        if (part != null) {
+          authorize(part, Privilege.ALTER_DATA); // authorize for the old
+                                                 // location, and new location
+          part.setLocation(newLocation);
+          authorize(part, Privilege.ALTER_DATA);
+        } else {
+          authorize(table, Privilege.ALTER_DATA); // authorize for the old
+                                                  // location, and new location
+          table.getTTable().getSd().setLocation(newLocation);
+          authorize(table, Privilege.ALTER_DATA);
+        }
       }
-    } else{
-      // Else, its a DB operation.
-    	Database db = cntxt.getHive().getDatabase(name); 
-    	if(null == db){
-    		// Database doesn't exist, nothing to authorize
-    		return;
-    	}
-      AuthUtils.authorize(wh.getDatabasePath(db), action, cntxt.getConf());
+      //other alter operations are already supported by Hive
     }
-  }
 
-
-  private String getFullyQualifiedName(ASTNode ast) {
-    // Copied verbatim from DDLSemanticAnalyzer, since its private there.
-    if (ast.getChildCount() == 0) {
-      return ast.getText();
+    DescTableDesc descTable = work.getDescTblDesc();
+    if (descTable != null) {
+      authorizeTable(cntxt.getHive(), descTable.getTableName(), Privilege.SELECT);
     }
 
-    return getFullyQualifiedName((ASTNode) ast.getChild(0)) + "."
-        + getFullyQualifiedName((ASTNode) ast.getChild(1));
+    ShowPartitionsDesc showParts = work.getShowPartsDesc();
+    if (showParts != null) {
+      authorizeTable(cntxt.getHive(), showParts.getTabName(), Privilege.SELECT);
+    }
   }
 }

Added: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java?rev=1245309&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java (added)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java Fri Feb 17 03:19:10 2012
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hcatalog.cli.SemanticAnalysis;
+
+import java.io.Serializable;
+import java.util.List;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
+import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+/** 
+ * Base class for HCatSemanticAnalyzer hooks.
+ */
+public class HCatSemanticAnalyzerBase extends AbstractSemanticAnalyzerHook {
+
+  private HiveAuthorizationProvider authProvider;
+  
+  protected String getDbName(Hive hive, String dbName) {
+    return dbName == null ? hive.getCurrentDatabase() : dbName;
+  }
+  
+  public HiveAuthorizationProvider getAuthProvider() {
+    if (authProvider == null) {
+      authProvider = SessionState.get().getAuthorizer();
+    }
+    
+    return authProvider;
+  }
+
+  @Override
+  public void postAnalyze(HiveSemanticAnalyzerHookContext context,
+      List<Task<? extends Serializable>> rootTasks) throws SemanticException {
+    super.postAnalyze(context, rootTasks);
+    
+    //Authorize the operation.
+    authorizeDDL(context, rootTasks);
+  }
+  
+  /** 
+   * Iterates over the given rootTasks and calls authorizeDDLWork() for each DDLWork to 
+   * be authorized. Hooks should override this method, or authorizeDDLWork(), to perform 
+   * the actual authorization.
+   */
+  /*
+   * Impl note: Hive provides authorization with its own model, and calls the defined 
+   * HiveAuthorizationProvider from Driver.doAuthorization(). However, HCat has to 
+   * make additional calls to the auth provider to implement the expected behavior for 
+   * StorageDelegationAuthorizationProvider. This means that the defined auth provider 
+   * is called by both Hive and HCat. The following are missing from Hive's implementation,
+   * and when they are fixed in Hive, we can remove the HCat-specific auth checks.
+   * 1. CREATE DATABASE/TABLE and ADD PARTITION statements do not call 
+   * HiveAuthorizationProvider.authorize() with the candidate objects, which means that
+   * we cannot do checks against defined LOCATION.
+   * 2. HiveOperation does not define sufficient Privileges for most of the operations, 
+   * especially database operations. 
+   * 3. For some of the operations, Hive SemanticAnalyzer does not add the changed 
+   * object as a WriteEntity or ReadEntity.
+   * 
+   * @see https://issues.apache.org/jira/browse/HCATALOG-244
+   * @see https://issues.apache.org/jira/browse/HCATALOG-245
+   */
+  protected void authorizeDDL(HiveSemanticAnalyzerHookContext context, 
+      List<Task<? extends Serializable>> rootTasks)  throws SemanticException {
+    
+    if (!HiveConf.getBoolVar(context.getConf(),
+        HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
+      return;
+    }
+    
+    Hive hive;
+    try {
+      hive = context.getHive();
+    
+      for (Task<? extends Serializable> task : rootTasks) {
+        if (task.getWork() instanceof DDLWork) {
+          DDLWork work = (DDLWork)task.getWork();
+          if (work != null) {
+            authorizeDDLWork(context, hive, work);
+          }
+        }
+      }
+    } catch (SemanticException ex) {
+      throw ex;
+    } catch (AuthorizationException ex) {
+      throw ex;
+    } catch (Exception ex) {
+      throw new SemanticException(ex);
+    }
+  }
+  
+  /** 
+   * Authorizes the given DDLWork. Does nothing by default. Override this 
+   * and delegate to the relevant method in HiveAuthorizationProvider obtained by 
+   * getAuthProvider().
+   */
+  protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context,
+      Hive hive, DDLWork work) throws HiveException {
+  }
+
+  protected void authorize(Privilege[] inputPrivs, Privilege[] outputPrivs)
+      throws AuthorizationException, SemanticException {
+    try {
+      getAuthProvider().authorize(inputPrivs, outputPrivs);
+    } catch (HiveException ex) {
+      throw new SemanticException(ex);
+    }
+  }
+  
+  protected void authorize(Database db, Privilege priv) 
+      throws AuthorizationException, SemanticException {
+    try {
+      getAuthProvider().authorize(db, null, new Privilege[] {priv});
+    } catch (HiveException ex) {
+      throw new SemanticException(ex);
+    }
+  }
+  
+  protected void authorizeTable(Hive hive, String tableName, Privilege priv) 
+      throws AuthorizationException, HiveException {
+    Table table;
+    try{
+      table = hive.getTable(tableName);
+    }
+    catch(InvalidTableException ite){
+      // Table itself doesn't exist in metastore, nothing to validate.
+      return;
+    }
+    
+    authorize(table, priv);
+  }
+  
+  protected void authorize(Table table, Privilege priv) 
+      throws AuthorizationException, SemanticException {
+    try {
+      getAuthProvider().authorize(table, new Privilege[] {priv}, null);
+    } catch (HiveException ex) {
+      throw new SemanticException(ex);
+    }
+  }
+  
+  protected void authorize(Partition part, Privilege priv) 
+      throws AuthorizationException, SemanticException {
+    try {
+      getAuthProvider().authorize(part, new Privilege[] {priv}, null);
+    } catch (HiveException ex) {
+      throw new SemanticException(ex);
+    }
+  }
+}
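
As a usage sketch of the base class above: a hook only needs to extend HCatSemanticAnalyzerBase
and override authorizeDDLWork(), delegating to the authorize() helpers. The class below is
hypothetical; the real hooks in this commit (AddPartitionHook, CreateDatabaseHook,
HCatSemanticAnalyzer) follow the same pattern.

    package org.apache.hcatalog.cli.SemanticAnalysis;

    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
    import org.apache.hadoop.hive.ql.plan.DDLWork;
    import org.apache.hadoop.hive.ql.security.authorization.Privilege;

    public class ExampleDropDatabaseHook extends HCatSemanticAnalyzerBase {
      @Override
      protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context,
          Hive hive, DDLWork work) throws HiveException {
        // Authorize DROP DATABASE against the database's storage location.
        if (work.getDropDatabaseDesc() != null) {
          Database db = hive.getDatabase(work.getDropDatabaseDesc().getDatabaseName());
          authorize(db, Privilege.DROP);
        }
      }
    }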

Added: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java?rev=1245309&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java (added)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java Fri Feb 17 03:19:10 2012
@@ -0,0 +1,295 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hcatalog.security;
+
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProviderBase;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/** 
+ * An AuthorizationProvider that checks data access permissions against HDFS. 
+ */
+public class HdfsAuthorizationProvider extends HiveAuthorizationProviderBase {
+
+  protected Warehouse wh;
+  
+  public HdfsAuthorizationProvider() {
+    super();
+  }
+  
+  public HdfsAuthorizationProvider(Configuration conf) {
+    super();
+    setConf(conf);
+  }
+  
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    try {
+      this.wh = new Warehouse(conf);
+    } catch (MetaException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+  
+  protected FsAction getFsAction(Privilege priv, Path path) {
+    
+    switch (priv.getPriv()) {
+    case ALL            : throw new AuthorizationException("no matching Action for Privilege.All");
+    case ALTER_DATA     : return FsAction.WRITE;   
+    case ALTER_METADATA : return FsAction.WRITE;  
+    case CREATE         : return FsAction.WRITE;
+    case DROP           : return FsAction.WRITE;
+    case INDEX          : return FsAction.WRITE;
+    case LOCK           : return FsAction.WRITE;
+    case SELECT         : return FsAction.READ;
+    case SHOW_DATABASE  : return FsAction.READ;
+    case UNKNOWN        : 
+    default             : throw new AuthorizationException("Unknown privilege");
+    }
+  }
+  
+  protected EnumSet<FsAction> getFsActions(Privilege[] privs, Path path) {
+    EnumSet<FsAction> actions = EnumSet.noneOf(FsAction.class);
+    
+    if (privs == null) {
+      return actions;
+    }
+    
+    for (Privilege priv : privs) {
+      actions.add(getFsAction(priv, path));
+    }
+    
+    return actions;
+  }
+  
+  private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
+
+  private Path getDefaultDatabasePath(String dbName) throws MetaException {
+    if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
+      return wh.getWhRoot();
+    }
+    return new Path(wh.getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
+  }
+  
+  protected Path getDbLocation(Database db) throws HiveException {
+    try {
+      String location = db.getLocationUri();
+      if (location == null) {
+        return getDefaultDatabasePath(db.getName());
+      } else {
+        return wh.getDnsPath(wh.getDatabasePath(db));
+      }
+    } catch (MetaException ex) {
+      throw new HiveException(ex.getMessage());
+    }
+  }
+  
+  @Override
+  public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+      throws HiveException, AuthorizationException {
+    //Authorize for global level permissions at the warehouse dir
+    Path root;
+    try {
+      root = wh.getWhRoot();
+      authorize(root, readRequiredPriv, writeRequiredPriv);
+    } catch (MetaException ex) {
+      throw new HiveException(ex);
+    }
+  }
+
+  @Override
+  public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+      throws HiveException, AuthorizationException {
+   
+    Path path = getDbLocation(db);
+    
+    authorize(path, readRequiredPriv, writeRequiredPriv);
+  }
+
+  @Override
+  public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+      throws HiveException, AuthorizationException {
+    
+    //unlike Hive's model, this can be called at CREATE TABLE as well, since we should authorize 
+    //against the table's declared location
+    Path path = null;
+    try {
+      if (table.getTTable().getSd().getLocation() == null
+          || table.getTTable().getSd().getLocation().isEmpty()) {
+            path = wh.getTablePath(hive_db.getDatabase(table.getDbName()), table.getTableName());
+      } else {
+         path = table.getPath();
+      }
+    } catch (MetaException ex) {
+      throw new HiveException(ex);
+    }
+    
+    authorize(path, readRequiredPriv, writeRequiredPriv);
+  }
+
+  @Override
+  public void authorize(Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+      throws HiveException, AuthorizationException {
+    
+    if (part.getLocation() == null) { 
+      authorize(part.getTable(), readRequiredPriv, writeRequiredPriv);
+    } else {
+      authorize(part.getPartitionPath(), readRequiredPriv, writeRequiredPriv);
+    }
+  }
+
+  @Override
+  public void authorize(Table table, Partition part, List<String> columns,
+      Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
+      AuthorizationException {
+    //columns cannot live in different files, just check for partition level permissions
+    authorize(part, readRequiredPriv, writeRequiredPriv);
+  }
+  
+  /** 
+   * Authorizes the given privileges against a path.
+   * @param path a filesystem path
+   * @param readRequiredPriv a list of privileges needed for inputs.
+   * @param writeRequiredPriv a list of privileges needed for outputs.
+   */
+  public void authorize(Path path, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) 
+      throws HiveException, AuthorizationException {
+    try {
+      EnumSet<FsAction> actions = getFsActions(readRequiredPriv, path);
+      actions.addAll(getFsActions(writeRequiredPriv, path));
+      if (actions.isEmpty()) {
+        return;
+      }
+      
+      checkPermissions(getConf(), path, actions);
+      
+    } catch (AccessControlException ex) {
+      throw new AuthorizationException(ex);
+    } catch (LoginException ex) {
+      throw new AuthorizationException(ex);
+    } catch (IOException ex) {
+      throw new HiveException(ex);
+    }
+  }
+  
+  /**
+   * Checks the permissions for the given path and the current user on the Hadoop FS. If the given
+   * path does not exist, it checks the permissions of the nearest existing ancestor.
+   */
+  public static void checkPermissions(final Configuration conf, final Path path, 
+      final EnumSet<FsAction> actions) throws IOException, LoginException {
+
+    if (path == null) {
+      throw new IllegalArgumentException("path is null");
+    }
+    
+    final UserGroupInformation ugi;
+    
+    HadoopShims shims = ShimLoader.getHadoopShims();
+    ugi = shims.getUGIForConf(conf);
+    final String user = shims.getShortUserName(ugi);  
+        
+    final FileSystem fs = path.getFileSystem(conf);
+
+    if (fs.exists(path)) {
+      checkPermissions(fs, path, actions, user, ugi.getGroupNames());
+    } else if (path.getParent() != null) {
+      // find the nearest ancestor that exists and check its permissions
+      Path par = path.getParent();
+      while (par != null) {
+        if (fs.exists(par)) {
+          break;
+        }
+        par = par.getParent();
+      }
+
+      checkPermissions(fs, par, actions, user, ugi.getGroupNames());
+    }
+  }
+  
+  /**
+   * Checks the permissions for the given path and current user on Hadoop FS. If the given path 
+   * does not exist, it returns.
+   */
+  @SuppressWarnings("deprecation")
+  public static void checkPermissions(final FileSystem fs, final Path path,
+      final EnumSet<FsAction> actions, String user, String[] groups) throws IOException,
+      AccessControlException {
+    
+    final FileStatus stat;
+
+    try {
+      stat = fs.getFileStatus(path);
+    } catch (FileNotFoundException fnfe) {
+      // File named by path doesn't exist; nothing to validate.
+      return;
+    } catch (org.apache.hadoop.fs.permission.AccessControlException ace) {
+      // Older hadoop version will throw this @deprecated Exception.
+      throw new AccessControlException(ace.getMessage());
+    }
+
+    final FsPermission dirPerms = stat.getPermission();
+    final String grp = stat.getGroup();
+
+    for (FsAction action : actions) {
+      if (user.equals(stat.getOwner())) {
+        if (dirPerms.getUserAction().implies(action)) {
+          continue;
+        }
+      }
+      if (ArrayUtils.contains(groups, grp)) {
+        if (dirPerms.getGroupAction().implies(action)) {
+          continue;
+        }
+      }
+      if (dirPerms.getOtherAction().implies(action)) {
+        continue;
+      }
+      throw new AccessControlException("action " + action + " not permitted on path " 
+          + path + " for user " + user);
+    }
+  }
+}
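
A quick usage sketch of the static checkPermissions() helper above (the path and action are
illustrative): it resolves the current user through the Hadoop shims and, if the path does not
exist yet, walks up to the nearest existing ancestor before testing the owner/group/other bits.

    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hcatalog.security.HdfsAuthorizationProvider;

    public class CheckPermissionsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Throws AccessControlException if the current user cannot WRITE to the
        // nearest existing ancestor of the (possibly not yet created) path.
        HdfsAuthorizationProvider.checkPermissions(conf,
            new Path("/user/hive/warehouse/mydb.db/mytable"), EnumSet.of(FsAction.WRITE));
      }
    }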

Added: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java?rev=1245309&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java (added)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java Fri Feb 17 03:19:10 2012
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hcatalog.security;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProviderBase;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hcatalog.storagehandler.HCatStorageHandler;
+
+/**
+ * A HiveAuthorizationProvider which delegates the authorization requests to 
+ * the underlying AuthorizationProviders obtained from the StorageHandler.
+ */
+public class StorageDelegationAuthorizationProvider extends HiveAuthorizationProviderBase {
+
+  protected HiveAuthorizationProvider hdfsAuthorizer = new HdfsAuthorizationProvider();
+  
+  protected static Map<String, String> authProviders = new HashMap<String,String>();
+  
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    hdfsAuthorizer.setConf(conf);
+  }
+  
+  @Override
+  public void setAuthenticator(HiveAuthenticationProvider authenticator) {
+    super.setAuthenticator(authenticator);
+    hdfsAuthorizer.setAuthenticator(authenticator);
+  }
+  
+  static {
+    registerAuthProvider("org.apache.hadoop.hive.hbase.HBaseStorageHandler",
+        "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
+    registerAuthProvider("org.apache.hcatalog.hbase.HBaseHCatStorageHandler", 
+        "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
+  }
+  
+  //workaround until Hive adds StorageHandler.getAuthorizationProvider(). Remove these parts afterwards
+  public static void registerAuthProvider(String storageHandlerClass, 
+      String authProviderClass) {
+    authProviders.put(storageHandlerClass, authProviderClass);
+  }
+  
+  /** Returns the authorization provider to delegate to for the Table, based on its StorageHandler */
+  protected HiveAuthorizationProvider getDelegate(Table table) throws HiveException {
+    HiveStorageHandler handler =  table.getStorageHandler();
+    
+    if (handler != null) {
+      if (handler instanceof HCatStorageHandler) {
+       return ((HCatStorageHandler) handler).getAuthorizationProvider();
+      } else {
+        String authProviderClass = authProviders.get(handler.getClass().getCanonicalName());
+        
+        if (authProviderClass != null) {
+          try {
+            return (HiveAuthorizationProvider) ReflectionUtils.newInstance(getConf().getClassByName(authProviderClass), getConf());
+          } catch (ClassNotFoundException ex) {
+            throw new HiveException("Cannot instantiate delegation AuthorizationProvider");
+          }
+        }
+        
+        //else we do not have anything to delegate to
+        throw new HiveException(String.format("Storage Handler for table:%s is not an instance " +
+        		"of HCatStorageHandler", table.getTableName()));
+      }
+    } else {
+      //return an authorizer for HDFS
+      return hdfsAuthorizer;
+    }
+  }
+  
+  @Override
+  public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+      throws HiveException, AuthorizationException {
+    //global authorizations against warehouse hdfs directory
+    hdfsAuthorizer.authorize(readRequiredPriv, writeRequiredPriv);
+  }
+
+  @Override
+  public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+      throws HiveException, AuthorizationException {
+    //DBs are tied to an HDFS location
+    hdfsAuthorizer.authorize(db, readRequiredPriv, writeRequiredPriv);
+  }
+
+  @Override
+  public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+      throws HiveException, AuthorizationException {
+    getDelegate(table).authorize(table, readRequiredPriv, writeRequiredPriv);
+  }
+
+  @Override
+  public void authorize(Partition part, Privilege[] readRequiredPriv, 
+      Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException {
+    getDelegate(part.getTable()).authorize(part, readRequiredPriv, writeRequiredPriv);
+  }
+
+  @Override
+  public void authorize(Table table, Partition part, List<String> columns,
+      Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
+      AuthorizationException {
+    getDelegate(table).authorize(part, readRequiredPriv, writeRequiredPriv);
+  }
+}
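
Until Hive exposes StorageHandler.getAuthorizationProvider(), additional storage handlers can be
wired up through the static registry shown above. A sketch with hypothetical class names (both
names stand in for a third-party storage handler and its matching HiveAuthorizationProvider):

    import org.apache.hcatalog.security.StorageDelegationAuthorizationProvider;

    public class RegisterCustomAuthProvider {
      public static void register() {
        // Hypothetical class names; register the handler -> auth provider mapping
        // before any DDL against tables using that storage handler is authorized.
        StorageDelegationAuthorizationProvider.registerAuthProvider(
            "com.example.MyStorageHandler",
            "com.example.MyAuthorizationProvider");
      }
    }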

Modified: incubator/hcatalog/trunk/src/test/excluded-tests
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/excluded-tests?rev=1245309&r1=1245308&r2=1245309&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/excluded-tests (original)
+++ incubator/hcatalog/trunk/src/test/excluded-tests Fri Feb 17 03:19:10 2012
@@ -1 +1 @@
-
+**/TestEximSemanticAnalysis.java

Added: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java?rev=1245309&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java (added)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java Fri Feb 17 03:19:10 2012
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hcatalog;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/** 
+ * Utility methods for tests
+ */
+public class HcatTestUtils {
+
+  public static FsPermission perm007 = FsPermission.createImmutable((short) 0007); // -------rwx
+  public static FsPermission perm070 = FsPermission.createImmutable((short) 0070); // ----rwx---
+  public static FsPermission perm700 = FsPermission.createImmutable((short) 0700); // -rwx------
+  public static FsPermission perm755 = FsPermission.createImmutable((short) 0755); // -rwxr-xr-x
+  public static FsPermission perm777 = FsPermission.createImmutable((short) 0777); // -rwxrwxrwx
+  public static FsPermission perm300 = FsPermission.createImmutable((short) 0300); // --wx------
+  public static FsPermission perm500 = FsPermission.createImmutable((short) 0500); // -r-x------
+  public static FsPermission perm555 = FsPermission.createImmutable((short) 0555); // -r-xr-xr-x
+  
+  /** 
+   * Returns the database path.
+   */
+  public static Path getDbPath(Hive hive, Warehouse wh, String dbName) throws MetaException, HiveException {
+    return wh.getDatabasePath(hive.getDatabase(dbName)); 
+  }
+  
+  /** 
+   * Removes all databases and tables from the metastore
+   */
+  public static void cleanupHMS(Hive hive, Warehouse wh, FsPermission defaultPerm) 
+      throws HiveException, MetaException, NoSuchObjectException {
+    for (String dbName : hive.getAllDatabases()) {
+      if (dbName.equals("default")) {
+        continue;
+      }
+      try {
+        Path path = getDbPath(hive, wh, dbName);
+        FileSystem whFs = path.getFileSystem(hive.getConf());
+        whFs.setPermission(path, defaultPerm);
+      } catch(IOException ex) {
+        //ignore
+      }
+      hive.dropDatabase(dbName, true, true, true);
+    }
+    
+    //clean tables in default db
+    for (String tablename : hive.getAllTables("default")) {
+      hive.dropTable("default", tablename, true, true);
+    }
+  }
+  
+}
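
A quick sketch of how the permission constants above are used to simulate revoked access in the tests that follow: a directory is created with full owner permissions and then tightened so a later metadata operation must fail its HDFS authorization check. The class name PermDemo and the path /tmp/authz_demo are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hcatalog.HcatTestUtils;

/** Illustrative only: tighten permissions on a directory the way the tests below do. */
public class PermDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path dir = new Path("/tmp/authz_demo");        // hypothetical scratch directory
    fs.mkdirs(dir, HcatTestUtils.perm700);         // start with owner rwx
    fs.setPermission(dir, HcatTestUtils.perm500);  // drop the write bit (r-x------)
    fs.delete(dir, true);
  }
}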

Added: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java?rev=1245309&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java (added)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/security/TestHdfsAuthorizationProvider.java Fri Feb 17 03:19:10 2012
@@ -0,0 +1,579 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hcatalog.security;
+
+import static org.apache.hcatalog.HcatTestUtils.perm300;
+import static org.apache.hcatalog.HcatTestUtils.perm500;
+import static org.apache.hcatalog.HcatTestUtils.perm555;
+import static org.apache.hcatalog.HcatTestUtils.perm700;
+import static org.apache.hcatalog.HcatTestUtils.perm755;
+
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hcatalog.HcatTestUtils;
+import org.apache.hcatalog.cli.HCatDriver;
+import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestHdfsAuthorizationProvider {
+  
+  protected HCatDriver hcatDriver;
+  protected HiveMetaStoreClient msc;
+  protected HiveConf conf;
+  protected String whDir;
+  protected Path whPath;
+  protected FileSystem whFs;
+  protected Warehouse wh;
+  protected Hive hive;
+
+  @Before
+  public void setUp() throws Exception {
+    
+    conf = new HiveConf(this.getClass());
+    conf.set(ConfVars.PREEXECHOOKS.varname, "");
+    conf.set(ConfVars.POSTEXECHOOKS.varname, "");
+    conf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+  
+    conf.set("hive.metastore.local", "true");
+    conf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
+    conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
+    conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, 
+        StorageDelegationAuthorizationProvider.class.getCanonicalName());
+    conf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");
+    
+    whDir = System.getProperty("test.warehouse.dir", "/tmp/testhdfsauthorization_wh");
+    conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, whDir);
+    
+    UserGroupInformation ugi = ShimLoader.getHadoopShims().getUGIForConf(conf);
+    String username = ShimLoader.getHadoopShims().getShortUserName(ugi); 
+    
+    whPath = new Path(whDir);
+    whFs = whPath.getFileSystem(conf);
+    
+    wh = new Warehouse(conf);
+    hive = Hive.get(conf);
+    
+    //clean up mess in HMS 
+    HcatTestUtils.cleanupHMS(hive, wh, perm700);
+    
+    whFs.delete(whPath, true);
+    whFs.mkdirs(whPath, perm755);
+    
+    SessionState.start(new CliSessionState(conf));
+    hcatDriver = new HCatDriver();
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    whFs.close();
+    hcatDriver.close();
+    Hive.closeCurrent();
+  }
+
+  public Path getDbPath(String dbName) throws MetaException, HiveException {
+    return HcatTestUtils.getDbPath(hive, wh, dbName); 
+  }
+  
+  public Path getTablePath(String dbName, String tableName) throws HiveException {
+    Table table = hive.getTable(dbName, tableName);
+    return table.getPath();
+  }
+
+  public Path getPartPath(String partName, String dbName, String tableName) throws HiveException {
+    return new Path(getTablePath(dbName, tableName), partName);
+  }
+
+  /** Execute the query, expecting success. */
+  public void exec(String format, Object ... args) throws Exception {
+    String command = String.format(format, args);
+    CommandProcessorResponse resp = hcatDriver.run(command);
+    Assert.assertEquals(resp.getErrorMessage(), 0, resp.getResponseCode());
+    Assert.assertEquals(resp.getErrorMessage(), null, resp.getErrorMessage());
+  }
+
+  /** Execute the query expecting it to fail with AuthorizationException */
+  public void execFail(String format, Object ... args) throws Exception {
+    String command = String.format(format, args);
+    CommandProcessorResponse resp = hcatDriver.run(command);
+    Assert.assertNotSame(resp.getErrorMessage(), 0, resp.getResponseCode());
+    Assert.assertTrue(resp.getResponseCode() == 403 || //hive checks fail with 403
+        resp.getErrorMessage().contains("org.apache.hadoop.hive.ql.metadata.AuthorizationException")); 
+  }
+  
+  /** 
+   * Tests whether the warehouse directory is writable by the current user (as defined by Hadoop)
+   */
+  @Test
+  public void testWarehouseIsWritable() throws Exception {
+    Path top = new Path(whPath, "_foobarbaz12_");
+    try {
+      whFs.mkdirs(top);
+    } finally {
+      whFs.delete(top, true);
+    }
+  }
+  
+  @Test
+  public void testShowDatabases() throws Exception {
+    exec("CREATE DATABASE doo");
+    exec("SHOW DATABASES");
+    
+    whFs.setPermission(whPath, perm300); //revoke r
+    execFail("SHOW DATABASES");
+  }
+  
+  @Test
+  public void testDatabaseOps() throws Exception {
+    exec("SHOW TABLES");
+    exec("SHOW TABLE EXTENDED LIKE foo1");
+    
+    whFs.setPermission(whPath, perm700);
+    exec("CREATE DATABASE doo");
+    exec("DESCRIBE DATABASE doo");
+    exec("USE doo");
+    exec("SHOW TABLES");
+    exec("SHOW TABLE EXTENDED LIKE foo1");
+    exec("DROP DATABASE doo");
+    
+    //custom location
+    Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
+    whFs.mkdirs(dbPath, perm700);
+    exec("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
+    exec("DESCRIBE DATABASE doo2", dbPath.toUri());
+    exec("USE doo2");
+    exec("SHOW TABLES");
+    exec("SHOW TABLE EXTENDED LIKE foo1");
+    exec("DROP DATABASE doo2", dbPath.toUri());
+    
+    //custom non-existing location
+    exec("CREATE DATABASE doo3 LOCATION '%s/subpath'", dbPath.toUri());
+  }
+  
+  @Test
+  public void testCreateDatabaseFail1() throws Exception {
+    whFs.setPermission(whPath, perm500);
+    execFail("CREATE DATABASE doo"); //in the default location
+    
+    whFs.setPermission(whPath, perm555);
+    execFail("CREATE DATABASE doo2");
+  }
+
+  @Test
+  public void testCreateDatabaseFail2() throws Exception {
+    //custom location
+    Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
+    
+    whFs.mkdirs(dbPath, perm700);
+    whFs.setPermission(dbPath, perm500);
+    execFail("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
+  }
+  
+  @Test
+  public void testDropDatabaseFail1() throws Exception {
+    whFs.setPermission(whPath, perm700);
+    exec("CREATE DATABASE doo"); //in the default location
+    
+    whFs.setPermission(getDbPath("doo"), perm500); //revoke write
+    execFail("DROP DATABASE doo");
+  }
+  
+  @Test
+  public void testDropDatabaseFail2() throws Exception {
+    //custom location
+    Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
+    
+    whFs.mkdirs(dbPath, perm700);
+    exec("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
+    
+    whFs.setPermission(dbPath, perm500);
+    execFail("DROP DATABASE doo2");
+  }
+  
+  @Test
+  public void testDescSwitchDatabaseFail() throws Exception {
+    whFs.setPermission(whPath, perm700);
+    exec("CREATE DATABASE doo");
+    whFs.setPermission(getDbPath("doo"), perm300); //revoke read
+    execFail("DESCRIBE DATABASE doo");
+    execFail("USE doo");
+    
+    //custom location
+    Path dbPath = new Path(whPath, new Random().nextInt() + "/mydb");
+    whFs.mkdirs(dbPath, perm700);
+    exec("CREATE DATABASE doo2 LOCATION '%s'", dbPath.toUri());
+    whFs.mkdirs(dbPath, perm300); //revoke read
+    execFail("DESCRIBE DATABASE doo2", dbPath.toUri());
+    execFail("USE doo2");
+  }
+  
+  @Test 
+  public void testShowTablesFail() throws Exception {
+    whFs.setPermission(whPath, perm700);
+    exec("CREATE DATABASE doo");
+    exec("USE doo");
+    whFs.setPermission(getDbPath("doo"), perm300); //revoke read
+    execFail("SHOW TABLES");
+    execFail("SHOW TABLE EXTENDED LIKE foo1");
+  }
+  
+  @Test
+  public void testTableOps() throws Exception {
+    //default db
+    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+    exec("DESCRIBE foo1");
+    exec("DROP TABLE foo1");
+    
+    //default db custom location
+    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    whFs.mkdirs(tablePath, perm700);
+    exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+    exec("DESCRIBE foo2");
+    exec("DROP TABLE foo2");
+    
+    //default db custom non existing location
+    exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath);
+    exec("DESCRIBE foo3");
+    exec("DROP TABLE foo3");
+    
+    //non default db
+    exec("CREATE DATABASE doo");
+    exec("USE doo");
+    
+    exec("CREATE TABLE foo4 (foo INT) STORED AS RCFILE");
+    exec("DESCRIBE foo4");
+    exec("DROP TABLE foo4");
+    
+    //non-default db custom location
+    tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    whFs.mkdirs(tablePath, perm700);
+    exec("CREATE EXTERNAL TABLE foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+    exec("DESCRIBE foo5");
+    exec("DROP TABLE foo5");
+    
+    //non-default db custom non existing location
+    exec("CREATE EXTERNAL TABLE foo6 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath);
+    exec("DESCRIBE foo6");
+    exec("DROP TABLE foo6");
+    
+    exec("DROP TABLE IF EXISTS foo_non_exists");
+    
+    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+    exec("DESCRIBE EXTENDED foo1");
+    exec("DESCRIBE FORMATTED foo1");
+    exec("DESCRIBE foo1.foo");
+    
+    //deep non-existing path for the table
+    tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    whFs.mkdirs(tablePath, perm700);
+    exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s/a/a/a/'", tablePath);
+  }
+  
+  @Test
+  public void testCreateTableFail1() throws Exception {
+    //default db
+    whFs.mkdirs(whPath, perm500); //revoke w
+    execFail("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+  }
+  
+  @Test
+  public void testCreateTableFail2() throws Exception {
+    //default db custom location
+    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    whFs.mkdirs(tablePath, perm500);
+    execFail("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+    
+    //default db custom non existing location
+    execFail("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s/subpath'", tablePath);
+  }
+  
+  @Test
+  public void testCreateTableFail3() throws Exception {
+    //non default db
+    exec("CREATE DATABASE doo");
+    whFs.setPermission(getDbPath("doo"), perm500);
+
+    execFail("CREATE TABLE doo.foo4 (foo INT) STORED AS RCFILE");
+    
+    //non-default db custom location, permission to write to tablePath, but not on db path
+    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    whFs.mkdirs(tablePath, perm700);
+    exec("USE doo");
+    execFail("CREATE EXTERNAL TABLE foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+  }
+
+  @Test
+  public void testCreateTableFail4() throws Exception {
+    //non default db
+    exec("CREATE DATABASE doo");
+
+    //non-default db custom location
+    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    whFs.mkdirs(tablePath, perm500);
+    execFail("CREATE EXTERNAL TABLE doo.foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+    
+    //non-default db custom non existing location
+    execFail("CREATE EXTERNAL TABLE doo.foo6 (foo INT) STORED AS RCFILE LOCATION '%s/a/a/a/'", tablePath);
+  }
+  
+  @Test
+  public void testDropTableFail1() throws Exception {
+    //default db
+    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+    whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke w
+    execFail("DROP TABLE foo1");
+  }
+  
+  @Test
+  public void testDropTableFail2() throws Exception {
+    //default db custom location
+    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+    whFs.mkdirs(tablePath, perm500);
+    execFail("DROP TABLE foo2");
+  }
+
+  @Test
+  public void testDropTableFail4() throws Exception {
+    //non default db
+    exec("CREATE DATABASE doo");
+
+    //non-default db custom location
+    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    
+    exec("CREATE EXTERNAL TABLE doo.foo5 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+    whFs.mkdirs(tablePath, perm500);
+    exec("USE doo"); //There is no DROP TABLE doo.foo5 support in Hive
+    execFail("DROP TABLE foo5");
+  }
+  
+  @Test
+  public void testDescTableFail() throws Exception {
+    //default db
+    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+    whFs.mkdirs(getTablePath("default", "foo1"), perm300); //revoke read
+    execFail("DESCRIBE foo1");
+    
+    //default db custom location
+    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    whFs.mkdirs(tablePath, perm700);
+    exec("CREATE EXTERNAL TABLE foo2 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+    whFs.mkdirs(tablePath, perm300); //revoke read
+    execFail("DESCRIBE foo2");
+  }
+  
+  @Test
+  public void testAlterTableRename() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+    exec("ALTER TABLE foo1 RENAME TO foo2");
+    
+    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+    exec("ALTER TABLE foo3 RENAME TO foo4");
+  }
+  
+  @Test
+  public void testAlterTableRenameFail() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+    whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke write
+    execFail("ALTER TABLE foo1 RENAME TO foo2");
+    
+    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", tablePath);
+    whFs.mkdirs(tablePath, perm500); //revoke write 
+    execFail("ALTER TABLE foo3 RENAME TO foo4");
+  }
+  
+  @Test
+  public void testAlterTableRelocate() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    exec("ALTER TABLE foo1 SET LOCATION '%s'", tablePath.makeQualified(whFs));
+    
+    tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
+    exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", 
+        tablePath.makeQualified(whFs));
+    tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
+    exec("ALTER TABLE foo3 SET LOCATION '%s'", tablePath.makeQualified(whFs));
+  }
+  
+  @Test
+  public void testAlterTableRelocateFail() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) STORED AS RCFILE");
+    Path tablePath = new Path(whPath, new Random().nextInt() + "/mytable");
+    whFs.mkdirs(tablePath, perm500); //revoke write
+    execFail("ALTER TABLE foo1 SET LOCATION '%s'", tablePath.makeQualified(whFs));
+    
+    //don't have access to the new table location
+    tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
+    exec("CREATE EXTERNAL TABLE foo3 (foo INT) STORED AS RCFILE LOCATION '%s'", 
+        tablePath.makeQualified(whFs));
+    tablePath = new Path(whPath, new Random().nextInt() + "/mytable2");
+    whFs.mkdirs(tablePath, perm500); //revoke write
+    execFail("ALTER TABLE foo3 SET LOCATION '%s'", tablePath.makeQualified(whFs));
+    
+    //have access to new table loc, but not old table loc 
+    tablePath = new Path(whPath, new Random().nextInt() + "/mytable3");
+    exec("CREATE EXTERNAL TABLE foo4 (foo INT) STORED AS RCFILE LOCATION '%s'", 
+        tablePath.makeQualified(whFs));
+    whFs.mkdirs(tablePath, perm500); //revoke write
+    tablePath = new Path(whPath, new Random().nextInt() + "/mytable3");
+    execFail("ALTER TABLE foo4 SET LOCATION '%s'", tablePath.makeQualified(whFs));
+  }
+  
+  @Test
+  public void testAlterTable() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+    exec("ALTER TABLE foo1 SET TBLPROPERTIES ('foo'='bar')");
+    exec("ALTER TABLE foo1 SET SERDEPROPERTIES ('foo'='bar')");
+    exec("ALTER TABLE foo1 ADD COLUMNS (foo2 INT)");
+  }
+  
+  @Test
+  public void testAddDropPartition() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')");
+    exec("ALTER TABLE foo1 ADD IF NOT EXISTS PARTITION (b='2010-10-10')");
+    String relPath = new Random().nextInt() + "/mypart";
+    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-11') LOCATION '%s'", relPath);
+    
+    exec("ALTER TABLE foo1 PARTITION (b='2010-10-10') SET FILEFORMAT RCFILE");
+    
+    exec("ALTER TABLE foo1 PARTITION (b='2010-10-10') SET FILEFORMAT INPUTFORMAT "
+        + "'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT "
+        + "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver "
+        + "'mydriver' outputdriver 'yourdriver'");    
+    
+    exec("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')");
+    exec("ALTER TABLE foo1 DROP PARTITION (b='2010-10-11')");
+  }
+  
+  @Test
+  public void testAddPartitionFail1() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+    whFs.mkdirs(getTablePath("default", "foo1"), perm500);
+    execFail("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')");
+  }
+  
+  @Test
+  public void testAddPartitionFail2() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+    String relPath = new Random().nextInt() + "/mypart";
+    Path partPath = new Path(getTablePath("default", "foo1"), relPath);
+    whFs.mkdirs(partPath, perm500);
+    execFail("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10') LOCATION '%s'", partPath);
+  }
+  
+  @Test
+  public void testDropPartitionFail1() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10')");
+    whFs.mkdirs(getPartPath("b=2010-10-10", "default", "foo1"), perm500);
+    execFail("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')");
+  }
+
+  @Test
+  public void testDropPartitionFail2() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS TEXTFILE");
+    String relPath = new Random().nextInt() + "/mypart";
+    Path partPath = new Path(getTablePath("default", "foo1"), relPath);
+    whFs.mkdirs(partPath, perm700);
+    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-10') LOCATION '%s'", partPath);
+    whFs.mkdirs(partPath, perm500); //revoke write
+    execFail("ALTER TABLE foo1 DROP PARTITION (b='2010-10-10')");
+  }
+  
+  @Test
+  public void testAlterTableFail() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (boo STRING) STORED AS TEXTFILE");
+    whFs.mkdirs(getTablePath("default", "foo1"), perm500); //revoke write
+    execFail("ALTER TABLE foo1 SET TBLPROPERTIES ('foo'='bar')");
+    execFail("ALTER TABLE foo1 SET SERDEPROPERTIES ('foo'='bar')");
+    execFail("ALTER TABLE foo1 ADD COLUMNS (foo2 INT)");
+  }
+  
+  @Test
+  public void testShowPartitions() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (boo STRING) STORED AS TEXTFILE");
+    exec("SHOW PARTITIONS foo1");
+    
+    whFs.mkdirs(getTablePath("default", "foo1"), perm300); //revoke read
+    execFail("SHOW PARTITIONS foo1");
+  }
+  
+  @Test
+  public void testAlterTablePartRename() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
+    Path loc = new Path(whPath, new Random().nextInt() + "/mypart");
+    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16') LOCATION '%s'", loc);
+    exec("ALTER TABLE foo1 PARTITION (b='2010-10-16') RENAME TO PARTITION (b='2010-10-17')");
+  }
+  
+  @Test
+  public void testAlterTablePartRenameFail() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
+    Path loc = new Path(whPath, new Random().nextInt() + "/mypart");
+    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16') LOCATION '%s'", loc);
+    whFs.setPermission(loc, perm500); //revoke w
+    execFail("ALTER TABLE foo1 PARTITION (b='2010-10-16') RENAME TO PARTITION (b='2010-10-17')");
+  }
+  
+  @Test
+  public void testAlterTablePartRelocate() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
+    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16')");
+    Path partPath = new Path(whPath, new Random().nextInt() + "/mypart");
+    exec("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", partPath.makeQualified(whFs));
+  }
+
+  @Test
+  public void testAlterTablePartRelocateFail() throws Exception {
+    exec("CREATE TABLE foo1 (foo INT) PARTITIONED BY (b STRING) STORED AS RCFILE");
+    
+    Path oldLoc = new Path(whPath, new Random().nextInt() + "/mypart");
+    Path newLoc = new Path(whPath, new Random().nextInt() + "/mypart2");
+    
+    exec("ALTER TABLE foo1 ADD PARTITION (b='2010-10-16') LOCATION '%s'", oldLoc);
+    whFs.mkdirs(oldLoc, perm500);
+    execFail("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", newLoc.makeQualified(whFs));
+    whFs.mkdirs(oldLoc, perm700);
+    whFs.mkdirs(newLoc, perm500);
+    execFail("ALTER TABLE foo1 PARTITION (b='2010-10-16') SET LOCATION '%s'", newLoc.makeQualified(whFs));
+  }
+  
+}
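
The *Fail tests above all share the same create → revoke → execFail shape. As an illustration of how the suite could be extended, the hypothetical case below would exercise the same path for a table in a non-default database, using only helpers defined in this class; the table foo9 and the method name are illustrative and not part of this commit.

  // Hypothetical additional case, shown only to illustrate the suite's pattern;
  // it would live inside TestHdfsAuthorizationProvider alongside the tests above.
  @Test
  public void testDescTableFailNonDefaultDb() throws Exception {
    exec("CREATE DATABASE doo");
    exec("USE doo");
    exec("CREATE TABLE foo9 (foo INT) STORED AS RCFILE");
    whFs.mkdirs(getTablePath("doo", "foo9"), perm300); //revoke read
    execFail("DESCRIBE foo9");
  }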


