hive-commits mailing list archives

From khorg...@apache.org
Subject svn commit: r1520466 [8/18] - in /hive/trunk/hcatalog: core/src/main/java/org/apache/hcatalog/cli/ core/src/main/java/org/apache/hcatalog/cli/SemanticAnalysis/ core/src/main/java/org/apache/hcatalog/common/ core/src/main/java/org/apache/hcatalog/data/ ...
Date Fri, 06 Sep 2013 00:49:17 GMT
Added: hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/mapreduce/StorerInfo.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/mapreduce/StorerInfo.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/mapreduce/StorerInfo.java (added)
+++ hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/mapreduce/StorerInfo.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hcatalog.mapreduce;
+
+import java.io.Serializable;
+import java.util.Properties;
+
+/** Information about the storer to use for writing the data. */
+public class StorerInfo implements Serializable {
+
+    /** The serialization version */
+    private static final long serialVersionUID = 1L;
+
+    /** The properties for the storage handler */
+    private Properties properties;
+
+    private String ofClass;
+
+    private String ifClass;
+
+    private String serdeClass;
+
+    private String storageHandlerClass;
+
+    /**
+     * Initialize the storer information.
+     * @param ifClass the input format class
+     * @param ofClass the output format class
+     * @param serdeClass the SerDe class
+     * @param storageHandlerClass the storage handler class
+     * @param properties the properties for the storage handler
+     */
+    public StorerInfo(String ifClass, String ofClass, String serdeClass, String storageHandlerClass, Properties properties) {
+        super();
+        this.ifClass = ifClass;
+        this.ofClass = ofClass;
+        this.serdeClass = serdeClass;
+        this.storageHandlerClass = storageHandlerClass;
+        this.properties = properties;
+    }
+
+    /**
+     * @return the input format class
+     */
+    public String getIfClass() {
+        return ifClass;
+    }
+
+    /**
+     * @param ifClass the input format class
+     */
+    public void setIfClass(String ifClass) {
+        this.ifClass = ifClass;
+    }
+
+    /**
+     * @return the output format class
+     */
+    public String getOfClass() {
+        return ofClass;
+    }
+
+    /**
+     * @return the serdeClass
+     */
+    public String getSerdeClass() {
+        return serdeClass;
+    }
+
+    /**
+     * @return the storageHandlerClass
+     */
+    public String getStorageHandlerClass() {
+        return storageHandlerClass;
+    }
+
+    /**
+     * @return the storer properties
+     */
+    public Properties getProperties() {
+        return properties;
+    }
+
+    /**
+     * @param properties the storer properties to set 
+     */
+    public void setProperties(Properties properties) {
+        this.properties = properties;
+    }
+
+
+}
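
For context, a minimal sketch (not part of the commit) of how the new StorerInfo bean might be populated for an RCFile-backed table; the format and SerDe class names are standard Hive classes, while the property key is made up for illustration:

    import java.util.Properties;
    import org.apache.hcatalog.mapreduce.StorerInfo;

    public class StorerInfoSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.setProperty("example.storer.property", "true"); // hypothetical pass-through property

            // A null storage handler class indicates a natively handled table.
            StorerInfo storer = new StorerInfo(
                "org.apache.hadoop.hive.ql.io.RCFileInputFormat",
                "org.apache.hadoop.hive.ql.io.RCFileOutputFormat",
                "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe",
                null,
                props);

            System.out.println(storer.getIfClass() + " -> " + storer.getOfClass());
        }
    }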

Added: hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/oozie/JavaAction.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/oozie/JavaAction.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/oozie/JavaAction.java (added)
+++ hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/oozie/JavaAction.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hcatalog.oozie;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.cli.CliDriver;
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
+
+public class JavaAction {
+
+    public static void main(String[] args) throws Exception {
+
+        HiveConf conf = new HiveConf();
+        conf.addResource(new Path("file:///", System.getProperty("oozie.action.conf.xml")));
+        conf.setVar(ConfVars.SEMANTIC_ANALYZER_HOOK, HCatSemanticAnalyzer.class.getName());
+        conf.setBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL, true);
+        SessionState.start(new CliSessionState(conf));
+        new CliDriver().processLine(args[0]);
+    }
+
+}
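
A hedged sketch of the contract this entry point expects: Oozie launches the class as a <java> action, sets the oozie.action.conf.xml system property to the materialized action configuration, and passes the HiveQL to run as the first argument. The path and query below are hypothetical and only illustrate a local invocation:

    public class JavaActionLocalSketch {
        public static void main(String[] args) throws Exception {
            // Oozie normally sets this property; the path here is hypothetical.
            System.setProperty("oozie.action.conf.xml", "/tmp/oozie-action-conf.xml");
            org.apache.hcatalog.oozie.JavaAction.main(new String[]{"SHOW TABLES;"});
        }
    }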

Added: hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java (added)
+++ hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hcatalog.rcfile;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+
+public class RCFileMapReduceInputFormat<K extends LongWritable, V extends BytesRefArrayWritable>
+    extends FileInputFormat<LongWritable, BytesRefArrayWritable> {
+
+    @Override
+    public RecordReader<LongWritable, BytesRefArrayWritable> createRecordReader(InputSplit split,
+                                                                                TaskAttemptContext context) throws IOException, InterruptedException {
+
+        context.setStatus(split.toString());
+        return new RCFileMapReduceRecordReader<LongWritable, BytesRefArrayWritable>();
+    }
+
+    @Override
+    public List<InputSplit> getSplits(JobContext job) throws IOException {
+
+        job.getConfiguration().setLong("mapred.min.split.size", SequenceFile.SYNC_INTERVAL);
+        return super.getSplits(job);
+    }
+}
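
As a usage sketch (assumed job wiring, not taken from the commit), the input format plugs into a new-API MapReduce job; each map() call then receives the RCFile row offset as a LongWritable key and the row's columns as a BytesRefArrayWritable value. The input path argument is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
    import org.apache.hcatalog.rcfile.RCFileMapReduceInputFormat;

    public class RCFileReadSketch {
        public static void main(String[] args) throws Exception {
            Job job = new Job(new Configuration(), "rcfile-read-sketch");
            job.setJarByClass(RCFileReadSketch.class);

            // Identity mapper is enough to show the key/value types produced.
            job.setInputFormatClass(RCFileMapReduceInputFormat.class);
            job.setOutputKeyClass(LongWritable.class);
            job.setOutputValueClass(BytesRefArrayWritable.class);
            job.setNumReduceTasks(0);
            job.setOutputFormatClass(NullOutputFormat.class); // discard output; we only read

            FileInputFormat.addInputPath(job, new Path(args[0])); // path to existing RCFile data
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }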

Added: hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java (added)
+++ hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hcatalog.rcfile;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.RCFile;
+import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.DefaultCodec;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * The RCFile output format for the new Hadoop mapreduce API.
+ */
+public class RCFileMapReduceOutputFormat extends
+    FileOutputFormat<WritableComparable<?>, BytesRefArrayWritable> {
+
+    /**
+     * Sets the number of columns in the given configuration.
+     * @param conf
+     *          configuration instance in which to set the column number
+     * @param columnNum
+     *          number of columns for RCFile's Writer
+     *
+     */
+    public static void setColumnNumber(Configuration conf, int columnNum) {
+        assert columnNum > 0;
+        conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNum);
+    }
+
+    /* (non-Javadoc)
+    * @see org.apache.hadoop.mapreduce.lib.output.FileOutputFormat#getRecordWriter(org.apache.hadoop.mapreduce.TaskAttemptContext)
+    */
+    @Override
+    public org.apache.hadoop.mapreduce.RecordWriter<WritableComparable<?>, BytesRefArrayWritable> getRecordWriter(
+        TaskAttemptContext task) throws IOException, InterruptedException {
+
+        //FileOutputFormat.getWorkOutputPath takes TaskInputOutputContext instead of
+        //TaskAttemptContext, so can't use that here
+        FileOutputCommitter committer = (FileOutputCommitter) getOutputCommitter(task);
+        Path outputPath = committer.getWorkPath();
+
+        FileSystem fs = outputPath.getFileSystem(task.getConfiguration());
+
+        if (!fs.exists(outputPath)) {
+            fs.mkdirs(outputPath);
+        }
+
+        Path file = getDefaultWorkFile(task, "");
+
+        CompressionCodec codec = null;
+        if (getCompressOutput(task)) {
+            Class<?> codecClass = getOutputCompressorClass(task, DefaultCodec.class);
+            codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, task.getConfiguration());
+        }
+
+        final RCFile.Writer out = new RCFile.Writer(fs, task.getConfiguration(), file, task, codec);
+
+        return new RecordWriter<WritableComparable<?>, BytesRefArrayWritable>() {
+
+            /* (non-Javadoc)
+            * @see org.apache.hadoop.mapreduce.RecordWriter#write(java.lang.Object, java.lang.Object)
+            */
+            @Override
+            public void write(WritableComparable<?> key, BytesRefArrayWritable value)
+                throws IOException {
+                out.append(value);
+            }
+
+            /* (non-Javadoc)
+            * @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext)
+            */
+            @Override
+            public void close(TaskAttemptContext task) throws IOException, InterruptedException {
+                out.close();
+            }
+        };
+    }
+
+}
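
A corresponding write-side sketch (assumed wiring): the column count must be set before any RCFile.Writer is created, and the record key is ignored by the writer, so NullWritable works as the key type. The output path and column count are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    import org.apache.hcatalog.rcfile.RCFileMapReduceOutputFormat;

    public class RCFileWriteSketch {
        public static Job configure(Configuration conf) throws Exception {
            Job job = new Job(conf, "rcfile-write-sketch");
            job.setJarByClass(RCFileWriteSketch.class);

            // Tell the RCFile writer how many columns each BytesRefArrayWritable row carries.
            RCFileMapReduceOutputFormat.setColumnNumber(job.getConfiguration(), 3);

            job.setOutputFormatClass(RCFileMapReduceOutputFormat.class);
            job.setOutputKeyClass(NullWritable.class);           // key is dropped by the writer
            job.setOutputValueClass(BytesRefArrayWritable.class);

            FileOutputFormat.setOutputPath(job, new Path("/tmp/rcfile-out")); // hypothetical path
            FileOutputFormat.setCompressOutput(job, true);       // optional; DefaultCodec unless overridden
            return job;
        }
    }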

Added: hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java (added)
+++ hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hcatalog.rcfile;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.RCFile;
+import org.apache.hadoop.hive.ql.io.RCFile.Reader;
+import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+
+public class RCFileMapReduceRecordReader<K extends LongWritable, V extends BytesRefArrayWritable>
+    extends RecordReader<LongWritable, BytesRefArrayWritable> {
+
+    private Reader in;
+    private long start;
+    private long end;
+    private boolean more = true;
+
+    // key and value objects are created once in initialize() and then reused
+    // for every getCurrentKey() and getCurrentValue() call. This is important
+    // since RCFile makes an assumption of this fact.
+
+    private LongWritable key;
+    private BytesRefArrayWritable value;
+
+    @Override
+    public void close() throws IOException {
+        in.close();
+    }
+
+    @Override
+    public LongWritable getCurrentKey() throws IOException, InterruptedException {
+        return key;
+    }
+
+    @Override
+    public BytesRefArrayWritable getCurrentValue() throws IOException, InterruptedException {
+        return value;
+    }
+
+    @Override
+    public float getProgress() throws IOException, InterruptedException {
+        if (end == start) {
+            return 0.0f;
+        } else {
+            return Math.min(1.0f, (in.getPosition() - start) / (float) (end - start));
+        }
+    }
+
+    @Override
+    public boolean nextKeyValue() throws IOException, InterruptedException {
+
+        more = next(key);
+        if (more) {
+            in.getCurrentRow(value);
+        }
+
+        return more;
+    }
+
+    private boolean next(LongWritable key) throws IOException {
+        if (!more) {
+            return false;
+        }
+
+        more = in.next(key);
+        if (!more) {
+            return false;
+        }
+
+        if (in.lastSeenSyncPos() >= end) {
+            more = false;
+            return more;
+        }
+        return more;
+    }
+
+    @Override
+    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException,
+        InterruptedException {
+
+        FileSplit fSplit = (FileSplit) split;
+        Path path = fSplit.getPath();
+        Configuration conf = context.getConfiguration();
+        this.in = new RCFile.Reader(path.getFileSystem(conf), path, conf);
+        this.end = fSplit.getStart() + fSplit.getLength();
+
+        if (fSplit.getStart() > in.getPosition()) {
+            in.sync(fSplit.getStart());
+        }
+
+        this.start = in.getPosition();
+        more = start < end;
+
+        key = new LongWritable();
+        value = new BytesRefArrayWritable();
+    }
+}

Added: hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java (added)
+++ hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,337 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hcatalog.security;
+
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProviderBase;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * An AuthorizationProvider that checks data-access permissions on HDFS.
+ * It may eventually make sense to move this class into Hive so that all Hive users
+ * can use this authorization model.
+ */
+public class HdfsAuthorizationProvider extends HiveAuthorizationProviderBase {
+
+    protected Warehouse wh;
+
+    //Config variables : create an enum to store them if we have more
+    private static final String PROXY_USER_NAME = "proxy.user.name";
+
+    public HdfsAuthorizationProvider() {
+        super();
+    }
+
+    public HdfsAuthorizationProvider(Configuration conf) {
+        super();
+        setConf(conf);
+    }
+
+    @Override
+    public void init(Configuration conf) throws HiveException {
+        hive_db = new HiveProxy(Hive.get(new HiveConf(conf, HiveAuthorizationProvider.class)));
+    }
+
+    @Override
+    public void setConf(Configuration conf) {
+        super.setConf(conf);
+        try {
+            this.wh = new Warehouse(conf);
+        } catch (MetaException ex) {
+            throw new RuntimeException(ex);
+        }
+    }
+
+    protected FsAction getFsAction(Privilege priv, Path path) {
+
+        switch (priv.getPriv()) {
+        case ALL:
+            throw new AuthorizationException("no matching Action for Privilege.All");
+        case ALTER_DATA:
+            return FsAction.WRITE;
+        case ALTER_METADATA:
+            return FsAction.WRITE;
+        case CREATE:
+            return FsAction.WRITE;
+        case DROP:
+            return FsAction.WRITE;
+        case INDEX:
+            return FsAction.WRITE;
+        case LOCK:
+            return FsAction.WRITE;
+        case SELECT:
+            return FsAction.READ;
+        case SHOW_DATABASE:
+            return FsAction.READ;
+        case UNKNOWN:
+        default:
+            throw new AuthorizationException("Unknown privilege");
+        }
+    }
+
+    protected EnumSet<FsAction> getFsActions(Privilege[] privs, Path path) {
+        EnumSet<FsAction> actions = EnumSet.noneOf(FsAction.class);
+
+        if (privs == null) {
+            return actions;
+        }
+
+        for (Privilege priv : privs) {
+            actions.add(getFsAction(priv, path));
+        }
+
+        return actions;
+    }
+
+    private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
+
+    private Path getDefaultDatabasePath(String dbName) throws MetaException {
+        if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
+            return wh.getWhRoot();
+        }
+        return new Path(wh.getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
+    }
+
+    protected Path getDbLocation(Database db) throws HiveException {
+        try {
+            String location = db.getLocationUri();
+            if (location == null) {
+                return getDefaultDatabasePath(db.getName());
+            } else {
+                return wh.getDnsPath(wh.getDatabasePath(db));
+            }
+        } catch (MetaException ex) {
+            throw new HiveException(ex.getMessage());
+        }
+    }
+
+    @Override
+    public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        //Authorize for global level permissions at the warehouse dir
+        Path root;
+        try {
+            root = wh.getWhRoot();
+            authorize(root, readRequiredPriv, writeRequiredPriv);
+        } catch (MetaException ex) {
+            throw new HiveException(ex);
+        }
+    }
+
+    @Override
+    public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        if (db == null) {
+            return;
+        }
+
+        Path path = getDbLocation(db);
+
+        authorize(path, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        if (table == null) {
+            return;
+        }
+
+        //unlike Hive's model, this can be called at CREATE TABLE as well, since we should authorize
+        //against the table's declared location
+        Path path = null;
+        try {
+            if (table.getTTable().getSd().getLocation() == null
+                || table.getTTable().getSd().getLocation().isEmpty()) {
+                path = wh.getTablePath(hive_db.getDatabase(table.getDbName()), table.getTableName());
+            } else {
+                path = table.getPath();
+            }
+        } catch (MetaException ex) {
+            throw new HiveException(ex);
+        }
+
+        authorize(path, readRequiredPriv, writeRequiredPriv);
+    }
+
+    //TODO: HiveAuthorizationProvider should expose this interface instead of #authorize(Partition, Privilege[], Privilege[])
+    public void authorize(Table table, Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+
+        if (part == null || part.getLocation() == null) {
+            authorize(table, readRequiredPriv, writeRequiredPriv);
+        } else {
+            authorize(part.getPartitionPath(), readRequiredPriv, writeRequiredPriv);
+        }
+    }
+
+    @Override
+    public void authorize(Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        if (part == null) {
+            return;
+        }
+        authorize(part.getTable(), part, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Partition part, List<String> columns,
+                          Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
+        //columns cannot live in different files, just check for partition level permissions
+        authorize(table, part, readRequiredPriv, writeRequiredPriv);
+    }
+
+    /**
+     * Authorizes the given privileges against a filesystem path.
+     * @param path a filesystem path
+     * @param readRequiredPriv a list of privileges needed for inputs.
+     * @param writeRequiredPriv a list of privileges needed for outputs.
+     */
+    public void authorize(Path path, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        try {
+            EnumSet<FsAction> actions = getFsActions(readRequiredPriv, path);
+            actions.addAll(getFsActions(writeRequiredPriv, path));
+            if (actions.isEmpty()) {
+                return;
+            }
+
+            checkPermissions(getConf(), path, actions);
+
+        } catch (AccessControlException ex) {
+            throw new AuthorizationException(ex);
+        } catch (LoginException ex) {
+            throw new AuthorizationException(ex);
+        } catch (IOException ex) {
+            throw new HiveException(ex);
+        }
+    }
+
+    /**
+     * Checks the permissions for the given path and current user on Hadoop FS. If the given path
+     * does not exist, it checks the nearest existing ancestor directory instead.
+     */
+    protected static void checkPermissions(final Configuration conf, final Path path,
+                                           final EnumSet<FsAction> actions) throws IOException, LoginException {
+
+        if (path == null) {
+            throw new IllegalArgumentException("path is null");
+        }
+
+        HadoopShims shims = ShimLoader.getHadoopShims();
+        final UserGroupInformation ugi;
+        if (conf.get(PROXY_USER_NAME) != null) {
+            ugi = UserGroupInformation.createRemoteUser(conf.get(PROXY_USER_NAME));
+        } else {
+            ugi = shims.getUGIForConf(conf);
+        }
+        final String user = shims.getShortUserName(ugi);
+
+        final FileSystem fs = path.getFileSystem(conf);
+
+        if (fs.exists(path)) {
+            checkPermissions(fs, path, actions, user, ugi.getGroupNames());
+        } else if (path.getParent() != null) {
+            // find the nearest ancestor that exists and check its permissions
+            Path par = path.getParent();
+            while (par != null) {
+                if (fs.exists(par)) {
+                    break;
+                }
+                par = par.getParent();
+            }
+
+            checkPermissions(fs, par, actions, user, ugi.getGroupNames());
+        }
+    }
+
+    /**
+     * Checks the permissions for the given path and current user on Hadoop FS. If the given path
+     * does not exist, it returns without error.
+     */
+    @SuppressWarnings("deprecation")
+    protected static void checkPermissions(final FileSystem fs, final Path path,
+                                           final EnumSet<FsAction> actions, String user, String[] groups) throws IOException,
+        AccessControlException {
+
+        final FileStatus stat;
+
+        try {
+            stat = fs.getFileStatus(path);
+        } catch (FileNotFoundException fnfe) {
+            // File named by path doesn't exist; nothing to validate.
+            return;
+        } catch (org.apache.hadoop.fs.permission.AccessControlException ace) {
+            // Older hadoop version will throw this @deprecated Exception.
+            throw new AccessControlException(ace.getMessage());
+        }
+
+        final FsPermission dirPerms = stat.getPermission();
+        final String grp = stat.getGroup();
+
+        for (FsAction action : actions) {
+            if (user.equals(stat.getOwner())) {
+                if (dirPerms.getUserAction().implies(action)) {
+                    continue;
+                }
+            }
+            if (ArrayUtils.contains(groups, grp)) {
+                if (dirPerms.getGroupAction().implies(action)) {
+                    continue;
+                }
+            }
+            if (dirPerms.getOtherAction().implies(action)) {
+                continue;
+            }
+            throw new AccessControlException("action " + action + " not permitted on path "
+                + path + " for user " + user);
+        }
+    }
+}
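
A sketch of how this provider might be enabled, assuming Hive's standard pluggable-authorization settings; a path-level check can also be invoked directly through the public authorize(Path, ...) overload. The warehouse path below is hypothetical:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.security.authorization.Privilege;
    import org.apache.hcatalog.security.HdfsAuthorizationProvider;

    public class HdfsAuthSketch {
        public static void main(String[] args) throws Exception {
            HiveConf conf = new HiveConf();
            // Standard Hive knobs for pluggable authorization.
            conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
            conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
                HdfsAuthorizationProvider.class.getName());

            // Direct path-level check: requires READ access on the path (hypothetical location).
            HdfsAuthorizationProvider provider = new HdfsAuthorizationProvider(conf);
            provider.authorize(new Path("/user/hive/warehouse/demo_db.db"),
                new Privilege[]{Privilege.SELECT}, null);
        }
    }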

Added: hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java (added)
+++ hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hcatalog.security;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProviderBase;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hcatalog.mapreduce.HCatStorageHandler;
+
+/**
+ * A HiveAuthorizationProvider which delegates the authorization requests to 
+ * the underlying AuthorizationProviders obtained from the StorageHandler.
+ */
+public class StorageDelegationAuthorizationProvider extends HiveAuthorizationProviderBase {
+
+    protected HiveAuthorizationProvider hdfsAuthorizer = new HdfsAuthorizationProvider();
+
+    protected static Map<String, String> authProviders = new HashMap<String, String>();
+
+    @Override
+    public void setConf(Configuration conf) {
+        super.setConf(conf);
+        hdfsAuthorizer.setConf(conf);
+    }
+
+    @Override
+    public void init(Configuration conf) throws HiveException {
+        hive_db = new HiveProxy(Hive.get(new HiveConf(conf, HiveAuthorizationProvider.class)));
+    }
+
+    @Override
+    public void setAuthenticator(HiveAuthenticationProvider authenticator) {
+        super.setAuthenticator(authenticator);
+        hdfsAuthorizer.setAuthenticator(authenticator);
+    }
+
+    static {
+        registerAuthProvider("org.apache.hadoop.hive.hbase.HBaseStorageHandler",
+            "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
+        registerAuthProvider("org.apache.hcatalog.hbase.HBaseHCatStorageHandler",
+            "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
+    }
+
+    //workaround until Hive adds StorageHandler.getAuthorizationProvider(). Remove these parts afterwards
+    public static void registerAuthProvider(String storageHandlerClass,
+                                            String authProviderClass) {
+        authProviders.put(storageHandlerClass, authProviderClass);
+    }
+
+    /** Returns the delegate AuthorizationProvider for the table, obtained from its StorageHandler. */
+    protected HiveAuthorizationProvider getDelegate(Table table) throws HiveException {
+        HiveStorageHandler handler = table.getStorageHandler();
+
+        if (handler != null) {
+            if (handler instanceof HCatStorageHandler) {
+                return ((HCatStorageHandler) handler).getAuthorizationProvider();
+            } else {
+                String authProviderClass = authProviders.get(handler.getClass().getCanonicalName());
+
+                if (authProviderClass != null) {
+                    try {
+                        return (HiveAuthorizationProvider) ReflectionUtils.newInstance(
+                            getConf().getClassByName(authProviderClass), getConf());
+                    } catch (ClassNotFoundException ex) {
+                        throw new HiveException("Cannot instantiate delegation AuthorizationProvider", ex);
+                    }
+                }
+
+                //else we do not have anything to delegate to
+                throw new HiveException(String.format("Storage Handler for table:%s is not an instance " +
+                    "of HCatStorageHandler", table.getTableName()));
+            }
+        } else {
+            //return an authorizer for HDFS
+            return hdfsAuthorizer;
+        }
+    }
+
+    @Override
+    public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        //global authorizations against warehouse hdfs directory
+        hdfsAuthorizer.authorize(readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        //databases are tied to an HDFS location
+        hdfsAuthorizer.authorize(db, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        getDelegate(table).authorize(table, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Partition part, Privilege[] readRequiredPriv,
+                          Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException {
+        getDelegate(part.getTable()).authorize(part, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Partition part, List<String> columns,
+                          Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
+        getDelegate(table).authorize(table, part, columns, readRequiredPriv, writeRequiredPriv);
+    }
+}
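
A sketch of the intended wiring (assumed configuration, not from the commit): the delegating provider is set as Hive's authorization manager, and additional storage-handler/authorizer pairs can be registered through the temporary registerAuthProvider() hook. The com.example class names are placeholders:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hcatalog.security.StorageDelegationAuthorizationProvider;

    public class DelegationAuthSketch {
        public static void main(String[] args) {
            // Map a (hypothetical) storage handler to its (hypothetical) authorization provider.
            StorageDelegationAuthorizationProvider.registerAuthProvider(
                "com.example.MyStorageHandler",
                "com.example.MyAuthorizationProvider");

            HiveConf conf = new HiveConf();
            conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
            conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
                StorageDelegationAuthorizationProvider.class.getName());
        }
    }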

Added: hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/shims/HCatHadoopShims.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/shims/HCatHadoopShims.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/shims/HCatHadoopShims.java (added)
+++ hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/shims/HCatHadoopShims.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hcatalog.shims;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.util.Progressable;
+
+/**
+ * Shim layer to abstract differences between Hadoop 0.20 and 0.23
+ * (HCATALOG-179). This mirrors Hive shims, but is kept separate for HCatalog
+ * dependencies.
+ **/
+public interface HCatHadoopShims {
+
+    enum PropertyName {CACHE_ARCHIVES, CACHE_FILES, CACHE_SYMLINK}
+
+    public static abstract class Instance {
+        static HCatHadoopShims instance = selectShim();
+
+        public static HCatHadoopShims get() {
+            return instance;
+        }
+
+        private static HCatHadoopShims selectShim() {
+            // piggyback on Hive's detection logic
+            String major = ShimLoader.getMajorVersion();
+            String shimFQN = "org.apache.hcatalog.shims.HCatHadoopShims20S";
+            if (major.startsWith("0.23")) {
+                shimFQN = "org.apache.hcatalog.shims.HCatHadoopShims23";
+            }
+            try {
+                Class<? extends HCatHadoopShims> clasz = Class.forName(shimFQN)
+                    .asSubclass(HCatHadoopShims.class);
+                return clasz.newInstance();
+            } catch (Exception e) {
+                throw new RuntimeException("Failed to instantiate: " + shimFQN, e);
+            }
+        }
+    }
+
+    public TaskID createTaskID();
+
+    public TaskAttemptID createTaskAttemptID();
+
+    public org.apache.hadoop.mapreduce.TaskAttemptContext createTaskAttemptContext(Configuration conf,
+                                                                                   TaskAttemptID taskId);
+
+    public org.apache.hadoop.mapred.TaskAttemptContext createTaskAttemptContext(JobConf conf,
+                                                                                org.apache.hadoop.mapred.TaskAttemptID taskId, Progressable progressable);
+
+    public JobContext createJobContext(Configuration conf, JobID jobId);
+
+    public org.apache.hadoop.mapred.JobContext createJobContext(JobConf conf, JobID jobId, Progressable progressable);
+
+    public void commitJob(OutputFormat outputFormat, Job job) throws IOException;
+
+    public void abortJob(OutputFormat outputFormat, Job job) throws IOException;
+
+    /* Referring to job tracker in 0.20 and resource manager in 0.23 */
+    public InetSocketAddress getResourceManagerAddress(Configuration conf);
+
+    public String getPropertyName(PropertyName name);
+
+    /**
+     * Checks whether the file is in the HDFS filesystem.
+     *
+     * @param fs the filesystem the path belongs to
+     * @param path the path of the file to check
+     * @return true if the file is in HDFS, false if the file is in another file system.
+     */
+    public boolean isFileInHDFS(FileSystem fs, Path path) throws IOException;
+
+}
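
A short usage sketch: callers obtain the version-appropriate shim through the Instance holder and go through it for anything whose API differs between Hadoop 0.20 and 0.23 (context creation, distributed-cache property names, and so on):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hcatalog.shims.HCatHadoopShims;

    public class ShimUsageSketch {
        public static void main(String[] args) {
            HCatHadoopShims shims = HCatHadoopShims.Instance.get();

            // Version-neutral construction of a TaskAttemptContext.
            Configuration conf = new Configuration();
            TaskAttemptID attemptId = shims.createTaskAttemptID();
            TaskAttemptContext ctx = shims.createTaskAttemptContext(conf, attemptId);

            // Version-neutral lookup of the distributed-cache files property name.
            String cacheFilesKey = shims.getPropertyName(HCatHadoopShims.PropertyName.CACHE_FILES);
            System.out.println(cacheFilesKey + " / " + ctx.getTaskAttemptID());
        }
    }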

Added: hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java (added)
+++ hive/trunk/hcatalog/core/src/main/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hcatalog.storagehandler;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
+
+/**
+ * A no-op implementation of HiveAuthorizationProvider, intended as a base class
+ * for other implementations to extend and override.
+ */
+class DummyHCatAuthProvider implements HiveAuthorizationProvider {
+
+    @Override
+    public Configuration getConf() {
+        return null;
+    }
+
+    @Override
+    public void setConf(Configuration conf) {
+    }
+
+    /*
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #init(org.apache.hadoop.conf.Configuration)
+    */
+    @Override
+    public void init(Configuration conf) throws HiveException {
+    }
+
+    @Override
+    public HiveAuthenticationProvider getAuthenticator() {
+        return null;
+    }
+
+    @Override
+    public void setAuthenticator(HiveAuthenticationProvider authenticator) {
+    }
+
+    /*
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
+    @Override
+    public void authorize(Privilege[] readRequiredPriv,
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
+    }
+
+    /*
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.metastore.api.Database,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
+    @Override
+    public void authorize(Database db, Privilege[] readRequiredPriv,
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
+    }
+
+    /*
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
+    @Override
+    public void authorize(Table table, Privilege[] readRequiredPriv,
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
+    }
+
+    /*
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Partition,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
+    @Override
+    public void authorize(Partition part, Privilege[] readRequiredPriv,
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
+    }
+
+    /*
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
+    * org.apache.hadoop.hive.ql.metadata.Partition, java.util.List,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
+    @Override
+    public void authorize(Table table, Partition part, List<String> columns,
+                          Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+    }
+
+}
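
Since the class is package-private and every method is a no-op, its intended role is as a base class; a minimal, purely hypothetical subclass in the same package might override just the table-level check:

    package org.apache.hcatalog.storagehandler;

    import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.security.authorization.Privilege;

    // Hypothetical subclass: deny access to one table, allow the rest via inherited no-ops.
    class SingleTableDenyAuthProvider extends DummyHCatAuthProvider {
        @Override
        public void authorize(Table table, Privilege[] readRequiredPriv,
                              Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException {
            if ("restricted_table".equalsIgnoreCase(table.getTableName())) { // hypothetical table name
                throw new AuthorizationException("access to " + table.getTableName() + " is denied");
            }
        }
    }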

Added: hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/ExitException.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/ExitException.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/ExitException.java (added)
+++ hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/ExitException.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hcatalog;
+
+public class ExitException extends SecurityException {
+    private static final long serialVersionUID = -1982617086752946683L;
+    private final int status;
+
+    /**
+     * @return the status
+     */
+    public int getStatus() {
+        return status;
+    }
+
+    public ExitException(int status) {
+
+        super("Raising exception, instead of System.exit(). Return code was: " + status);
+        this.status = status;
+    }
+}
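
ExitException is typically paired with a test-only SecurityManager that intercepts System.exit(), so CLI entry points can be driven from JUnit without terminating the JVM. A sketch of that common pattern (the manager actually used by these tests may differ):

    import java.security.Permission;
    import org.apache.hcatalog.ExitException;

    public class NoExitSecurityManager extends SecurityManager {
        @Override
        public void checkPermission(Permission perm) {
            // allow everything except exiting the JVM
        }

        @Override
        public void checkExit(int status) {
            super.checkExit(status);
            // Surface the would-be exit code to the test instead of exiting.
            throw new ExitException(status);
        }
    }

A test would install it with System.setSecurityManager(new NoExitSecurityManager()), invoke the CLI main(), and assert on getStatus() of the caught ExitException.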

Added: hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/HcatTestUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/HcatTestUtils.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/HcatTestUtils.java (added)
+++ hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/HcatTestUtils.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hcatalog;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility methods for tests
+ */
+public class HcatTestUtils {
+    private static final Logger LOG = LoggerFactory.getLogger(HcatTestUtils.class);
+
+    public static FsPermission perm007 = FsPermission.createImmutable((short) 0007); // -------rwx
+    public static FsPermission perm070 = FsPermission.createImmutable((short) 0070); // ----rwx---
+    public static FsPermission perm700 = FsPermission.createImmutable((short) 0700); // -rwx------
+    public static FsPermission perm755 = FsPermission.createImmutable((short) 0755); // -rwxr-xr-x
+    public static FsPermission perm777 = FsPermission.createImmutable((short) 0777); // -rwxrwxrwx
+    public static FsPermission perm300 = FsPermission.createImmutable((short) 0300); // --wx------
+    public static FsPermission perm500 = FsPermission.createImmutable((short) 0500); // -r-x------
+    public static FsPermission perm555 = FsPermission.createImmutable((short) 0555); // -r-xr-xr-x
+
+    /**
+     * Returns the database path.
+     */
+    public static Path getDbPath(Hive hive, Warehouse wh, String dbName) throws MetaException, HiveException {
+        return wh.getDatabasePath(hive.getDatabase(dbName));
+    }
+
+    /**
+     * Removes all databases and tables from the metastore
+     */
+    public static void cleanupHMS(Hive hive, Warehouse wh, FsPermission defaultPerm)
+        throws HiveException, MetaException, NoSuchObjectException {
+        for (String dbName : hive.getAllDatabases()) {
+            if (dbName.equals("default")) {
+                continue;
+            }
+            try {
+                Path path = getDbPath(hive, wh, dbName);
+                FileSystem whFs = path.getFileSystem(hive.getConf());
+                whFs.setPermission(path, defaultPerm);
+            } catch (IOException ex) {
+                //ignore
+            }
+            hive.dropDatabase(dbName, true, true, true);
+        }
+
+        //clean tables in default db
+        for (String tablename : hive.getAllTables("default")) {
+            hive.dropTable("default", tablename, true, true);
+        }
+    }
+
+    public static void createTestDataFile(String filename, String[] lines) throws IOException {
+        FileWriter writer = null;
+        try {
+            File file = new File(filename);
+            file.deleteOnExit();
+            writer = new FileWriter(file);
+            for (String line : lines) {
+                writer.write(line + "\n");
+            }
+        } finally {
+            if (writer != null) {
+                writer.close();
+            }
+        }
+
+    }
+}
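
A small sketch of how a test might combine these helpers; the file path, data, and permission choice are illustrative:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hcatalog.HcatTestUtils;

    public class HcatTestUtilsSketch {
        public static void main(String[] args) throws Exception {
            // Write a small delimited input file for a test (deleted on JVM exit).
            HcatTestUtils.createTestDataFile("/tmp/hcat_test_input.txt",
                new String[]{"1,alice", "2,bob"});

            // Reset the metastore between tests, restoring warehouse dirs to rwx------.
            HiveConf conf = new HiveConf();
            Hive hive = Hive.get(conf);
            Warehouse wh = new Warehouse(conf);
            HcatTestUtils.cleanupHMS(hive, wh, HcatTestUtils.perm700);
        }
    }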

Added: hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/MiniCluster.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/MiniCluster.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/MiniCluster.java (added)
+++ hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/MiniCluster.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hcatalog;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MiniMRCluster;
+
+/**
+ * This class builds a single instance of itself using the Singleton
+ * design pattern. While building the single instance, it sets up a
+ * mini cluster that consists of a mini DFS cluster and a mini
+ * MapReduce cluster on the local machine, and also sets up the
+ * environment for Pig to run on top of the mini cluster.
+ */
+public class MiniCluster {
+    private MiniDFSCluster m_dfs = null;
+    private MiniMRCluster m_mr = null;
+    private FileSystem m_fileSys = null;
+    private JobConf m_conf = null;
+
+    private final static MiniCluster INSTANCE = new MiniCluster();
+    private static boolean isSetup = true;
+
+    private MiniCluster() {
+        setupMiniDfsAndMrClusters();
+    }
+
+    private void setupMiniDfsAndMrClusters() {
+        try {
+            final int dataNodes = 1;     // There will be 1 data node
+            final int taskTrackers = 1;  // There will be 1 task tracker node
+            Configuration config = new Configuration();
+
+            // Builds and starts the mini dfs and mapreduce clusters
+            System.setProperty("hadoop.log.dir", ".");
+            m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
+
+            m_fileSys = m_dfs.getFileSystem();
+            m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);
+
+            // Create the configuration hadoop-site.xml file
+            File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
+            conf_dir.mkdirs();
+            File conf_file = new File(conf_dir, "hadoop-site.xml");
+
+            // Write the necessary config info to hadoop-site.xml
+            m_conf = m_mr.createJobConf();
+            m_conf.setInt("mapred.submit.replication", 1);
+            m_conf.set("dfs.datanode.address", "0.0.0.0:0");
+            m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
+            m_conf.writeXml(new FileOutputStream(conf_file));
+
+            // Set the system properties needed by Pig
+            System.setProperty("cluster", m_conf.get("mapred.job.tracker"));
+            System.setProperty("namenode", m_conf.get("fs.default.name"));
+            System.setProperty("junit.hadoop.conf", conf_dir.getPath());
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * Returns the single instance of class MiniCluster that
+     * represents the resources for a mini dfs cluster and a mini
+     * mapreduce cluster.
+     */
+    public static MiniCluster buildCluster() {
+        if (!isSetup) {
+            INSTANCE.setupMiniDfsAndMrClusters();
+            isSetup = true;
+        }
+        return INSTANCE;
+    }
+
+    public void shutDown() {
+        INSTANCE.shutdownMiniDfsAndMrClusters();
+    }
+
+    @Override
+    protected void finalize() {
+        shutdownMiniDfsAndMrClusters();
+    }
+
+    private void shutdownMiniDfsAndMrClusters() {
+        isSetup = false;
+        try {
+            if (m_fileSys != null) {
+                m_fileSys.close();
+            }
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+        if (m_dfs != null) {
+            m_dfs.shutdown();
+        }
+        if (m_mr != null) {
+            m_mr.shutdown();
+        }
+        m_fileSys = null;
+        m_dfs = null;
+        m_mr = null;
+    }
+
+    public Properties getProperties() {
+        errorIfNotSetup();
+        Properties properties = new Properties();
+        assert m_conf != null;
+        Iterator<Map.Entry<String, String>> iter = m_conf.iterator();
+        while (iter.hasNext()) {
+            Map.Entry<String, String> entry = iter.next();
+            properties.put(entry.getKey(), entry.getValue());
+        }
+        return properties;
+    }
+
+    public void setProperty(String name, String value) {
+        errorIfNotSetup();
+        m_conf.set(name, value);
+    }
+
+    public FileSystem getFileSystem() {
+        errorIfNotSetup();
+        return m_fileSys;
+    }
+
+    /**
+     * Throws a RuntimeException if isSetup is false.
+     */
+    private void errorIfNotSetup() {
+        if (isSetup) {
+            return;
+        }
+        String msg = "method called on MiniCluster that has been shut down";
+        throw new RuntimeException(msg);
+    }
+
+    public static void createInputFile(MiniCluster miniCluster, String fileName,
+                                       String[] inputData)
+        throws IOException {
+        FileSystem fs = miniCluster.getFileSystem();
+        createInputFile(fs, fileName, inputData);
+    }
+
+    public static void createInputFile(FileSystem fs, String fileName,
+                                       String[] inputData) throws IOException {
+        Path path = new Path(fileName);
+        if (fs.exists(path)) {
+            throw new IOException("File " + fileName + " already exists on the minicluster");
+        }
+        FSDataOutputStream stream = fs.create(path);
+        PrintWriter pw = new PrintWriter(new OutputStreamWriter(stream, "UTF-8"));
+        for (int i = 0; i < inputData.length; i++) {
+            pw.println(inputData[i]);
+        }
+        pw.close();
+
+    }
+
+    /**
+     * Helper to remove a file from the minicluster's DFS
+     *
+     * @param miniCluster reference to the Minicluster where the file should be deleted
+     * @param fileName pathname of the file to be deleted
+     * @throws IOException
+     */
+    public static void deleteFile(MiniCluster miniCluster, String fileName)
+        throws IOException {
+        FileSystem fs = miniCluster.getFileSystem();
+        fs.delete(new Path(fileName), true);
+    }
+}
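For reference, a minimal sketch of how a test might drive this class; the input path and data below are illustrative:

    import java.util.Properties;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hcatalog.MiniCluster;

    public class ExampleMiniClusterUsage {
        public static void main(String[] args) throws Exception {
            // Obtain the singleton, starting the mini DFS and MR clusters if needed.
            MiniCluster cluster = MiniCluster.buildCluster();

            // Stage input data on the mini DFS; this fails if the path already exists.
            MiniCluster.createInputFile(cluster, "input/part-00000",
                new String[]{"line one", "line two"});

            // Inspect the generated job configuration.
            Properties props = cluster.getProperties();
            System.out.println("fs.default.name = " + props.getProperty("fs.default.name"));

            FileSystem fs = cluster.getFileSystem();
            // ... run the test job against fs ...

            MiniCluster.deleteFile(cluster, "input/part-00000");
            cluster.shutDown();
        }
    }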

Added: hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/NoExitSecurityManager.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/NoExitSecurityManager.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/NoExitSecurityManager.java (added)
+++ hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/NoExitSecurityManager.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hcatalog;
+
+import java.security.Permission;
+
+public class NoExitSecurityManager extends SecurityManager {
+
+    @Override
+    public void checkPermission(Permission perm) {
+        // allow anything.
+    }
+
+    @Override
+    public void checkPermission(Permission perm, Object context) {
+        // allow anything.
+    }
+
+    @Override
+    public void checkExit(int status) {
+
+        super.checkExit(status);
+        throw new ExitException(status);
+    }
+}
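A minimal sketch of the intended pattern, assuming ExitException is the companion class in this package (referenced above but not part of this hunk) and that it is an unchecked exception, which checkExit's signature requires:

    import org.apache.hcatalog.NoExitSecurityManager;

    public class ExampleNoExitUsage {
        public static void main(String[] args) {
            SecurityManager original = System.getSecurityManager();
            System.setSecurityManager(new NoExitSecurityManager());
            try {
                // Code under test that may call System.exit(...), e.g. a CLI main().
                System.exit(1);
            } catch (RuntimeException e) {
                // The assumed ExitException surfaces here instead of terminating
                // the JVM, so the test can assert on the exit status it carries.
                System.out.println("Intercepted exit: " + e);
            } finally {
                System.setSecurityManager(original);
            }
        }
    }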

Added: hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/cli/DummyStorageHandler.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/cli/DummyStorageHandler.java?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/cli/DummyStorageHandler.java (added)
+++ hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/cli/DummyStorageHandler.java Fri Sep  6 00:49:14 2013
@@ -0,0 +1,289 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hcatalog.cli;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.HiveMetaHook;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
+import org.apache.hadoop.hive.ql.metadata.AuthorizationException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.Privilege;
+import org.apache.hadoop.hive.serde2.SerDe;
+import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.RecordWriter;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hcatalog.data.HCatRecord;
+import org.apache.hcatalog.mapreduce.HCatStorageHandler;
+
+class DummyStorageHandler extends HCatStorageHandler {
+
+    @Override
+    public Configuration getConf() {
+        return null;
+    }
+
+    @Override
+    public void setConf(Configuration conf) {
+    }
+
+    @Override
+    public Class<? extends InputFormat> getInputFormatClass() {
+        return DummyInputFormat.class;
+    }
+
+    @Override
+    public Class<? extends OutputFormat> getOutputFormatClass() {
+        return DummyOutputFormat.class;
+    }
+
+    @Override
+    public Class<? extends SerDe> getSerDeClass() {
+        return ColumnarSerDe.class;
+    }
+
+    @Override
+    public HiveMetaHook getMetaHook() {
+        return null;
+    }
+
+    @Override
+    public void configureInputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
+    }
+
+    @Override
+    public void configureOutputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
+    }
+
+    @Override
+    public HiveAuthorizationProvider getAuthorizationProvider()
+        throws HiveException {
+        return new DummyAuthProvider();
+    }
+
+    private class DummyAuthProvider implements HiveAuthorizationProvider {
+
+        @Override
+        public Configuration getConf() {
+            return null;
+        }
+
+        /* @param conf
+         * @see org.apache.hadoop.conf.Configurable#setConf(org.apache.hadoop.conf.Configuration)
+         */
+        @Override
+        public void setConf(Configuration conf) {
+        }
+
+        /* @param conf
+         * @throws HiveException
+         * @see org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider#init(org.apache.hadoop.conf.Configuration)
+         */
+        @Override
+        public void init(Configuration conf) throws HiveException {
+        }
+
+        /* @return HiveAuthenticationProvider
+         * @see org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider#getAuthenticator()
+         */
+        @Override
+        public HiveAuthenticationProvider getAuthenticator() {
+            return null;
+        }
+
+        /* @param authenticator
+         * @see org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider#setAuthenticator(org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider)
+         */
+        @Override
+        public void setAuthenticator(HiveAuthenticationProvider authenticator) {
+        }
+
+        /* @param readRequiredPriv
+         * @param writeRequiredPriv
+         * @throws HiveException
+         * @throws AuthorizationException
+         * @see org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider#authorize(org.apache.hadoop.hive.ql.security.authorization.Privilege[], org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+         */
+        @Override
+        public void authorize(Privilege[] readRequiredPriv,
+                              Privilege[] writeRequiredPriv) throws HiveException,
+            AuthorizationException {
+        }
+
+        /* @param db
+         * @param readRequiredPriv
+         * @param writeRequiredPriv
+         * @throws HiveException
+         * @throws AuthorizationException
+         * @see org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider#authorize(org.apache.hadoop.hive.metastore.api.Database, org.apache.hadoop.hive.ql.security.authorization.Privilege[], org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+         */
+        @Override
+        public void authorize(Database db, Privilege[] readRequiredPriv,
+                              Privilege[] writeRequiredPriv) throws HiveException,
+            AuthorizationException {
+        }
+
+        /* @param table
+         * @param readRequiredPriv
+         * @param writeRequiredPriv
+         * @throws HiveException
+         * @throws AuthorizationException
+         * @see org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider#authorize(org.apache.hadoop.hive.ql.metadata.Table, org.apache.hadoop.hive.ql.security.authorization.Privilege[], org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+         */
+        @Override
+        public void authorize(org.apache.hadoop.hive.ql.metadata.Table table, Privilege[] readRequiredPriv,
+                              Privilege[] writeRequiredPriv) throws HiveException,
+            AuthorizationException {
+        }
+
+        /* @param part
+         * @param readRequiredPriv
+         * @param writeRequiredPriv
+         * @throws HiveException
+         * @throws AuthorizationException
+         * @see org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider#authorize(org.apache.hadoop.hive.ql.metadata.Partition, org.apache.hadoop.hive.ql.security.authorization.Privilege[], org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+         */
+        @Override
+        public void authorize(Partition part, Privilege[] readRequiredPriv,
+                              Privilege[] writeRequiredPriv) throws HiveException,
+            AuthorizationException {
+        }
+
+        /* @param table
+         * @param part
+         * @param columns
+         * @param readRequiredPriv
+         * @param writeRequiredPriv
+         * @throws HiveException
+         * @throws AuthorizationException
+         * @see org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider#authorize(org.apache.hadoop.hive.ql.metadata.Table, org.apache.hadoop.hive.ql.metadata.Partition, java.util.List, org.apache.hadoop.hive.ql.security.authorization.Privilege[], org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+         */
+        @Override
+        public void authorize(org.apache.hadoop.hive.ql.metadata.Table table, Partition part, List<String> columns,
+                              Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+            throws HiveException, AuthorizationException {
+        }
+
+    }
+
+    /**
+     * DummyInputFormat is a dummy implementation of the old Hadoop
+     * mapred.InputFormat required by HiveStorageHandler.
+     */
+    class DummyInputFormat implements
+        InputFormat<WritableComparable, HCatRecord> {
+
+        /*
+         * @see
+         * org.apache.hadoop.mapred.InputFormat#getRecordReader(org.apache.hadoop
+         * .mapred.InputSplit, org.apache.hadoop.mapred.JobConf,
+         * org.apache.hadoop.mapred.Reporter)
+         */
+        @Override
+        public RecordReader<WritableComparable, HCatRecord> getRecordReader(
+            InputSplit split, JobConf jobconf, Reporter reporter)
+            throws IOException {
+            throw new IOException("This operation is not supported.");
+        }
+
+        /*
+         * @see
+         * org.apache.hadoop.mapred.InputFormat#getSplits(org.apache.hadoop.
+         * mapred .JobConf, int)
+         */
+        @Override
+        public InputSplit[] getSplits(JobConf jobconf, int number)
+            throws IOException {
+            throw new IOException("This operation is not supported.");
+        }
+    }
+
+    /**
+     * DummyOutputFormat is a dummy implementation of the old Hadoop
+     * mapred.OutputFormat and HiveOutputFormat required by HiveStorageHandler.
+     */
+    class DummyOutputFormat implements
+        OutputFormat<WritableComparable<?>, HCatRecord>,
+        HiveOutputFormat<WritableComparable<?>, HCatRecord> {
+
+        /*
+         * @see
+         * org.apache.hadoop.mapred.OutputFormat#checkOutputSpecs(org.apache
+         * .hadoop .fs.FileSystem, org.apache.hadoop.mapred.JobConf)
+         */
+        @Override
+        public void checkOutputSpecs(FileSystem fs, JobConf jobconf)
+            throws IOException {
+            throw new IOException("This operation is not supported.");
+
+        }
+
+        /*
+         * @see
+         * org.apache.hadoop.mapred.OutputFormat#getRecordWriter(org.apache.
+         * hadoop .fs.FileSystem, org.apache.hadoop.mapred.JobConf,
+         * java.lang.String, org.apache.hadoop.util.Progressable)
+         */
+        @Override
+        public RecordWriter<WritableComparable<?>, HCatRecord> getRecordWriter(
+            FileSystem fs, JobConf jobconf, String str,
+            Progressable progress) throws IOException {
+            throw new IOException("This operation is not supported.");
+        }
+
+        /*
+         * @see
+         * org.apache.hadoop.hive.ql.io.HiveOutputFormat#getHiveRecordWriter(org
+         * .apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path,
+         * java.lang.Class, boolean, java.util.Properties,
+         * org.apache.hadoop.util.Progressable)
+         */
+        @Override
+        public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getHiveRecordWriter(
+            JobConf jc, Path finalOutPath,
+            Class<? extends Writable> valueClass, boolean isCompressed,
+            Properties tableProperties, Progressable progress)
+            throws IOException {
+            throw new IOException("This operation is not supported.");
+        }
+
+    }
+
+}
+
+
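For context, a hedged sketch of how a test might register a table against this handler via HCatDriver; the table name and DDL are illustrative assumptions, not part of this patch. Since every I/O method above throws IOException, reads and writes against such a table are expected to fail:

    import org.apache.hadoop.hive.cli.CliSessionState;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
    import org.apache.hadoop.hive.ql.session.SessionState;
    import org.apache.hcatalog.cli.HCatDriver;

    public class ExampleDummyHandlerUsage {
        public static void main(String[] args) throws Exception {
            HiveConf conf = new HiveConf(ExampleDummyHandlerUsage.class);
            SessionState.start(new CliSessionState(conf));
            HCatDriver driver = new HCatDriver();

            // Create a table backed by the dummy handler (illustrative DDL).
            CommandProcessorResponse resp = driver.run(
                "create table dummy_handler_test (a int) stored by "
                + "'org.apache.hcatalog.cli.DummyStorageHandler'");
            System.out.println("response code: " + resp.getResponseCode());

            driver.run("drop table dummy_handler_test");
        }
    }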

Added: hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/cli/TestEximSemanticAnalysis.java.broken
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/cli/TestEximSemanticAnalysis.java.broken?rev=1520466&view=auto
==============================================================================
--- hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/cli/TestEximSemanticAnalysis.java.broken (added)
+++ hive/trunk/hcatalog/core/src/test/java/org/apache/hcatalog/cli/TestEximSemanticAnalysis.java.broken Fri Sep  6 00:49:14 2013
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hcatalog.cli;
+
+import java.io.IOException;
+import java.net.URI;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hcatalog.MiniCluster;
+import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
+import org.apache.hcatalog.common.HCatConstants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class TestEximSemanticAnalysis extends TestCase {
+
+  private final MiniCluster cluster = MiniCluster.buildCluster();
+  private HiveConf hcatConf;
+  private HCatDriver hcatDriver;
+  private Warehouse wh;
+  private static final Logger LOG = LoggerFactory.getLogger(TestEximSemanticAnalysis.class);
+
+  @Override
+  protected void setUp() throws Exception {
+
+    hcatConf = new HiveConf(this.getClass());
+    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+    hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
+    hcatConf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");
+    URI fsuri = cluster.getFileSystem().getUri();
+    Path whPath = new Path(fsuri.getScheme(), fsuri.getAuthority(), "/user/hive/warehouse");
+    hcatConf.set(HiveConf.ConfVars.HADOOPFS.varname, fsuri.toString());
+    hcatConf.set(ConfVars.METASTOREWAREHOUSE.varname, whPath.toString());
+    wh = new Warehouse(hcatConf);
+    SessionState.start(new CliSessionState(hcatConf));
+
+    hcatDriver = new HCatDriver();
+  }
+
+  @Override
+  protected void tearDown() throws Exception {
+  }
+
+  public void testExportPerms() throws IOException, MetaException, HiveException {
+
+    hcatDriver.run("drop table junit_sem_analysis");
+    CommandProcessorResponse response = hcatDriver
+        .run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+    assertEquals(0, response.getResponseCode());
+    Path whPath = wh.getTablePath(Hive.get(hcatConf).getDatabase("default"), "junit_sem_analysis");
+    cluster.getFileSystem().setPermission(whPath, FsPermission.valueOf("-rwxrwx-wx"));
+    cluster.getFileSystem().setOwner(whPath, "nosuchuser", "nosuchgroup");
+
+    Runtime.getRuntime().exec("rm -rf /tmp/hcat");
+    response = hcatDriver
+        .run("export table junit_sem_analysis to 'pfile://local:9080/tmp/hcat/exports/junit_sem_analysis'");
+
+    assertEquals(10, response.getResponseCode());
+    assertTrue("Permission denied expected : "+response.getErrorMessage(),
+        response.getErrorMessage().startsWith(
+            "FAILED: Error in semantic analysis: org.apache.hcatalog.common.HCatException : 3000 : Permission denied"));
+    Runtime.getRuntime().exec("rm -rf /tmp/hcat");
+    response = hcatDriver.run("drop table junit_sem_analysis");
+    if (response.getResponseCode() != 0) {
+      LOG.error(response.getErrorMessage());
+      fail("Drop table failed");
+    }
+  }
+
+  public void testImportPerms() throws IOException, MetaException, HiveException {
+
+    hcatDriver.run("drop table junit_sem_analysis");
+    CommandProcessorResponse response = hcatDriver
+        .run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+    assertEquals(0, response.getResponseCode());
+    Runtime.getRuntime().exec("rm -rf /tmp/hcat");
+    response = hcatDriver
+        .run("export table junit_sem_analysis to 'pfile://local:9080/tmp/hcat/exports/junit_sem_analysis'");
+    assertEquals(0, response.getResponseCode());
+    response = hcatDriver.run("drop table junit_sem_analysis");
+    assertEquals(0, response.getResponseCode());
+    response = hcatDriver
+        .run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+    assertEquals(0, response.getResponseCode());
+    Path whPath = wh.getTablePath(Hive.get(hcatConf).getDatabase("default"), "junit_sem_analysis");
+    cluster.getFileSystem().setPermission(whPath, FsPermission.valueOf("-rwxrwxr-x"));
+    cluster.getFileSystem().setOwner(whPath, "nosuchuser", "nosuchgroup");
+
+    response = hcatDriver
+        .run("import table junit_sem_analysis from 'pfile://local:9080/tmp/hcat/exports/junit_sem_analysis'");
+
+    assertEquals(10, response.getResponseCode());
+    assertTrue(
+        "Permission denied expected: "+response.getErrorMessage() ,
+        response.getErrorMessage().startsWith(
+            "FAILED: Error in semantic analysis: org.apache.hcatalog.common.HCatException : 3000 : Permission denied"));
+    Runtime.getRuntime().exec("rm -rf /tmp/hcat");
+
+    cluster.getFileSystem().setPermission(whPath, FsPermission.valueOf("-rwxrwxrwx"));
+    response = hcatDriver.run("drop table junit_sem_analysis");
+    if (response.getResponseCode() != 0) {
+      LOG.error(response.getErrorMessage());
+      fail("Drop table failed");
+    }
+  }
+
+  public void testImportSetPermsGroup() throws IOException, MetaException, HiveException {
+
+    hcatDriver.run("drop table junit_sem_analysis");
+    hcatDriver.run("drop table junit_sem_analysis_imported");
+    CommandProcessorResponse response = hcatDriver
+        .run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+    assertEquals(0, response.getResponseCode());
+    Runtime.getRuntime().exec("rm -rf /tmp/hcat");
+    response = hcatDriver
+        .run("export table junit_sem_analysis to 'pfile://local:9080/tmp/hcat/exports/junit_sem_analysis'");
+    assertEquals(0, response.getResponseCode());
+    response = hcatDriver.run("drop table junit_sem_analysis");
+    assertEquals(0, response.getResponseCode());
+
+    hcatConf.set(HCatConstants.HCAT_PERMS, "-rwxrw-r--");
+    hcatConf.set(HCatConstants.HCAT_GROUP, "nosuchgroup");
+
+    response = hcatDriver
+        .run("import table junit_sem_analysis_imported from 'pfile://local:9080/tmp/hcat/exports/junit_sem_analysis'");
+    assertEquals(0, response.getResponseCode());
+
+    Path whPath = wh.getTablePath(Hive.get(hcatConf).getDatabase("default"), "junit_sem_analysis_imported");
+    assertEquals(FsPermission.valueOf("-rwxrw-r--"), cluster.getFileSystem().getFileStatus(whPath).getPermission());
+    assertEquals("nosuchgroup", cluster.getFileSystem().getFileStatus(whPath).getGroup());
+
+    Runtime.getRuntime().exec("rm -rf /tmp/hcat");
+
+    response = hcatDriver.run("drop table junit_sem_analysis_imported");
+    if (response.getResponseCode() != 0) {
+      LOG.error(response.getErrorMessage());
+      fail("Drop table failed");
+    }
+  }
+
+
+}
+


