incubator-hcatalog-commits mailing list archives

From: tra...@apache.org
Subject: svn commit: r1383152 [13/27] - in /incubator/hcatalog/trunk: ./ hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/ hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/ hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/ ...
Date: Mon, 10 Sep 2012 23:29:03 GMT
Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java Mon Sep 10 23:28:55 2012
@@ -29,22 +29,21 @@ import org.apache.hadoop.mapreduce.Recor
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 
-public class RCFileMapReduceInputFormat<K extends LongWritable,V extends BytesRefArrayWritable>
-extends FileInputFormat<LongWritable, BytesRefArrayWritable>
-{
+public class RCFileMapReduceInputFormat<K extends LongWritable, V extends BytesRefArrayWritable>
+    extends FileInputFormat<LongWritable, BytesRefArrayWritable> {
 
-  @Override
-  public RecordReader<LongWritable,BytesRefArrayWritable> createRecordReader(InputSplit split,
-      TaskAttemptContext context) throws IOException, InterruptedException {
+    @Override
+    public RecordReader<LongWritable, BytesRefArrayWritable> createRecordReader(InputSplit split,
+                                                                                TaskAttemptContext context) throws IOException, InterruptedException {
 
-    context.setStatus(split.toString());
-    return new RCFileMapReduceRecordReader<LongWritable,BytesRefArrayWritable>();
-  }
+        context.setStatus(split.toString());
+        return new RCFileMapReduceRecordReader<LongWritable, BytesRefArrayWritable>();
+    }
 
-  @Override
-  public List<InputSplit> getSplits(JobContext job) throws IOException {
+    @Override
+    public List<InputSplit> getSplits(JobContext job) throws IOException {
 
-    job.getConfiguration().setLong("mapred.min.split.size", SequenceFile.SYNC_INTERVAL);
-    return super.getSplits(job);
-  }
+        job.getConfiguration().setLong("mapred.min.split.size", SequenceFile.SYNC_INTERVAL);
+        return super.getSplits(job);
+    }
 }
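
[Editor's note] A usage sketch, not part of this commit: a map-only row counter wired to the input format above. The table path and output path are placeholders.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    import org.apache.hcatalog.rcfile.RCFileMapReduceInputFormat;

    public class RCFileRowCount {

        public static class RowCountMapper
            extends Mapper<LongWritable, BytesRefArrayWritable, Text, LongWritable> {
            private static final Text KEY = new Text("rows");
            private static final LongWritable ONE = new LongWritable(1);

            @Override
            protected void map(LongWritable offset, BytesRefArrayWritable row, Context ctx)
                throws IOException, InterruptedException {
                // One output record per RCFile row; 'row' holds the row's column bytes.
                ctx.write(KEY, ONE);
            }
        }

        public static void main(String[] args) throws Exception {
            Job job = new Job(new Configuration(), "rcfile-row-count");
            job.setJarByClass(RCFileRowCount.class);
            job.setInputFormatClass(RCFileMapReduceInputFormat.class);
            job.setMapperClass(RowCountMapper.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(LongWritable.class);
            FileInputFormat.addInputPath(job, new Path("/user/hive/warehouse/rc_table"));
            FileOutputFormat.setOutputPath(job, new Path("/tmp/rcfile-row-count-out"));
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }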

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java Mon Sep 10 23:28:55 2012
@@ -39,66 +39,66 @@ import org.apache.hadoop.util.Reflection
 public class RCFileMapReduceOutputFormat extends
     FileOutputFormat<WritableComparable<?>, BytesRefArrayWritable> {
 
-  /**
-   * Set number of columns into the given configuration.
-   * @param conf
-   *          configuration instance which need to set the column number
-   * @param columnNum
-   *          column number for RCFile's Writer
-   *
-   */
-  public static void setColumnNumber(Configuration conf, int columnNum) {
-    assert columnNum > 0;
-    conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNum);
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.mapreduce.lib.output.FileOutputFormat#getRecordWriter(org.apache.hadoop.mapreduce.TaskAttemptContext)
-   */
-  @Override
-  public org.apache.hadoop.mapreduce.RecordWriter<WritableComparable<?>, BytesRefArrayWritable> getRecordWriter(
-      TaskAttemptContext task) throws IOException, InterruptedException {
-
-    //FileOutputFormat.getWorkOutputPath takes TaskInputOutputContext instead of
-    //TaskAttemptContext, so can't use that here
-    FileOutputCommitter committer = (FileOutputCommitter) getOutputCommitter(task);
-    Path outputPath = committer.getWorkPath();
-
-    FileSystem fs = outputPath.getFileSystem(task.getConfiguration());
-
-    if (!fs.exists(outputPath)) {
-      fs.mkdirs(outputPath);
+    /**
+     * Set number of columns into the given configuration.
+     * @param conf
+     *          configuration instance which need to set the column number
+     * @param columnNum
+     *          column number for RCFile's Writer
+     *
+     */
+    public static void setColumnNumber(Configuration conf, int columnNum) {
+        assert columnNum > 0;
+        conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNum);
     }
 
-    Path file = getDefaultWorkFile(task, "");
-
-    CompressionCodec codec = null;
-    if (getCompressOutput(task)) {
-      Class<?> codecClass = getOutputCompressorClass(task, DefaultCodec.class);
-      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, task.getConfiguration());
+    /* (non-Javadoc)
+    * @see org.apache.hadoop.mapreduce.lib.output.FileOutputFormat#getRecordWriter(org.apache.hadoop.mapreduce.TaskAttemptContext)
+    */
+    @Override
+    public org.apache.hadoop.mapreduce.RecordWriter<WritableComparable<?>, BytesRefArrayWritable> getRecordWriter(
+        TaskAttemptContext task) throws IOException, InterruptedException {
+
+        //FileOutputFormat.getWorkOutputPath takes TaskInputOutputContext instead of
+        //TaskAttemptContext, so can't use that here
+        FileOutputCommitter committer = (FileOutputCommitter) getOutputCommitter(task);
+        Path outputPath = committer.getWorkPath();
+
+        FileSystem fs = outputPath.getFileSystem(task.getConfiguration());
+
+        if (!fs.exists(outputPath)) {
+            fs.mkdirs(outputPath);
+        }
+
+        Path file = getDefaultWorkFile(task, "");
+
+        CompressionCodec codec = null;
+        if (getCompressOutput(task)) {
+            Class<?> codecClass = getOutputCompressorClass(task, DefaultCodec.class);
+            codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, task.getConfiguration());
+        }
+
+        final RCFile.Writer out = new RCFile.Writer(fs, task.getConfiguration(), file, task, codec);
+
+        return new RecordWriter<WritableComparable<?>, BytesRefArrayWritable>() {
+
+            /* (non-Javadoc)
+            * @see org.apache.hadoop.mapreduce.RecordWriter#write(java.lang.Object, java.lang.Object)
+            */
+            @Override
+            public void write(WritableComparable<?> key, BytesRefArrayWritable value)
+                throws IOException {
+                out.append(value);
+            }
+
+            /* (non-Javadoc)
+            * @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext)
+            */
+            @Override
+            public void close(TaskAttemptContext task) throws IOException, InterruptedException {
+                out.close();
+            }
+        };
     }
 
-    final RCFile.Writer out = new RCFile.Writer(fs, task.getConfiguration(), file, task, codec);
-
-    return new RecordWriter<WritableComparable<?>, BytesRefArrayWritable>() {
-
-      /* (non-Javadoc)
-       * @see org.apache.hadoop.mapreduce.RecordWriter#write(java.lang.Object, java.lang.Object)
-       */
-      @Override
-      public void write(WritableComparable<?> key, BytesRefArrayWritable value)
-          throws IOException {
-        out.append(value);
-      }
-
-      /* (non-Javadoc)
-       * @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext)
-       */
-      @Override
-      public void close(TaskAttemptContext task) throws IOException, InterruptedException {
-        out.close();
-      }
-    };
-  }
-
 }
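
[Editor's note] On the write side, a hedged sketch of the job configuration this output format expects; the column count, codec, and output path are placeholders and not part of this commit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.compress.GzipCodec;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    import org.apache.hcatalog.rcfile.RCFileMapReduceOutputFormat;

    public class RCFileWriteJobSetup {
        public static Job configure() throws Exception {
            Job job = new Job(new Configuration(), "rcfile-write");
            // RCFile.Writer must know how many columns each BytesRefArrayWritable row carries.
            RCFileMapReduceOutputFormat.setColumnNumber(job.getConfiguration(), 3);
            job.setOutputFormatClass(RCFileMapReduceOutputFormat.class);
            job.setOutputKeyClass(NullWritable.class);      // the key is ignored; write() appends only the value
            job.setOutputValueClass(BytesRefArrayWritable.class);
            // Compression is picked up through the standard FileOutputFormat settings,
            // read back via getCompressOutput()/getOutputCompressorClass() above.
            FileOutputFormat.setCompressOutput(job, true);
            FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
            FileOutputFormat.setOutputPath(job, new Path("/tmp/rc_out"));
            return job;
        }
    }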

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java Mon Sep 10 23:28:55 2012
@@ -31,90 +31,90 @@ import org.apache.hadoop.mapreduce.TaskA
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
 
 public class RCFileMapReduceRecordReader<K extends LongWritable, V extends BytesRefArrayWritable>
-  extends RecordReader<LongWritable,BytesRefArrayWritable>{
+    extends RecordReader<LongWritable, BytesRefArrayWritable> {
 
-  private Reader in;
-  private long start;
-  private long end;
-  private boolean more = true;
+    private Reader in;
+    private long start;
+    private long end;
+    private boolean more = true;
 
-  // key and value objects are created once in initialize() and then reused
-  // for every getCurrentKey() and getCurrentValue() call. This is important
-  // since RCFile makes an assumption of this fact.
+    // key and value objects are created once in initialize() and then reused
+    // for every getCurrentKey() and getCurrentValue() call. This is important
+    // since RCFile makes an assumption of this fact.
 
-  private LongWritable key;
-  private BytesRefArrayWritable value;
+    private LongWritable key;
+    private BytesRefArrayWritable value;
 
-  @Override
-  public void close() throws IOException {
-    in.close();
-  }
-
-  @Override
-  public LongWritable getCurrentKey() throws IOException, InterruptedException {
-    return key;
-  }
-
-  @Override
-  public BytesRefArrayWritable getCurrentValue() throws IOException, InterruptedException {
-    return value;
-  }
+    @Override
+    public void close() throws IOException {
+        in.close();
+    }
 
-  @Override
-  public float getProgress() throws IOException, InterruptedException {
-    if (end == start) {
-      return 0.0f;
-    } else {
-      return Math.min(1.0f, (in.getPosition() - start) / (float) (end - start));
+    @Override
+    public LongWritable getCurrentKey() throws IOException, InterruptedException {
+        return key;
     }
-  }
 
-  @Override
-  public boolean nextKeyValue() throws IOException, InterruptedException {
+    @Override
+    public BytesRefArrayWritable getCurrentValue() throws IOException, InterruptedException {
+        return value;
+    }
 
-    more = next(key);
-    if (more) {
-      in.getCurrentRow(value);
+    @Override
+    public float getProgress() throws IOException, InterruptedException {
+        if (end == start) {
+            return 0.0f;
+        } else {
+            return Math.min(1.0f, (in.getPosition() - start) / (float) (end - start));
+        }
     }
 
-    return more;
-  }
+    @Override
+    public boolean nextKeyValue() throws IOException, InterruptedException {
 
-  private boolean next(LongWritable key) throws IOException {
-    if (!more) {
-      return false;
-    }
+        more = next(key);
+        if (more) {
+            in.getCurrentRow(value);
+        }
 
-    more = in.next(key);
-    if (!more) {
-      return false;
+        return more;
     }
 
-    if (in.lastSeenSyncPos() >= end) {
-      more = false;
-      return more;
+    private boolean next(LongWritable key) throws IOException {
+        if (!more) {
+            return false;
+        }
+
+        more = in.next(key);
+        if (!more) {
+            return false;
+        }
+
+        if (in.lastSeenSyncPos() >= end) {
+            more = false;
+            return more;
+        }
+        return more;
     }
-    return more;
-  }
 
-  @Override
-  public void initialize(InputSplit split, TaskAttemptContext context) throws IOException,
-      InterruptedException {
+    @Override
+    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException,
+        InterruptedException {
 
-    FileSplit fSplit = (FileSplit)split;
-    Path path = fSplit.getPath();
-    Configuration conf = context.getConfiguration();
-    this.in = new RCFile.Reader(path.getFileSystem(conf), path, conf);
-    this.end = fSplit.getStart() + fSplit.getLength();
+        FileSplit fSplit = (FileSplit) split;
+        Path path = fSplit.getPath();
+        Configuration conf = context.getConfiguration();
+        this.in = new RCFile.Reader(path.getFileSystem(conf), path, conf);
+        this.end = fSplit.getStart() + fSplit.getLength();
 
-    if(fSplit.getStart() > in.getPosition()) {
-      in.sync(fSplit.getStart());
-    }
+        if (fSplit.getStart() > in.getPosition()) {
+            in.sync(fSplit.getStart());
+        }
 
-    this.start = in.getPosition();
-    more = start < end;
+        this.start = in.getPosition();
+        more = start < end;
 
-    key = new LongWritable();
-    value = new BytesRefArrayWritable();
-  }
+        key = new LongWritable();
+        value = new BytesRefArrayWritable();
+    }
 }
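
[Editor's note] Because the reader reuses a single key and value object across nextKeyValue() calls (see the comment retained above), a mapper that buffers rows has to copy them first. An illustrative, hypothetical mapper:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.WritableUtils;
    import org.apache.hadoop.mapreduce.Mapper;

    public class BufferingMapper
        extends Mapper<LongWritable, BytesRefArrayWritable, LongWritable, BytesRefArrayWritable> {

        private final List<BytesRefArrayWritable> buffered = new ArrayList<BytesRefArrayWritable>();

        @Override
        protected void map(LongWritable key, BytesRefArrayWritable value, Context ctx) {
            // 'value' is overwritten by the record reader on the next nextKeyValue() call,
            // so clone it (serialization round-trip) before keeping a reference to it.
            buffered.add(WritableUtils.clone(value, ctx.getConfiguration()));
        }
    }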

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/HdfsAuthorizationProvider.java Mon Sep 10 23:28:55 2012
@@ -48,272 +48,281 @@ import org.apache.hadoop.hive.shims.Shim
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
-/** 
+/**
  * An AuthorizationProvider, which checks against the data access level permissions on HDFS.
  * It makes sense to eventually move this class to Hive, so that all hive users can
  * use this authorization model. 
  */
 public class HdfsAuthorizationProvider extends HiveAuthorizationProviderBase {
 
-  protected Warehouse wh;
-  
-  //Config variables : create an enum to store them if we have more
-  private static final String PROXY_USER_NAME = "proxy.user.name";
-
-  public HdfsAuthorizationProvider() {
-    super();
-  }
-  
-  public HdfsAuthorizationProvider(Configuration conf) {
-    super();
-    setConf(conf);
-  }
-  
-  @Override
-  public void setConf(Configuration conf) {
-    super.setConf(conf);
-    try {
-      this.wh = new Warehouse(conf);
-    } catch (MetaException ex) {
-      throw new RuntimeException(ex);
-    }
-  }
-  
-  protected FsAction getFsAction(Privilege priv, Path path) {
-    
-    switch (priv.getPriv()) {
-    case ALL            : throw new AuthorizationException("no matching Action for Privilege.All");
-    case ALTER_DATA     : return FsAction.WRITE;   
-    case ALTER_METADATA : return FsAction.WRITE;  
-    case CREATE         : return FsAction.WRITE;
-    case DROP           : return FsAction.WRITE;
-    case INDEX          : return FsAction.WRITE;
-    case LOCK           : return FsAction.WRITE;
-    case SELECT         : return FsAction.READ;
-    case SHOW_DATABASE  : return FsAction.READ;
-    case UNKNOWN        : 
-    default             : throw new AuthorizationException("Unknown privilege");
-    }
-  }
-  
-  protected EnumSet<FsAction> getFsActions(Privilege[] privs, Path path) {
-    EnumSet<FsAction> actions = EnumSet.noneOf(FsAction.class);
-    
-    if (privs == null) {
-      return actions;
-    }
-    
-    for (Privilege priv : privs) {
-      actions.add(getFsAction(priv, path));
-    }
-    
-    return actions;
-  }
-  
-  private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
-
-  private Path getDefaultDatabasePath(String dbName) throws MetaException {
-    if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
-      return wh.getWhRoot();
-    }
-    return new Path(wh.getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
-  }
-  
-  protected Path getDbLocation(Database db) throws HiveException {
-    try {
-      String location = db.getLocationUri();
-      if (location == null) {
-        return getDefaultDatabasePath(db.getName());
-      } else {
-        return wh.getDnsPath(wh.getDatabasePath(db));
-      }
-    } catch (MetaException ex) {
-      throw new HiveException(ex.getMessage());
-    }
-  }
-  
-  @Override
-  public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    //Authorize for global level permissions at the warehouse dir
-    Path root;
-    try {
-      root = wh.getWhRoot();
-      authorize(root, readRequiredPriv, writeRequiredPriv);
-    } catch (MetaException ex) {
-      throw new HiveException(ex);
-    }
-  }
-
-  @Override
-  public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    if (db == null) {
-      return;
-    }
-
-    Path path = getDbLocation(db);
-    
-    authorize(path, readRequiredPriv, writeRequiredPriv);
-  }
-
-  @Override
-  public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    if (table == null) {
-      return;
-    }
-    
-    //unlike Hive's model, this can be called at CREATE TABLE as well, since we should authorize 
-    //against the table's declared location
-    Path path = null;
-    try {
-      if (table.getTTable().getSd().getLocation() == null
-          || table.getTTable().getSd().getLocation().isEmpty()) {
-            path = wh.getTablePath(hive_db.getDatabase(table.getDbName()), table.getTableName());
-      } else {
-         path = table.getPath();
-      }
-    } catch (MetaException ex) {
-      throw new HiveException(ex);
-    }
-    
-    authorize(path, readRequiredPriv, writeRequiredPriv);
-  }
-
-  //TODO: HiveAuthorizationProvider should expose this interface instead of #authorize(Partition, Privilege[], Privilege[])
-  public void authorize(Table table, Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    
-    if (part == null || part.getLocation() == null) {
-      authorize(table, readRequiredPriv, writeRequiredPriv);
-    } else {
-      authorize(part.getPartitionPath(), readRequiredPriv, writeRequiredPriv);
-    }
-  }
-
-  @Override
-  public void authorize(Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    if (part == null) {
-      return;
-    }
-    authorize(part.getTable(), part, readRequiredPriv, writeRequiredPriv);
-  }
-
-  @Override
-  public void authorize(Table table, Partition part, List<String> columns,
-      Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
-      AuthorizationException {
-    //columns cannot live in different files, just check for partition level permissions
-    authorize(table, part, readRequiredPriv, writeRequiredPriv);
-  }
-  
-  /** 
-   * Authorization privileges against a path.
-   * @param path a filesystem path
-   * @param readRequiredPriv a list of privileges needed for inputs.
-   * @param writeRequiredPriv a list of privileges needed for outputs.
-   */
-  public void authorize(Path path, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) 
-      throws HiveException, AuthorizationException {
-    try {
-      EnumSet<FsAction> actions = getFsActions(readRequiredPriv, path);
-      actions.addAll(getFsActions(writeRequiredPriv, path));
-      if (actions.isEmpty()) {
-        return;
-      }
-      
-      checkPermissions(getConf(), path, actions);
-      
-    } catch (AccessControlException ex) {
-      throw new AuthorizationException(ex);
-    } catch (LoginException ex) {
-      throw new AuthorizationException(ex);
-    } catch (IOException ex) {
-      throw new HiveException(ex);
-    }
-  }
-  
-  /**
-   * Checks the permissions for the given path and current user on Hadoop FS. If the given path 
-   * does not exists, it checks for it's parent folder.
-   */
-  protected static void checkPermissions(final Configuration conf, final Path path, 
-      final EnumSet<FsAction> actions) throws IOException, LoginException {
-
-    if (path == null) {
-      throw new IllegalArgumentException("path is null");
-    }
-
-    HadoopShims shims = ShimLoader.getHadoopShims();
-    final UserGroupInformation ugi;
-    if(conf.get(PROXY_USER_NAME) != null){
-        ugi = UserGroupInformation.createRemoteUser(conf.get(PROXY_USER_NAME));
-    }
-    else {
-        ugi = shims.getUGIForConf(conf);
-    }
-    final String user = shims.getShortUserName(ugi);  
-        
-    final FileSystem fs = path.getFileSystem(conf);
-
-    if (fs.exists(path)) {
-      checkPermissions(fs, path, actions, user, ugi.getGroupNames());
-    } else if (path.getParent() != null) {
-      // find the ancestor which exists to check it's permissions
-      Path par = path.getParent();
-      while (par != null) {
-        if (fs.exists(par)) {
-          break;
-        }
-        par = par.getParent();
-      }
-
-      checkPermissions(fs, par, actions, user, ugi.getGroupNames());
-    }
-  }
-  
-  /**
-   * Checks the permissions for the given path and current user on Hadoop FS. If the given path 
-   * does not exists, it returns.
-   */
-  @SuppressWarnings("deprecation")
-  protected static void checkPermissions(final FileSystem fs, final Path path,
-      final EnumSet<FsAction> actions, String user, String[] groups) throws IOException,
-      AccessControlException {
-    
-    final FileStatus stat;
-
-    try {
-      stat = fs.getFileStatus(path);
-    } catch (FileNotFoundException fnfe) {
-      // File named by path doesn't exist; nothing to validate.
-      return;
-    } catch (org.apache.hadoop.fs.permission.AccessControlException ace) {
-      // Older hadoop version will throw this @deprecated Exception.
-      throw new AccessControlException(ace.getMessage());
-    }
-
-    final FsPermission dirPerms = stat.getPermission();
-    final String grp = stat.getGroup();
-
-    for (FsAction action : actions) {
-      if (user.equals(stat.getOwner())) {
-        if (dirPerms.getUserAction().implies(action)) {
-          continue;
-        }
-      }
-      if (ArrayUtils.contains(groups, grp)) {
-        if (dirPerms.getGroupAction().implies(action)) {
-          continue;
-        }
-      }
-      if (dirPerms.getOtherAction().implies(action)) {
-        continue;
-      }
-      throw new AccessControlException("action " + action + " not permitted on path " 
-          + path + " for user " + user);
+    protected Warehouse wh;
+
+    //Config variables : create an enum to store them if we have more
+    private static final String PROXY_USER_NAME = "proxy.user.name";
+
+    public HdfsAuthorizationProvider() {
+        super();
+    }
+
+    public HdfsAuthorizationProvider(Configuration conf) {
+        super();
+        setConf(conf);
+    }
+
+    @Override
+    public void setConf(Configuration conf) {
+        super.setConf(conf);
+        try {
+            this.wh = new Warehouse(conf);
+        } catch (MetaException ex) {
+            throw new RuntimeException(ex);
+        }
+    }
+
+    protected FsAction getFsAction(Privilege priv, Path path) {
+
+        switch (priv.getPriv()) {
+        case ALL:
+            throw new AuthorizationException("no matching Action for Privilege.All");
+        case ALTER_DATA:
+            return FsAction.WRITE;
+        case ALTER_METADATA:
+            return FsAction.WRITE;
+        case CREATE:
+            return FsAction.WRITE;
+        case DROP:
+            return FsAction.WRITE;
+        case INDEX:
+            return FsAction.WRITE;
+        case LOCK:
+            return FsAction.WRITE;
+        case SELECT:
+            return FsAction.READ;
+        case SHOW_DATABASE:
+            return FsAction.READ;
+        case UNKNOWN:
+        default:
+            throw new AuthorizationException("Unknown privilege");
+        }
+    }
+
+    protected EnumSet<FsAction> getFsActions(Privilege[] privs, Path path) {
+        EnumSet<FsAction> actions = EnumSet.noneOf(FsAction.class);
+
+        if (privs == null) {
+            return actions;
+        }
+
+        for (Privilege priv : privs) {
+            actions.add(getFsAction(priv, path));
+        }
+
+        return actions;
+    }
+
+    private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
+
+    private Path getDefaultDatabasePath(String dbName) throws MetaException {
+        if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
+            return wh.getWhRoot();
+        }
+        return new Path(wh.getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
+    }
+
+    protected Path getDbLocation(Database db) throws HiveException {
+        try {
+            String location = db.getLocationUri();
+            if (location == null) {
+                return getDefaultDatabasePath(db.getName());
+            } else {
+                return wh.getDnsPath(wh.getDatabasePath(db));
+            }
+        } catch (MetaException ex) {
+            throw new HiveException(ex.getMessage());
+        }
+    }
+
+    @Override
+    public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        //Authorize for global level permissions at the warehouse dir
+        Path root;
+        try {
+            root = wh.getWhRoot();
+            authorize(root, readRequiredPriv, writeRequiredPriv);
+        } catch (MetaException ex) {
+            throw new HiveException(ex);
+        }
+    }
+
+    @Override
+    public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        if (db == null) {
+            return;
+        }
+
+        Path path = getDbLocation(db);
+
+        authorize(path, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        if (table == null) {
+            return;
+        }
+
+        //unlike Hive's model, this can be called at CREATE TABLE as well, since we should authorize
+        //against the table's declared location
+        Path path = null;
+        try {
+            if (table.getTTable().getSd().getLocation() == null
+                || table.getTTable().getSd().getLocation().isEmpty()) {
+                path = wh.getTablePath(hive_db.getDatabase(table.getDbName()), table.getTableName());
+            } else {
+                path = table.getPath();
+            }
+        } catch (MetaException ex) {
+            throw new HiveException(ex);
+        }
+
+        authorize(path, readRequiredPriv, writeRequiredPriv);
+    }
+
+    //TODO: HiveAuthorizationProvider should expose this interface instead of #authorize(Partition, Privilege[], Privilege[])
+    public void authorize(Table table, Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+
+        if (part == null || part.getLocation() == null) {
+            authorize(table, readRequiredPriv, writeRequiredPriv);
+        } else {
+            authorize(part.getPartitionPath(), readRequiredPriv, writeRequiredPriv);
+        }
+    }
+
+    @Override
+    public void authorize(Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        if (part == null) {
+            return;
+        }
+        authorize(part.getTable(), part, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Partition part, List<String> columns,
+                          Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
+        //columns cannot live in different files, just check for partition level permissions
+        authorize(table, part, readRequiredPriv, writeRequiredPriv);
+    }
+
+    /**
+     * Authorization privileges against a path.
+     * @param path a filesystem path
+     * @param readRequiredPriv a list of privileges needed for inputs.
+     * @param writeRequiredPriv a list of privileges needed for outputs.
+     */
+    public void authorize(Path path, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        try {
+            EnumSet<FsAction> actions = getFsActions(readRequiredPriv, path);
+            actions.addAll(getFsActions(writeRequiredPriv, path));
+            if (actions.isEmpty()) {
+                return;
+            }
+
+            checkPermissions(getConf(), path, actions);
+
+        } catch (AccessControlException ex) {
+            throw new AuthorizationException(ex);
+        } catch (LoginException ex) {
+            throw new AuthorizationException(ex);
+        } catch (IOException ex) {
+            throw new HiveException(ex);
+        }
+    }
+
+    /**
+     * Checks the permissions for the given path and current user on Hadoop FS. If the given path
+     * does not exists, it checks for it's parent folder.
+     */
+    protected static void checkPermissions(final Configuration conf, final Path path,
+                                           final EnumSet<FsAction> actions) throws IOException, LoginException {
+
+        if (path == null) {
+            throw new IllegalArgumentException("path is null");
+        }
+
+        HadoopShims shims = ShimLoader.getHadoopShims();
+        final UserGroupInformation ugi;
+        if (conf.get(PROXY_USER_NAME) != null) {
+            ugi = UserGroupInformation.createRemoteUser(conf.get(PROXY_USER_NAME));
+        } else {
+            ugi = shims.getUGIForConf(conf);
+        }
+        final String user = shims.getShortUserName(ugi);
+
+        final FileSystem fs = path.getFileSystem(conf);
+
+        if (fs.exists(path)) {
+            checkPermissions(fs, path, actions, user, ugi.getGroupNames());
+        } else if (path.getParent() != null) {
+            // find the ancestor which exists to check it's permissions
+            Path par = path.getParent();
+            while (par != null) {
+                if (fs.exists(par)) {
+                    break;
+                }
+                par = par.getParent();
+            }
+
+            checkPermissions(fs, par, actions, user, ugi.getGroupNames());
+        }
+    }
+
+    /**
+     * Checks the permissions for the given path and current user on Hadoop FS. If the given path
+     * does not exists, it returns.
+     */
+    @SuppressWarnings("deprecation")
+    protected static void checkPermissions(final FileSystem fs, final Path path,
+                                           final EnumSet<FsAction> actions, String user, String[] groups) throws IOException,
+        AccessControlException {
+
+        final FileStatus stat;
+
+        try {
+            stat = fs.getFileStatus(path);
+        } catch (FileNotFoundException fnfe) {
+            // File named by path doesn't exist; nothing to validate.
+            return;
+        } catch (org.apache.hadoop.fs.permission.AccessControlException ace) {
+            // Older hadoop version will throw this @deprecated Exception.
+            throw new AccessControlException(ace.getMessage());
+        }
+
+        final FsPermission dirPerms = stat.getPermission();
+        final String grp = stat.getGroup();
+
+        for (FsAction action : actions) {
+            if (user.equals(stat.getOwner())) {
+                if (dirPerms.getUserAction().implies(action)) {
+                    continue;
+                }
+            }
+            if (ArrayUtils.contains(groups, grp)) {
+                if (dirPerms.getGroupAction().implies(action)) {
+                    continue;
+                }
+            }
+            if (dirPerms.getOtherAction().implies(action)) {
+                continue;
+            }
+            throw new AccessControlException("action " + action + " not permitted on path "
+                + path + " for user " + user);
+        }
     }
-  }
 }
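
[Editor's note] For context, a hedged sketch of exercising this provider directly; the configuration keys and warehouse path are assumptions, not part of this commit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.security.authorization.Privilege;
    import org.apache.hcatalog.security.HdfsAuthorizationProvider;

    public class HdfsAuthzSketch {
        public static void main(String[] args) throws Exception {
            // Assumed Hive keys -- verify against your Hive/HCatalog version.
            Configuration conf = new Configuration();
            conf.setBoolean("hive.security.authorization.enabled", true);
            conf.set("hive.security.authorization.manager",
                HdfsAuthorizationProvider.class.getName());

            // SELECT maps to FsAction.READ in getFsAction(); if the path does not exist,
            // checkPermissions() walks up to the nearest existing ancestor.
            HdfsAuthorizationProvider authorizer = new HdfsAuthorizationProvider(conf);
            authorizer.authorize(new Path("/user/hive/warehouse/mydb.db/mytable"),
                new Privilege[]{Privilege.SELECT}, null);
        }
    }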

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/security/StorageDelegationAuthorizationProvider.java Mon Sep 10 23:28:55 2012
@@ -42,93 +42,93 @@ import org.apache.hcatalog.mapreduce.HCa
  */
 public class StorageDelegationAuthorizationProvider extends HiveAuthorizationProviderBase {
 
-  protected HiveAuthorizationProvider hdfsAuthorizer = new HdfsAuthorizationProvider();
-  
-  protected static Map<String, String> authProviders = new HashMap<String,String>();
-  
-  @Override
-  public void setConf(Configuration conf) {
-    super.setConf(conf);
-    hdfsAuthorizer.setConf(conf);
-  }
-  
-  @Override
-  public void setAuthenticator(HiveAuthenticationProvider authenticator) {
-    super.setAuthenticator(authenticator);
-    hdfsAuthorizer.setAuthenticator(authenticator);
-  }
-  
-  static {
-    registerAuthProvider("org.apache.hadoop.hive.hbase.HBaseStorageHandler",
-        "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
-    registerAuthProvider("org.apache.hcatalog.hbase.HBaseHCatStorageHandler", 
-        "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
-  }
-  
-  //workaround until Hive adds StorageHandler.getAuthorizationProvider(). Remove these parts afterwards
-  public static void registerAuthProvider(String storageHandlerClass, 
-      String authProviderClass) {
-    authProviders.put(storageHandlerClass, authProviderClass);
-  }
-  
-  /** Returns the StorageHandler of the Table obtained from the HCatStorageHandler */
-  protected HiveAuthorizationProvider getDelegate(Table table) throws HiveException {
-    HiveStorageHandler handler =  table.getStorageHandler();
-    
-    if (handler != null) {
-      if (handler instanceof HCatStorageHandler) {
-       return ((HCatStorageHandler) handler).getAuthorizationProvider();
-      } else {
-        String authProviderClass = authProviders.get(handler.getClass().getCanonicalName());
-        
-        if (authProviderClass != null) {
-          try {
-            ReflectionUtils.newInstance(getConf().getClassByName(authProviderClass), getConf());
-          } catch (ClassNotFoundException ex) {
-            throw new HiveException("Cannot instantiate delegation AuthotizationProvider");
-          }
+    protected HiveAuthorizationProvider hdfsAuthorizer = new HdfsAuthorizationProvider();
+
+    protected static Map<String, String> authProviders = new HashMap<String, String>();
+
+    @Override
+    public void setConf(Configuration conf) {
+        super.setConf(conf);
+        hdfsAuthorizer.setConf(conf);
+    }
+
+    @Override
+    public void setAuthenticator(HiveAuthenticationProvider authenticator) {
+        super.setAuthenticator(authenticator);
+        hdfsAuthorizer.setAuthenticator(authenticator);
+    }
+
+    static {
+        registerAuthProvider("org.apache.hadoop.hive.hbase.HBaseStorageHandler",
+            "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
+        registerAuthProvider("org.apache.hcatalog.hbase.HBaseHCatStorageHandler",
+            "org.apache.hcatalog.hbase.HBaseAuthorizationProvider");
+    }
+
+    //workaround until Hive adds StorageHandler.getAuthorizationProvider(). Remove these parts afterwards
+    public static void registerAuthProvider(String storageHandlerClass,
+                                            String authProviderClass) {
+        authProviders.put(storageHandlerClass, authProviderClass);
+    }
+
+    /** Returns the StorageHandler of the Table obtained from the HCatStorageHandler */
+    protected HiveAuthorizationProvider getDelegate(Table table) throws HiveException {
+        HiveStorageHandler handler = table.getStorageHandler();
+
+        if (handler != null) {
+            if (handler instanceof HCatStorageHandler) {
+                return ((HCatStorageHandler) handler).getAuthorizationProvider();
+            } else {
+                String authProviderClass = authProviders.get(handler.getClass().getCanonicalName());
+
+                if (authProviderClass != null) {
+                    try {
+                        ReflectionUtils.newInstance(getConf().getClassByName(authProviderClass), getConf());
+                    } catch (ClassNotFoundException ex) {
+                        throw new HiveException("Cannot instantiate delegation AuthotizationProvider");
+                    }
+                }
+
+                //else we do not have anything to delegate to
+                throw new HiveException(String.format("Storage Handler for table:%s is not an instance " +
+                    "of HCatStorageHandler", table.getTableName()));
+            }
+        } else {
+            //return an authorizer for HDFS
+            return hdfsAuthorizer;
         }
-        
-        //else we do not have anything to delegate to
-        throw new HiveException(String.format("Storage Handler for table:%s is not an instance " +
-            "of HCatStorageHandler", table.getTableName()));
-      }
-    } else {
-      //return an authorizer for HDFS
-      return hdfsAuthorizer;
-    }
-  }
-  
-  @Override
-  public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    //global authorizations against warehouse hdfs directory
-    hdfsAuthorizer.authorize(readRequiredPriv, writeRequiredPriv);
-  }
-
-  @Override
-  public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    //db's are tied to a hdfs location
-    hdfsAuthorizer.authorize(db, readRequiredPriv, writeRequiredPriv);
-  }
-
-  @Override
-  public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-      throws HiveException, AuthorizationException {
-    getDelegate(table).authorize(table, readRequiredPriv, writeRequiredPriv);
-  }
-
-  @Override
-  public void authorize(Partition part, Privilege[] readRequiredPriv, 
-      Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException {
-    getDelegate(part.getTable()).authorize(part, readRequiredPriv, writeRequiredPriv);
-  }
-
-  @Override
-  public void authorize(Table table, Partition part, List<String> columns,
-      Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
-      AuthorizationException {
-    getDelegate(table).authorize(table, part, columns, readRequiredPriv, writeRequiredPriv);
-  }
+    }
+
+    @Override
+    public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        //global authorizations against warehouse hdfs directory
+        hdfsAuthorizer.authorize(readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        //db's are tied to a hdfs location
+        hdfsAuthorizer.authorize(db, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
+        getDelegate(table).authorize(table, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Partition part, Privilege[] readRequiredPriv,
+                          Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException {
+        getDelegate(part.getTable()).authorize(part, readRequiredPriv, writeRequiredPriv);
+    }
+
+    @Override
+    public void authorize(Table table, Partition part, List<String> columns,
+                          Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
+        getDelegate(table).authorize(table, part, columns, readRequiredPriv, writeRequiredPriv);
+    }
 }
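
[Editor's note] A brief sketch of the workaround registry in use, with hypothetical class names:

    import org.apache.hcatalog.security.StorageDelegationAuthorizationProvider;

    public class CustomAuthRegistration {
        static {
            // Hypothetical class names, illustrating only the registerAuthProvider() workaround.
            StorageDelegationAuthorizationProvider.registerAuthProvider(
                "com.example.storage.ExampleStorageHandler",            // storage handler class
                "com.example.security.ExampleAuthorizationProvider");   // its delegate authorizer
        }
    }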

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/shims/HCatHadoopShims.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/shims/HCatHadoopShims.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/shims/HCatHadoopShims.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/shims/HCatHadoopShims.java Mon Sep 10 23:28:55 2012
@@ -39,53 +39,55 @@ import org.apache.pig.ResourceSchema;
  **/
 public interface HCatHadoopShims {
 
-  enum PropertyName { CACHE_ARCHIVES, CACHE_FILES, CACHE_SYMLINK };
+    enum PropertyName {CACHE_ARCHIVES, CACHE_FILES, CACHE_SYMLINK}
 
-  public static abstract class Instance {
-    static HCatHadoopShims instance = selectShim();
+    ;
 
-    public static HCatHadoopShims get() {
-      return instance;
-    }
+    public static abstract class Instance {
+        static HCatHadoopShims instance = selectShim();
 
-    private static HCatHadoopShims selectShim() {
-      // piggyback on Hive's detection logic
-      String major = ShimLoader.getMajorVersion();
-      String shimFQN = "org.apache.hcatalog.shims.HCatHadoopShims20S";
-      if (major.startsWith("0.23")) {
-        shimFQN = "org.apache.hcatalog.shims.HCatHadoopShims23";
-      }
-      try {
-        Class<? extends HCatHadoopShims> clasz = Class.forName(shimFQN)
-            .asSubclass(HCatHadoopShims.class);
-        return clasz.newInstance();
-      } catch (Exception e) {
-        throw new RuntimeException("Failed to instantiate: " + shimFQN, e);
-      }
+        public static HCatHadoopShims get() {
+            return instance;
+        }
+
+        private static HCatHadoopShims selectShim() {
+            // piggyback on Hive's detection logic
+            String major = ShimLoader.getMajorVersion();
+            String shimFQN = "org.apache.hcatalog.shims.HCatHadoopShims20S";
+            if (major.startsWith("0.23")) {
+                shimFQN = "org.apache.hcatalog.shims.HCatHadoopShims23";
+            }
+            try {
+                Class<? extends HCatHadoopShims> clasz = Class.forName(shimFQN)
+                    .asSubclass(HCatHadoopShims.class);
+                return clasz.newInstance();
+            } catch (Exception e) {
+                throw new RuntimeException("Failed to instantiate: " + shimFQN, e);
+            }
+        }
     }
-  }
 
-  public TaskID createTaskID();
+    public TaskID createTaskID();
 
-  public TaskAttemptID createTaskAttemptID();
+    public TaskAttemptID createTaskAttemptID();
 
-  public org.apache.hadoop.mapreduce.TaskAttemptContext createTaskAttemptContext(Configuration conf,
-          TaskAttemptID taskId);
+    public org.apache.hadoop.mapreduce.TaskAttemptContext createTaskAttemptContext(Configuration conf,
+                                                                                   TaskAttemptID taskId);
 
-  public org.apache.hadoop.mapred.TaskAttemptContext createTaskAttemptContext(JobConf conf,
-          org.apache.hadoop.mapred.TaskAttemptID taskId, Progressable progressable);
+    public org.apache.hadoop.mapred.TaskAttemptContext createTaskAttemptContext(JobConf conf,
+                                                                                org.apache.hadoop.mapred.TaskAttemptID taskId, Progressable progressable);
 
-  public JobContext createJobContext(Configuration conf, JobID jobId);
+    public JobContext createJobContext(Configuration conf, JobID jobId);
 
-  public org.apache.hadoop.mapred.JobContext createJobContext(JobConf conf, JobID jobId, Progressable progressable);
+    public org.apache.hadoop.mapred.JobContext createJobContext(JobConf conf, JobID jobId, Progressable progressable);
 
-  public void commitJob(OutputFormat outputFormat, ResourceSchema schema,
-          String arg1, Job job) throws IOException;
+    public void commitJob(OutputFormat outputFormat, ResourceSchema schema,
+                          String arg1, Job job) throws IOException;
 
-  public void abortJob(OutputFormat outputFormat, Job job) throws IOException;
+    public void abortJob(OutputFormat outputFormat, Job job) throws IOException;
 
-  /* Referring to job tracker in 0.20 and resource manager in 0.23 */
-  public InetSocketAddress getResourceManagerAddress(Configuration conf);
+    /* Referring to job tracker in 0.20 and resource manager in 0.23 */
+    public InetSocketAddress getResourceManagerAddress(Configuration conf);
 
-  public String getPropertyName(PropertyName name);
+    public String getPropertyName(PropertyName name);
 }
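
[Editor's note] A short usage sketch (not from this commit) of the shim selection above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hcatalog.shims.HCatHadoopShims;

    public class ShimUsageSketch {
        public static TaskAttemptContext newContext(Configuration conf) {
            // Instance.get() returns HCatHadoopShims20S or HCatHadoopShims23,
            // chosen from ShimLoader.getMajorVersion() at class-load time.
            HCatHadoopShims shims = HCatHadoopShims.Instance.get();
            TaskAttemptID attempt = shims.createTaskAttemptID();
            return shims.createTaskAttemptContext(conf, attempt);
        }
    }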

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/storagehandler/DummyHCatAuthProvider.java Mon Sep 10 23:28:55 2012
@@ -35,109 +35,109 @@ import org.apache.hadoop.hive.ql.securit
  * dummy authorization functionality for other classes to extend and override.
  */
 class DummyHCatAuthProvider implements HiveAuthorizationProvider {
-    
+
     @Override
     public Configuration getConf() {
         return null;
     }
-    
+
     @Override
     public void setConf(Configuration conf) {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #init(org.apache.hadoop.conf.Configuration)
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #init(org.apache.hadoop.conf.Configuration)
+    */
     @Override
     public void init(Configuration conf) throws HiveException {
     }
-    
+
     @Override
     public HiveAuthenticationProvider getAuthenticator() {
         return null;
     }
-    
+
     @Override
     public void setAuthenticator(HiveAuthenticationProvider authenticator) {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.metastore.api.Database,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.metastore.api.Database,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Database db, Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Table table, Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.metadata.Partition,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Partition,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Partition part, Privilege[] readRequiredPriv,
-            Privilege[] writeRequiredPriv) throws HiveException,
-            AuthorizationException {
+                          Privilege[] writeRequiredPriv) throws HiveException,
+        AuthorizationException {
     }
-    
+
     /*
-     * (non-Javadoc)
-     * 
-     * @see
-     * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
-     * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
-     * org.apache.hadoop.hive.ql.metadata.Partition, java.util.List,
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
-     * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
-     */
+    * (non-Javadoc)
+    *
+    * @see
+    * org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider
+    * #authorize(org.apache.hadoop.hive.ql.metadata.Table,
+    * org.apache.hadoop.hive.ql.metadata.Partition, java.util.List,
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[],
+    * org.apache.hadoop.hive.ql.security.authorization.Privilege[])
+    */
     @Override
     public void authorize(Table table, Partition part, List<String> columns,
-            Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
-            throws HiveException, AuthorizationException {
+                          Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv)
+        throws HiveException, AuthorizationException {
     }
-    
+
 }

Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/build.xml
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/build.xml?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/build.xml (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/build.xml Mon Sep 10 23:28:55 2012
@@ -17,314 +17,314 @@
 
 <project name="TestHarnessHCatTests" default="test">
 
-  <property name="hcat.jar"
-    value="${hcat.dir}/share/hcatalog/hcatalog-${hcatalog.version}.jar"/>
+    <property name="hcat.jar"
+              value="${hcat.dir}/share/hcatalog/hcatalog-${hcatalog.version}.jar"/>
 
-  <!-- Separate property name for udfs' build.xml -->
-  <property name="hcat.jarfile" value="${hcat.jar}"/>
-  <property name="udf.dir" value="${basedir}/udfs"/>
-  <property name="udf.java.dir" value="${udf.dir}/java"/>
-  <property name="udf.jar" value="${udf.java.dir}/testudf.jar"/>
-  <property name="params.dir" value="${basedir}/paramfiles"/>
-  <property name="lib.dir" value="${basedir}/lib"/>
-  <property name="rctool.java.dir" value="${basedir}/tools/generate/java"/>
-
-  <property name="tar.name" value="${basedir}/hcattests.tar"/>
-  <property name="tar.dir" value="${basedir}/tar"/>
-  <property name="test.src" value="${basedir}/tests"/>
-  <property name="driver.src" value="${basedir}/drivers"/>
-  <property name="deployer.src" value="${basedir}/deployers"/>
-  <property name="conf.src" value="${basedir}/conf"/>
-  <property name="tool.src" value="${basedir}/tools"/>
-  <property name="data.dir" value="${basedir}/data"/>
-
-  <property name="harness.dir" value="${basedir}/../harness"/>
-  <property name="harness.tar" value="${harness.dir}/harness.tar"/>
-  <property name="test.location" value="${basedir}/testdist"/>
-  <property name="benchmark.location" value="${test.location}/benchmarks"/>
-  <!--<property name="hadoop.core.path" value="${harness.hadoop.home}"/>-->
-  <property name="hadoop.core.path" value="${hadoop.home}"/>
-  <!-- Override on command line to use rpm.conf -->
-  <property name="harness.conf" value="${test.location}/conf/default.conf"/>
-  <!-- Default value for output directory -->
-  <property name="harness.PH_LOCAL" value="out"/>
-
-  <property name="hadoopversion" value="20" />
-
-  <condition property="isHadoop23">
-    <equals arg1="${hadoopversion}" arg2="23"/>
-  </condition>
-
-  <!-- Build the UDFs -->
-  <target name="udfs" >
-    <ant dir="${udf.java.dir}"/>
-  </target>
-
-  <path id="hadoop.core.jar.location">
-    <fileset dir="${hadoop.core.path}">
-      <include name="hadoop-core-*.jar" unless="isHadoop23"/>
-      <include name="**/hadoop-common-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-auth-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-hdfs-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-mapreduce-client-core-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-yarn-api-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-yarn-common-*.jar" if="isHadoop23"/>
-      <include name="**/hadoop-annotations-*.jar" if="isHadoop23"/>
-    </fileset>
-  </path>
-
-  <path id="hive.serde.jar.location">
-    <!-- <fileset dir="${hive.dir}/build/serde"> -->
-    <fileset dir="${hive.home}/lib">
-      <include name="hive-serde-*.jar"/>
-    </fileset>
-  </path>
-
-  <path id="hive.ql.jar.location">
-    <!--<fileset dir="${hive.dir}/build/ql"> -->
-    <fileset dir="${hive.home}/lib">
-      <include name="hive-exec-*.jar"/>
-    </fileset>
-  </path>
-
-  <!-- Build the RCfile data generator -->
-  <target name="rctool" depends="property-check">
-    <ant dir="${rctool.java.dir}">
-      <property name="hive.serde.jarfile" refid="hive.serde.jar.location"/>
-      <property name="hive.ql.jarfile" refid="hive.ql.jar.location"/>
-      <property name="hadoop.core.jarfile" refid="hadoop.core.jar.location"/>
-    </ant>
-  </target>
-
-  <!-- Build an archive to use in the tests -->
-  <target name="tar" description="Create tar file with hcat modules">
-    <mkdir dir="${tar.dir}"/>
-    <mkdir dir="${tar.dir}/tests"/>
-    <mkdir dir="${tar.dir}/drivers"/>
-    <mkdir dir="${tar.dir}/deployers"/>
-    <mkdir dir="${tar.dir}/conf"/>
-    <mkdir dir="${tar.dir}/libexec"/>
-    <mkdir dir="${tar.dir}/libexec/PigTest"/>
-    <mkdir dir="${tar.dir}/libexec/PigTest/test"/>
-    <mkdir dir="${tar.dir}/libexec/PigTest/generate"/>
-    <mkdir dir="${tar.dir}/lib"/>
-    <mkdir dir="${tar.dir}/lib/java"/>
-    <mkdir dir="${tar.dir}/paramfiles"/>
-
-    <copy todir="${tar.dir}/tests">
-        <fileset dir="${test.src}">
+    <!-- Separate property name for udfs' build.xml -->
+    <property name="hcat.jarfile" value="${hcat.jar}"/>
+    <property name="udf.dir" value="${basedir}/udfs"/>
+    <property name="udf.java.dir" value="${udf.dir}/java"/>
+    <property name="udf.jar" value="${udf.java.dir}/testudf.jar"/>
+    <property name="params.dir" value="${basedir}/paramfiles"/>
+    <property name="lib.dir" value="${basedir}/lib"/>
+    <property name="rctool.java.dir" value="${basedir}/tools/generate/java"/>
+
+    <property name="tar.name" value="${basedir}/hcattests.tar"/>
+    <property name="tar.dir" value="${basedir}/tar"/>
+    <property name="test.src" value="${basedir}/tests"/>
+    <property name="driver.src" value="${basedir}/drivers"/>
+    <property name="deployer.src" value="${basedir}/deployers"/>
+    <property name="conf.src" value="${basedir}/conf"/>
+    <property name="tool.src" value="${basedir}/tools"/>
+    <property name="data.dir" value="${basedir}/data"/>
+
+    <property name="harness.dir" value="${basedir}/../harness"/>
+    <property name="harness.tar" value="${harness.dir}/harness.tar"/>
+    <property name="test.location" value="${basedir}/testdist"/>
+    <property name="benchmark.location" value="${test.location}/benchmarks"/>
+    <!--<property name="hadoop.core.path" value="${harness.hadoop.home}"/>-->
+    <property name="hadoop.core.path" value="${hadoop.home}"/>
+    <!-- Override on command line to use rpm.conf -->
+    <property name="harness.conf" value="${test.location}/conf/default.conf"/>
+    <!-- Default value for output directory -->
+    <property name="harness.PH_LOCAL" value="out"/>
+
+    <property name="hadoopversion" value="20"/>
+
+    <condition property="isHadoop23">
+        <equals arg1="${hadoopversion}" arg2="23"/>
+    </condition>
+
+    <!-- Build the UDFs -->
+    <target name="udfs">
+        <ant dir="${udf.java.dir}"/>
+    </target>
+
+    <path id="hadoop.core.jar.location">
+        <fileset dir="${hadoop.core.path}">
+            <include name="hadoop-core-*.jar" unless="isHadoop23"/>
+            <include name="**/hadoop-common-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-auth-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-hdfs-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-mapreduce-client-core-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-yarn-api-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-yarn-common-*.jar" if="isHadoop23"/>
+            <include name="**/hadoop-annotations-*.jar" if="isHadoop23"/>
         </fileset>
-    </copy>
-    
-    <copy todir="${tar.dir}/data">
-        <fileset dir="${data.dir}">
+    </path>
+
+    <path id="hive.serde.jar.location">
+        <!-- <fileset dir="${hive.dir}/build/serde"> -->
+        <fileset dir="${hive.home}/lib">
+            <include name="hive-serde-*.jar"/>
         </fileset>
-    </copy>
+    </path>
 
+    <path id="hive.ql.jar.location">
+        <!--<fileset dir="${hive.dir}/build/ql"> -->
+        <fileset dir="${hive.home}/lib">
+            <include name="hive-exec-*.jar"/>
+        </fileset>
+    </path>
 
-    <copy todir="${tar.dir}">
-      <fileset dir="${driver.src}">
-        <exclude name="TestDriverScript.pm"/>
-      </fileset>
-      <fileset dir="${deployer.src}"/>
-    </copy>
-
-
-    <copy todir="${tar.dir}/conf">
-      <fileset dir="${conf.src}"/>
-    </copy>
-
-    <copy todir="${tar.dir}/libexec/HCatTest">
-      <fileset dir="${tool.src}/test"/>
-      <fileset dir="${tool.src}/generate"/>
-      <fileset dir="${tool.src}/install"/>
-    </copy>
-
-    <copy todir="${tar.dir}/lib/java">
-      <fileset file="${udf.jar}"/>
-    </copy>
-
-    <copy todir="${tar.dir}/paramfiles">
-      <fileset file="${params.dir}/params_3"/>
-    </copy>
-
-    <tar destfile="${tar.name}" basedir="${tar.dir}"/>
-  </target>
-
-  <!-- Get the tarball for the harness -->
-  <target name="build-harness">
-    <ant dir="${harness.dir}" inheritAll="false"/>
-  </target>
-
-  <!-- Check that the necessary properties are setup -->
-  <target name="property-check">
-    <!--
-    <fail message="Please set the property hadoop.home to the location Hadoop is installed "
-      unless="hadoop.home"/>
-      -->
-    <fail message="Please set the property hadoop.home to the location Hadoop is installed ">
-      <condition>
-        <and>
-          <not>
-            <isset property="hadoop.home"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
-    <fail message="Please set the property harness.cluster.conf to the location Hadoop conf is installed ">
-      <condition>
-        <and>
-          <not>
-            <isset property="harness.cluster.conf"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
-    <fail message="Please set the property hive.home to the location Hive is installed ">
-      <condition>
-        <and>
-          <not>
-            <isset property="hive.home"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
-    <fail message="Please set the property hcat.home to the location HCatalog is installed ">
-      <condition>
-        <and>
-          <not>
-            <isset property="hcat.home"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
-    <fail message="Please set the property pig.home to the location Pig is installed ">
-    <condition>
-        <and>
-          <not>
-            <isset property="pig.home"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
-
-    <fail message="Please set the property hbase.home to the location HBase is installed ">
-      <condition>
-        <and>
-          <not>
-            <isset property="hbase.home"/>
-          </not>
-          <not>
-            <contains string="${harness.conf}" substring="rpm.conf"/>
-          </not>
-        </and>
-      </condition>
-    </fail>
-  </target>
-
-  <!-- Prep the test area -->
-  <target name="init-test" depends="build-harness, tar">
-    <mkdir dir="${test.location}"/>
-    <mkdir dir="${benchmark.location}"/>
-
-    <untar src="${tar.name}" dest="${test.location}"/>
-    <untar src="${harness.tar}" dest="${test.location}"/>
-
-    <chmod perm="ugo+x" type="file">
-      <fileset dir="${test.location}/libexec" />
-      <fileset file="${test.location}/test_harness.pl"/>
-    </chmod>
-
-  </target>
-
-  <target name="test" depends="property-check, udfs, tar, init-test">
-
-    <!-- If they have not specified tests to run then null it out -->
-     <property name="tests.to.run" value=""/> 
-    <echo />
-    <exec executable="./test_harness.pl" dir="${test.location}" failonerror="true">
-      <env key="HARNESS_ROOT" value="."/>
-      <env key="PH_LOCAL" value="${harness.PH_LOCAL}"/>
-      <env key="HADOOP_HOME" value="${hadoop.home}"/>
-      <env key="HADOOP_CONF_DIR" value="${harness.cluster.conf}"/>
-      <env key="HIVE_HOME" value="${hive.home}"/>
-      <env key="HCAT_HOME" value="${hcat.home}"/>
-      <env key="PIG_HOME" value="${pig.home}"/>
-      <env key="HBASE_HOME" value="${hbase.home}"/>
-      <arg line="-conf ${harness.conf}"/>
-      <arg line="${tests.to.run}"/>
-      <arg value="${test.location}/tests/pig.conf"/>
-      <arg value="${test.location}/tests/hive.conf"/>
-      <arg value="${test.location}/tests/hcat.conf"/>
-      <arg value="${test.location}/tests/hadoop.conf"/>
-    </exec>
-  </target>
+    <!-- Build the RCfile data generator -->
+    <target name="rctool" depends="property-check">
+        <ant dir="${rctool.java.dir}">
+            <property name="hive.serde.jarfile" refid="hive.serde.jar.location"/>
+            <property name="hive.ql.jarfile" refid="hive.ql.jar.location"/>
+            <property name="hadoop.core.jarfile" refid="hadoop.core.jar.location"/>
+        </ant>
+    </target>
+
+    <!-- Build an archive to use in the tests -->
+    <target name="tar" description="Create tar file with hcat modules">
+        <mkdir dir="${tar.dir}"/>
+        <mkdir dir="${tar.dir}/tests"/>
+        <mkdir dir="${tar.dir}/drivers"/>
+        <mkdir dir="${tar.dir}/deployers"/>
+        <mkdir dir="${tar.dir}/conf"/>
+        <mkdir dir="${tar.dir}/libexec"/>
+        <mkdir dir="${tar.dir}/libexec/PigTest"/>
+        <mkdir dir="${tar.dir}/libexec/PigTest/test"/>
+        <mkdir dir="${tar.dir}/libexec/PigTest/generate"/>
+        <mkdir dir="${tar.dir}/lib"/>
+        <mkdir dir="${tar.dir}/lib/java"/>
+        <mkdir dir="${tar.dir}/paramfiles"/>
+
+        <copy todir="${tar.dir}/tests">
+            <fileset dir="${test.src}">
+            </fileset>
+        </copy>
+
+        <copy todir="${tar.dir}/data">
+            <fileset dir="${data.dir}">
+            </fileset>
+        </copy>
+
+
+        <copy todir="${tar.dir}">
+            <fileset dir="${driver.src}">
+                <exclude name="TestDriverScript.pm"/>
+            </fileset>
+            <fileset dir="${deployer.src}"/>
+        </copy>
+
+
+        <copy todir="${tar.dir}/conf">
+            <fileset dir="${conf.src}"/>
+        </copy>
+
+        <copy todir="${tar.dir}/libexec/HCatTest">
+            <fileset dir="${tool.src}/test"/>
+            <fileset dir="${tool.src}/generate"/>
+            <fileset dir="${tool.src}/install"/>
+        </copy>
+
+        <copy todir="${tar.dir}/lib/java">
+            <fileset file="${udf.jar}"/>
+        </copy>
+
+        <copy todir="${tar.dir}/paramfiles">
+            <fileset file="${params.dir}/params_3"/>
+        </copy>
+
+        <tar destfile="${tar.name}" basedir="${tar.dir}"/>
+    </target>
+
+    <!-- Get the tarball for the harness -->
+    <target name="build-harness">
+        <ant dir="${harness.dir}" inheritAll="false"/>
+    </target>
+
+    <!-- Check that the necessary properties are setup -->
+    <target name="property-check">
+        <!--
+      <fail message="Please set the property hadoop.home to the location Hadoop is installed "
+        unless="hadoop.home"/>
+        -->
+        <fail message="Please set the property hadoop.home to the location Hadoop is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="hadoop.home"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
+        <fail message="Please set the property harness.cluster.conf to the location Hadoop conf is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="harness.cluster.conf"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
+        <fail message="Please set the property hive.home to the location Hive is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="hive.home"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
+        <fail message="Please set the property hcat.home to the location HCatalog is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="hcat.home"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
+        <fail message="Please set the property pig.home to the location Pig is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="pig.home"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
+
+        <fail message="Please set the property hbase.home to the location HBase is installed ">
+            <condition>
+                <and>
+                    <not>
+                        <isset property="hbase.home"/>
+                    </not>
+                    <not>
+                        <contains string="${harness.conf}" substring="rpm.conf"/>
+                    </not>
+                </and>
+            </condition>
+        </fail>
+    </target>
+
+    <!-- Prep the test area -->
+    <target name="init-test" depends="build-harness, tar">
+        <mkdir dir="${test.location}"/>
+        <mkdir dir="${benchmark.location}"/>
+
+        <untar src="${tar.name}" dest="${test.location}"/>
+        <untar src="${harness.tar}" dest="${test.location}"/>
+
+        <chmod perm="ugo+x" type="file">
+            <fileset dir="${test.location}/libexec"/>
+            <fileset file="${test.location}/test_harness.pl"/>
+        </chmod>
+
+    </target>
+
+    <target name="test" depends="property-check, udfs, tar, init-test">
+
+        <!-- If they have not specified tests to run then null it out -->
+        <property name="tests.to.run" value=""/>
+        <echo/>
+        <exec executable="./test_harness.pl" dir="${test.location}" failonerror="true">
+            <env key="HARNESS_ROOT" value="."/>
+            <env key="PH_LOCAL" value="${harness.PH_LOCAL}"/>
+            <env key="HADOOP_HOME" value="${hadoop.home}"/>
+            <env key="HADOOP_CONF_DIR" value="${harness.cluster.conf}"/>
+            <env key="HIVE_HOME" value="${hive.home}"/>
+            <env key="HCAT_HOME" value="${hcat.home}"/>
+            <env key="PIG_HOME" value="${pig.home}"/>
+            <env key="HBASE_HOME" value="${hbase.home}"/>
+            <arg line="-conf ${harness.conf}"/>
+            <arg line="${tests.to.run}"/>
+            <arg value="${test.location}/tests/pig.conf"/>
+            <arg value="${test.location}/tests/hive.conf"/>
+            <arg value="${test.location}/tests/hcat.conf"/>
+            <arg value="${test.location}/tests/hadoop.conf"/>
+        </exec>
+    </target>
 
-  <target name="init-deploy" depends="rctool">
-     <!-- For now default to the existing cluster deployer, since 
+    <target name="init-deploy" depends="rctool">
+        <!-- For now default to the existing cluster deployer, since
     it's all there is.  Once the local deployer is available that
     should be the default. -->
-   <property name="deploy.conf"
-        value="${test.location}/conf/existing_deployer.conf"/>
-  </target>
-
-  <target name="deploy-base" depends="property-check, tar, init-test, init-deploy">
-    <exec executable="./test_harness.pl" dir="${test.location}"
-      failonerror="true">
-      <env key="HARNESS_ROOT" value="."/>
-      <env key="PH_LOCAL" value="${harness.PH_LOCAL}"/>
-      <env key="HADOOP_HOME" value="${hadoop.home}"/>
-      <env key="HIVE_HOME" value="${hive.home}"/>
-      <env key="HCAT_HOME" value="${hcat.home}"/>
-      <env key="PIG_HOME" value="${pig.home}"/>
-      <env key="HBASE_HOME" value="${hbase.home}"/>
-      <arg line="-conf ${harness.conf}"/>
-      <arg value="-deploycfg"/>
-      <arg value="${deploy.conf}"/>
-      <arg value="${deploy.opt}"/>
-      <!-- Give a bogus test so it just does the deployment -->
-      <arg value="-t"/>
-      <arg value="NoSuchTest"/>
-    </exec>
-  </target>
-
-  <target name="deploy">
-    <antcall target="deploy-base">
-      <param name="deploy.opt" value="-deploy"/>
-    </antcall>
-  </target>
-
-  <target name="undeploy">
-    <antcall target="deploy-base">
-      <param name="deploy.opt" value="-undeploy"/>
-    </antcall>
-  </target>
-
-  <target name="deploy-test" depends="deploy, test"/>
-
-  <target name="deploy-test-undeploy" depends="deploy, test, undeploy"/>
-
-  <target name="clean">
-    <delete dir="${test.location}"/>
-    <delete file="${tar.name}"/>
-    <delete dir="${tar.dir}"/>
-    <ant dir="${udf.java.dir}" target="clean"/>
-  </target>
+        <property name="deploy.conf"
+                  value="${test.location}/conf/existing_deployer.conf"/>
+    </target>
+
+    <target name="deploy-base" depends="property-check, tar, init-test, init-deploy">
+        <exec executable="./test_harness.pl" dir="${test.location}"
+              failonerror="true">
+            <env key="HARNESS_ROOT" value="."/>
+            <env key="PH_LOCAL" value="${harness.PH_LOCAL}"/>
+            <env key="HADOOP_HOME" value="${hadoop.home}"/>
+            <env key="HIVE_HOME" value="${hive.home}"/>
+            <env key="HCAT_HOME" value="${hcat.home}"/>
+            <env key="PIG_HOME" value="${pig.home}"/>
+            <env key="HBASE_HOME" value="${hbase.home}"/>
+            <arg line="-conf ${harness.conf}"/>
+            <arg value="-deploycfg"/>
+            <arg value="${deploy.conf}"/>
+            <arg value="${deploy.opt}"/>
+            <!-- Give a bogus test so it just does the deployment -->
+            <arg value="-t"/>
+            <arg value="NoSuchTest"/>
+        </exec>
+    </target>
+
+    <target name="deploy">
+        <antcall target="deploy-base">
+            <param name="deploy.opt" value="-deploy"/>
+        </antcall>
+    </target>
+
+    <target name="undeploy">
+        <antcall target="deploy-base">
+            <param name="deploy.opt" value="-undeploy"/>
+        </antcall>
+    </target>
+
+    <target name="deploy-test" depends="deploy, test"/>
+
+    <target name="deploy-test-undeploy" depends="deploy, test, undeploy"/>
+
+    <target name="clean">
+        <delete dir="${test.location}"/>
+        <delete file="${tar.name}"/>
+        <delete dir="${tar.dir}"/>
+        <ant dir="${udf.java.dir}" target="clean"/>
+    </target>
 
 </project>
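
For orientation, a minimal sketch of a wrapper build file that supplies the install locations the property-check target above expects and then invokes the e2e "test" target. The property names come from this build.xml; the paths and the wrapper itself are placeholders for illustration only, not part of this commit.

<project name="run-hcat-e2e" default="run">
    <!-- Hypothetical wrapper: forwards the locations required by property-check to the e2e build. -->
    <target name="run">
        <ant antfile="build.xml" dir="src/test/e2e/hcatalog" target="test" inheritAll="false">
            <!-- Adjust these paths to the local installations; property-check fails without them
                 unless the harness configuration points at an rpm.conf. -->
            <property name="hadoop.home" value="/usr/lib/hadoop"/>
            <property name="harness.cluster.conf" value="/etc/hadoop/conf"/>
            <property name="hive.home" value="/usr/lib/hive"/>
            <property name="hcat.home" value="/usr/lib/hcatalog"/>
            <property name="pig.home" value="/usr/lib/pig"/>
            <property name="hbase.home" value="/usr/lib/hbase"/>
        </ant>
    </target>
</project>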
 

Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/tools/generate/java/build.xml
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/tools/generate/java/build.xml?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/tools/generate/java/build.xml (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/tools/generate/java/build.xml Mon Sep 10 23:28:55 2012
@@ -15,57 +15,56 @@
 
 <project name="Hive-Data-Generator" default="generator-jar">
 
-    <property name="generator.jarfile" value="hive-gen.jar" />
-    <property name="generator.build.dir" value="${basedir}/build" />
-    <property name="generator.src.dir" value="${basedir}/org" />
+    <property name="generator.jarfile" value="hive-gen.jar"/>
+    <property name="generator.build.dir" value="${basedir}/build"/>
+    <property name="generator.src.dir" value="${basedir}/org"/>
 
 
     <path id="generator-classpath">
-        <fileset file="${hive.serde.jarfile}" />
-        <fileset file="${hive.ql.jarfile}" />
-        <fileset file="${hadoop.core.jarfile}" />
+        <fileset file="${hive.serde.jarfile}"/>
+        <fileset file="${hive.ql.jarfile}"/>
+        <fileset file="${hadoop.core.jarfile}"/>
     </path>
 
     <target name="init">
-        <mkdir dir="${generator.build.dir}" />
+        <mkdir dir="${generator.build.dir}"/>
     </target>
 
     <target name="clean">
-        <delete dir="${generator.build.dir}" />
-        <delete file="${generator.jarfile}" />
+        <delete dir="${generator.build.dir}"/>
+        <delete file="${generator.jarfile}"/>
     </target>
 
     <target name="generator-compile"
             depends="init, serde.jar.check, ql.jar.check, hadoop.jar.check">
         <echo>*** Compiling UDFs ***</echo>
-        <javac srcdir="${generator.src.dir}" destdir="${generator.build.dir}" debug="on" includeantruntime="false" includes="**/*.java">
-            <classpath refid="generator-classpath" />
+        <javac srcdir="${generator.src.dir}" destdir="${generator.build.dir}" debug="on" includeantruntime="false"
+               includes="**/*.java">
+            <classpath refid="generator-classpath"/>
         </javac>
     </target>
 
     <target name="generator-jar" depends="generator-compile">
         <echo>*** Creating UDF jar ***</echo>
         <jar duplicate="preserve" jarfile="${generator.jarfile}">
-	    <fileset dir="build"/>
+            <fileset dir="build"/>
         </jar>
     </target>
 
-	<target name="serde.jar.check" unless="hive.serde.jarfile">
-	 	<fail message="'hive.serde.jarfile' is not defined. 
-		Please pass -Dhive.serde.jarfile=&lt;Hive serde jar to use&gt; to Ant on the command-line." />
-	</target>
-
-	<target name="ql.jar.check" unless="hive.ql.jarfile">
-	 	<fail message="'hive.ql.jarfile' is not defined. 
-		Please pass -Dhive.ql.jarfile=&lt;Hive ql jar to use&gt; to Ant on the command-line." />
-	</target>
-
-	<target name="hadoop.jar.check" unless="hadoop.core.jarfile">
-	 	<fail message="'hadoop.core.jarfile' is not defined. 
-		Please pass -Dhadoop.core.jarfile=&lt;Hadoop core jar to use&gt; to Ant on the command-line." />
-	</target>
+    <target name="serde.jar.check" unless="hive.serde.jarfile">
+        <fail message="'hive.serde.jarfile' is not defined.
+		Please pass -Dhive.serde.jarfile=&lt;Hive serde jar to use&gt; to Ant on the command-line."/>
+    </target>
 
+    <target name="ql.jar.check" unless="hive.ql.jarfile">
+        <fail message="'hive.ql.jarfile' is not defined.
+		Please pass -Dhive.ql.jarfile=&lt;Hive ql jar to use&gt; to Ant on the command-line."/>
+    </target>
 
+    <target name="hadoop.jar.check" unless="hadoop.core.jarfile">
+        <fail message="'hadoop.core.jarfile' is not defined.
+		Please pass -Dhadoop.core.jarfile=&lt;Hadoop core jar to use&gt; to Ant on the command-line."/>
+    </target>
 
 
 </project>

Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/tools/generate/java/org/apache/hadoop/hive/tools/generate/RCFileGenerator.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/tools/generate/java/org/apache/hadoop/hive/tools/generate/RCFileGenerator.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/tools/generate/java/org/apache/hadoop/hive/tools/generate/RCFileGenerator.java (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/tools/generate/java/org/apache/hadoop/hive/tools/generate/RCFileGenerator.java Mon Sep 10 23:28:55 2012
@@ -46,24 +46,24 @@ public class RCFileGenerator {
     private static Random rand;
 
     private static Path getFile(String filename) throws Exception {
-       return new Path(basedir, filename);
+        return new Path(basedir, filename);
     }
 
     private static String[] firstName = {"alice", "bob", "calvin", "david",
-      "ethan", "fred", "gabriella", "holly", "irene", "jessica", "katie",
-      "luke", "mike", "nick", "oscar", "priscilla", "quinn", "rachel",
-      "sarah", "tom", "ulysses", "victor", "wendy", "xavier", "yuri",
-      "zach"};
+        "ethan", "fred", "gabriella", "holly", "irene", "jessica", "katie",
+        "luke", "mike", "nick", "oscar", "priscilla", "quinn", "rachel",
+        "sarah", "tom", "ulysses", "victor", "wendy", "xavier", "yuri",
+        "zach"};
 
     private static String[] lastName = {"allen", "brown", "carson",
-      "davidson", "ellison", "falkner", "garcia", "hernandez", "ichabod",
-      "johnson", "king", "laertes", "miller", "nixon", "ovid", "polk",
-      "quirinius", "robinson", "steinbeck", "thompson", "underhill",
-      "van buren", "white", "xylophone", "young", "zipper"};
+        "davidson", "ellison", "falkner", "garcia", "hernandez", "ichabod",
+        "johnson", "king", "laertes", "miller", "nixon", "ovid", "polk",
+        "quirinius", "robinson", "steinbeck", "thompson", "underhill",
+        "van buren", "white", "xylophone", "young", "zipper"};
 
     private static String randomName() {
         StringBuffer buf =
-            new StringBuffer(firstName[rand.nextInt(firstName.length)]);
+                new StringBuffer(firstName[rand.nextInt(firstName.length)]);
         buf.append(' ');
         buf.append(lastName[rand.nextInt(lastName.length)]);
         return buf.toString();
@@ -115,8 +115,8 @@ public class RCFileGenerator {
     }
 
     private static void usage() {
-        System.err.println("Usage: rcfilegen format number_of_rows " + 
-            "output_file plain_output_file");
+        System.err.println("Usage: rcfilegen format number_of_rows " +
+                "output_file plain_output_file");
         System.err.println("  format one of:  student voter alltypes");
         System.exit(1);
     }
@@ -140,7 +140,7 @@ public class RCFileGenerator {
                                 int numRows,
                                 String output, String plainOutput) throws Exception {
         int numFields = 0;
-        if (format.equals("student")) { 
+        if (format.equals("student")) {
             rand = new Random(numRows);
             numFields = 3;
         } else if (format.equals("voter")) {
@@ -153,8 +153,8 @@ public class RCFileGenerator {
 
         RCFileOutputFormat.setColumnNumber(conf, numFields);
         RCFile.Writer writer = new RCFile.Writer(fs, conf, getFile(output),
-            null, new DefaultCodec());
-        
+                null, new DefaultCodec());
+
         PrintWriter pw = new PrintWriter(new FileWriter(plainOutput));
 
         for (int j = 0; j < numRows; j++) {
@@ -164,30 +164,30 @@ public class RCFileGenerator {
 
             if (format.equals("student")) {
                 byte[][] f = {
-                    randomName().getBytes("UTF-8"),
-                    Integer.valueOf(randomAge()).toString().getBytes("UTF-8"),
-                    Double.valueOf(randomGpa()).toString().getBytes("UTF-8")
+                        randomName().getBytes("UTF-8"),
+                        Integer.valueOf(randomAge()).toString().getBytes("UTF-8"),
+                        Double.valueOf(randomGpa()).toString().getBytes("UTF-8")
                 };
                 fields = f;
             } else if (format.equals("voter")) {
                 byte[][] f = {
-                    randomName().getBytes("UTF-8"),
-                    Integer.valueOf(randomAge()).toString().getBytes("UTF-8"),
-                    randomRegistration().getBytes("UTF-8"),
-                    Double.valueOf(randomContribution()).toString().getBytes("UTF-8")
+                        randomName().getBytes("UTF-8"),
+                        Integer.valueOf(randomAge()).toString().getBytes("UTF-8"),
+                        randomRegistration().getBytes("UTF-8"),
+                        Double.valueOf(randomContribution()).toString().getBytes("UTF-8")
                 };
                 fields = f;
             } else if (format.equals("alltypes")) {
                 byte[][] f = {
-                    Integer.valueOf(rand.nextInt(Byte.MAX_VALUE)).toString().getBytes("UTF-8"),
-                    Integer.valueOf(rand.nextInt(Short.MAX_VALUE)).toString().getBytes("UTF-8"),
-                    Integer.valueOf(rand.nextInt()).toString().getBytes("UTF-8"),
-                    Long.valueOf(rand.nextLong()).toString().getBytes("UTF-8"),
-                    Float.valueOf(rand.nextFloat() * 1000).toString().getBytes("UTF-8"),
-                    Double.valueOf(rand.nextDouble() * 1000000).toString().getBytes("UTF-8"),
-                    randomName().getBytes("UTF-8"),
-                    randomMap(),
-                    randomArray()
+                        Integer.valueOf(rand.nextInt(Byte.MAX_VALUE)).toString().getBytes("UTF-8"),
+                        Integer.valueOf(rand.nextInt(Short.MAX_VALUE)).toString().getBytes("UTF-8"),
+                        Integer.valueOf(rand.nextInt()).toString().getBytes("UTF-8"),
+                        Long.valueOf(rand.nextLong()).toString().getBytes("UTF-8"),
+                        Float.valueOf(rand.nextFloat() * 1000).toString().getBytes("UTF-8"),
+                        Double.valueOf(rand.nextDouble() * 1000000).toString().getBytes("UTF-8"),
+                        randomName().getBytes("UTF-8"),
+                        randomMap(),
+                        randomArray()
                 };
                 fields = f;
             }
@@ -195,10 +195,10 @@ public class RCFileGenerator {
 
             for (int i = 0; i < fields.length; i++) {
                 BytesRefWritable field = new BytesRefWritable(fields[i], 0,
-                    fields[i].length);
+                        fields[i].length);
                 row.set(i, field);
                 pw.print(new String(fields[i]));
-                if (i!=fields.length-1)
+                if (i != fields.length - 1)
                     pw.print("\t");
                 else
                     pw.println();
@@ -209,6 +209,6 @@ public class RCFileGenerator {
 
         writer.close();
         pw.close();
-  }
+    }
 }
 

Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/build.xml
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/build.xml?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/build.xml (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/build.xml Mon Sep 10 23:28:55 2012
@@ -15,35 +15,35 @@
 
 <project name="HCatalog-test-utils" default="udf-jar">
 
-    <property name="udf.jarfile" value="testudf.jar" />
-    <property name="udfs.build.dir" value="${basedir}/build" />
-    <property name="udfs.src.dir" value="${basedir}/org/" />
+    <property name="udf.jarfile" value="testudf.jar"/>
+    <property name="udfs.build.dir" value="${basedir}/build"/>
+    <property name="udfs.src.dir" value="${basedir}/org/"/>
 
     <path id="udf-classpath">
-       <fileset file="../../../../../../build/hcatalog/*.jar" />
-       <fileset file="../../../../../../build/ivy/lib/default/*.jar" />
+        <fileset file="../../../../../../build/hcatalog/*.jar"/>
+        <fileset file="../../../../../../build/ivy/lib/default/*.jar"/>
     </path>
 
     <target name="init">
-        <mkdir dir="${udfs.build.dir}" />
+        <mkdir dir="${udfs.build.dir}"/>
     </target>
 
     <target name="clean">
-        <delete dir="${udfs.build.dir}" />
-        <delete file="${udf.jarfile}" />
+        <delete dir="${udfs.build.dir}"/>
+        <delete file="${udf.jarfile}"/>
     </target>
 
     <target name="udf-compile" depends="init">
         <echo>*** Compiling UDFs ***</echo>
         <javac srcdir="${udfs.src.dir}" destdir="${udfs.build.dir}" debug="on">
-            <classpath refid="udf-classpath" />
+            <classpath refid="udf-classpath"/>
         </javac>
     </target>
 
     <target name="udf-jar" depends="udf-compile">
         <echo>*** Creating UDF jar ***</echo>
         <jar duplicate="preserve" jarfile="${udf.jarfile}">
-	    <fileset dir="build"/>
+            <fileset dir="build"/>
         </jar>
     </target>
 </project>


