incubator-hcatalog-commits mailing list archives

From ga...@apache.org
Subject svn commit: r1091509 [6/8] - in /incubator/hcatalog/trunk: ./ bin/ ivy/ src/ src/docs/ src/docs/src/ src/docs/src/documentation/ src/docs/src/documentation/classes/ src/docs/src/documentation/conf/ src/docs/src/documentation/content/ src/docs/src/docum...
Date Tue, 12 Apr 2011 17:30:12 GMT
Added: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileInputDriver.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileInputDriver.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileInputDriver.java (added)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileInputDriver.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,230 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.rcfile;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.serde.Constants;
+import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
+import org.apache.hadoop.hive.serde2.SerDe;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
+import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
+import org.apache.hadoop.hive.serde2.columnar.ColumnarStruct;
+import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hcatalog.common.HCatUtil;
+import org.apache.hcatalog.data.DefaultHCatRecord;
+import org.apache.hcatalog.data.HCatRecord;
+import org.apache.hcatalog.data.schema.HCatFieldSchema;
+import org.apache.hcatalog.data.schema.HCatSchema;
+import org.apache.hcatalog.mapreduce.HCatInputStorageDriver;
+
+public class RCFileInputDriver extends HCatInputStorageDriver{
+
+
+  private SerDe serde;
+  private static final Log LOG = LogFactory.getLog(RCFileInputDriver.class);
+  private List<HCatFieldSchema> colsInData;
+  private StructObjectInspector oi;
+  private Map<String,String> partValues;
+  private List<HCatFieldSchema> outCols;
+  private List<? extends StructField> structFields;
+  private Map<String,Integer> namePosMapping;
+
+  @Override
+  public InputFormat<? extends WritableComparable, ? extends Writable> getInputFormat(Properties howlProperties) {
+    return new RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable>();
+  }
+
+  @Override
+  public void setInputPath(JobContext jobContext, String location) throws IOException {
+
+    super.setInputPath(jobContext, location);
+  }
+
+  @Override
+  public void setOriginalSchema(JobContext jobContext, HCatSchema dataSchema) throws IOException {
+
+    colsInData = dataSchema.getFields();
+    namePosMapping = new HashMap<String, Integer>(colsInData.size());
+    int index =0;
+    for(HCatFieldSchema field : dataSchema.getFields()){
+      namePosMapping.put(field.getName(), index++);
+    }
+  }
+
+  @Override
+  public void setOutputSchema(JobContext jobContext, HCatSchema desiredSchema) throws IOException {
+
+    // Find out which column IDs need to be projected and set them up for RCFile.
+    outCols = desiredSchema.getFields();
+    ArrayList<Integer> prjColumns = new ArrayList<Integer>();
+    for(HCatFieldSchema prjCol : outCols){
+      Integer pos = namePosMapping.get(prjCol.getName().toLowerCase());
+      if(pos != null) {
+        prjColumns.add(pos);
+      }
+    }
+
+    Collections.sort(prjColumns);
+    ColumnProjectionUtils.setReadColumnIDs(jobContext.getConfiguration(), prjColumns);
+  }
+
+  @Override
+  public void setPartitionValues(JobContext jobContext, Map<String, String> partitionValues)
+  throws IOException {
+    partValues = partitionValues;
+  }
+
+  @Override
+  public HCatRecord convertToHCatRecord(WritableComparable ignored, Writable bytesRefArray) throws IOException {
+
+    // Deserialize bytesRefArray into a ColumnarStruct and then convert that
+    // struct into an HCatRecord.
+    ColumnarStruct struct;
+    try {
+      struct = (ColumnarStruct)serde.deserialize(bytesRefArray);
+    } catch (SerDeException e) {
+      LOG.error(e.toString(), e);
+      throw new IOException(e);
+    }
+
+    List<Object> outList = new ArrayList<Object>(outCols.size());
+
+    String colName;
+    Integer index;
+
+    for(HCatFieldSchema col : outCols){
+
+      colName = col.getName().toLowerCase();
+      index = namePosMapping.get(colName);
+
+      if(index != null){
+        StructField field = structFields.get(index);
+        outList.add( getTypedObj(oi.getStructFieldData(struct, field), field.getFieldObjectInspector()));
+      }
+
+      else {
+        outList.add(partValues.get(colName));
+      }
+
+    }
+    return new DefaultHCatRecord(outList);
+  }
+
+  private Object getTypedObj(Object data, ObjectInspector oi) throws IOException{
+
+    // The real workhorse method. We give up the laziness benefits of
+    // Hive's RCFile by deserializing everything and building an HCatRecord
+    // of plain Java objects, because the higher layers may not know how to
+    // handle the lazy objects.
+
+    switch(oi.getCategory()){
+
+    case PRIMITIVE:
+      return ((PrimitiveObjectInspector)oi).getPrimitiveJavaObject(data);
+
+    case MAP:
+      MapObjectInspector moi = (MapObjectInspector)oi;
+      Map<?,?> lazyMap = moi.getMap(data);
+      ObjectInspector keyOI = moi.getMapKeyObjectInspector();
+      ObjectInspector valOI = moi.getMapValueObjectInspector();
+      Map<Object,Object> typedMap = new HashMap<Object,Object>(lazyMap.size());
+      for(Entry<?,?> e : lazyMap.entrySet()){
+        typedMap.put(getTypedObj(e.getKey(), keyOI), getTypedObj(e.getValue(), valOI));
+      }
+      return typedMap;
+
+    case LIST:
+      ListObjectInspector loi = (ListObjectInspector)oi;
+      List<?> lazyList = loi.getList(data);
+      ObjectInspector elemOI = loi.getListElementObjectInspector();
+      List<Object> typedList = new ArrayList<Object>(lazyList.size());
+      Iterator<?> itr = lazyList.listIterator();
+      while(itr.hasNext()){
+        typedList.add(getTypedObj(itr.next(),elemOI));
+      }
+      return typedList;
+
+    case STRUCT:
+      StructObjectInspector soi = (StructObjectInspector)oi;
+      List<? extends StructField> fields = soi.getAllStructFieldRefs();
+      List<Object> typedStruct = new ArrayList<Object>(fields.size());
+      for(StructField field : fields){
+        typedStruct.add( getTypedObj(soi.getStructFieldData(data, field), field.getFieldObjectInspector()));
+      }
+      return typedStruct;
+
+
+    default:
+      throw new IOException("Don't know how to deserialize: "+oi.getCategory());
+
+    }
+  }
+
+  @Override
+  public void initialize(JobContext context,Properties howlProperties)
+  throws IOException {
+
+    super.initialize(context, howlProperties);
+
+    // ColumnarSerDe needs to know the names and types of the columns it needs to read.
+    List<FieldSchema> fields = HCatUtil.getFieldSchemaList(colsInData);
+    howlProperties.setProperty(Constants.LIST_COLUMNS,MetaStoreUtils.
+        getColumnNamesFromFieldSchema(fields));
+    howlProperties.setProperty(Constants.LIST_COLUMN_TYPES, MetaStoreUtils.
+        getColumnTypesFromFieldSchema(fields));
+
+    // RCFile seems to read and write nulls differently than default Hive, so
+    // set these properties to match LazySimpleSerDe.
+    howlProperties.setProperty(Constants.SERIALIZATION_NULL_FORMAT, "\\N");
+    howlProperties.setProperty(Constants.SERIALIZATION_FORMAT, "1");
+
+    try {
+      serde = new ColumnarSerDe();
+      serde.initialize(context.getConfiguration(), howlProperties);
+      oi = (StructObjectInspector) serde.getObjectInspector();
+      structFields = oi.getAllStructFieldRefs();
+
+    } catch (SerDeException e) {
+      throw new IOException(e);
+    }
+  }
+}

Added: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java (added)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceInputFormat.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.rcfile;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+
+public class RCFileMapReduceInputFormat<K extends LongWritable,V extends BytesRefArrayWritable>
+extends FileInputFormat<LongWritable, BytesRefArrayWritable>
+{
+
+  @Override
+  public RecordReader<LongWritable,BytesRefArrayWritable> createRecordReader(InputSplit split,
+      TaskAttemptContext context) throws IOException, InterruptedException {
+
+    context.setStatus(split.toString());
+    return new RCFileMapReduceRecordReader<LongWritable,BytesRefArrayWritable>();
+  }
+
+  @Override
+  public List<InputSplit> getSplits(JobContext job) throws IOException {
+
+    job.getConfiguration().setLong("mapred.min.split.size", SequenceFile.SYNC_INTERVAL);
+    return super.getSplits(job);
+  }
+}
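
For context, RCFileMapReduceInputFormat drops into an ordinary new-API MapReduce job like any other FileInputFormat. A minimal, hypothetical read job might look like the sketch below (the driver class, mapper, and paths are illustrative, assuming the 0.20-era mapreduce API used throughout this commit):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hcatalog.rcfile.RCFileMapReduceInputFormat;

public class RCFileReadJob {

  // Identity-style mapper: each input value is one RCFile row, delivered as
  // a BytesRefArrayWritable of per-column byte ranges.
  public static class RowMapper
      extends Mapper<LongWritable, BytesRefArrayWritable, LongWritable, BytesRefArrayWritable> {
    @Override
    protected void map(LongWritable key, BytesRefArrayWritable row, Context ctx)
        throws IOException, InterruptedException {
      ctx.write(key, row);
    }
  }

  public static void main(String[] args) throws Exception {
    Job job = new Job(new Configuration(), "rcfile-read");
    job.setJarByClass(RCFileReadJob.class);
    job.setInputFormatClass(RCFileMapReduceInputFormat.class);
    job.setMapperClass(RowMapper.class);
    job.setNumReduceTasks(0);                         // map-only pass over the rows
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(BytesRefArrayWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

Column projection, as set up by RCFileInputDriver.setOutputSchema via ColumnProjectionUtils, lets the reader skip column groups the job does not need.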

Added: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java (added)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceOutputFormat.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.rcfile;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.RCFile;
+import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.DefaultCodec;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * The RCFile output format using the new Hadoop mapreduce APIs.
+ */
+public class RCFileMapReduceOutputFormat extends
+    FileOutputFormat<WritableComparable<?>, BytesRefArrayWritable> {
+
+  /**
+   * Sets the number of columns in the given configuration.
+   * @param conf
+   *          configuration instance in which to set the column number
+   * @param columnNum
+   *          the number of columns for RCFile's Writer
+   *
+   */
+  public static void setColumnNumber(Configuration conf, int columnNum) {
+    assert columnNum > 0;
+    conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNum);
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.mapreduce.lib.output.FileOutputFormat#getRecordWriter(org.apache.hadoop.mapreduce.TaskAttemptContext)
+   */
+  @Override
+  public org.apache.hadoop.mapreduce.RecordWriter<WritableComparable<?>, BytesRefArrayWritable> getRecordWriter(
+      TaskAttemptContext task) throws IOException, InterruptedException {
+
+    //FileOutputFormat.getWorkOutputPath takes TaskInputOutputContext instead of
+    //TaskAttemptContext, so can't use that here
+    FileOutputCommitter committer = (FileOutputCommitter) getOutputCommitter(task);
+    Path outputPath = committer.getWorkPath();
+
+    FileSystem fs = outputPath.getFileSystem(task.getConfiguration());
+
+    if (!fs.exists(outputPath)) {
+      fs.mkdirs(outputPath);
+    }
+
+    Path file = getDefaultWorkFile(task, "");
+
+    CompressionCodec codec = null;
+    if (getCompressOutput(task)) {
+      Class<?> codecClass = getOutputCompressorClass(task, DefaultCodec.class);
+      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, task.getConfiguration());
+    }
+
+    final RCFile.Writer out = new RCFile.Writer(fs, task.getConfiguration(), file, task, codec);
+
+    return new RecordWriter<WritableComparable<?>, BytesRefArrayWritable>() {
+
+      /* (non-Javadoc)
+       * @see org.apache.hadoop.mapreduce.RecordWriter#write(java.lang.Object, java.lang.Object)
+       */
+      @Override
+      public void write(WritableComparable<?> key, BytesRefArrayWritable value)
+          throws IOException {
+        out.append(value);
+      }
+
+      /* (non-Javadoc)
+       * @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext)
+       */
+      @Override
+      public void close(TaskAttemptContext task) throws IOException, InterruptedException {
+        out.close();
+      }
+    };
+  }
+
+}
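
A corresponding, hypothetical write job for this output format is sketched below; the class, column count, and paths are illustrative. The one format-specific requirement visible in the code above is that setColumnNumber must be called before the record writer is created, so RCFile.Writer knows how many columns each row carries.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hcatalog.rcfile.RCFileMapReduceOutputFormat;

public class RCFileWriteJob {

  static final int NUM_COLUMNS = 3;   // assumed column count for this sketch

  // Turns a tab-separated text line into one RCFile row of NUM_COLUMNS columns.
  public static class ToColumnsMapper
      extends Mapper<LongWritable, Text, NullWritable, BytesRefArrayWritable> {
    @Override
    protected void map(LongWritable key, Text line, Context ctx)
        throws IOException, InterruptedException {
      String[] cols = line.toString().split("\t", NUM_COLUMNS);
      BytesRefArrayWritable row = new BytesRefArrayWritable(NUM_COLUMNS);
      for (int i = 0; i < NUM_COLUMNS; i++) {
        byte[] bytes = (i < cols.length ? cols[i] : "").getBytes("UTF-8");
        row.set(i, new BytesRefWritable(bytes, 0, bytes.length));
      }
      ctx.write(NullWritable.get(), row);   // the key is ignored by the writer
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    RCFileMapReduceOutputFormat.setColumnNumber(conf, NUM_COLUMNS);
    Job job = new Job(conf, "rcfile-write");
    job.setJarByClass(RCFileWriteJob.class);
    job.setInputFormatClass(TextInputFormat.class);
    job.setMapperClass(ToColumnsMapper.class);
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(RCFileMapReduceOutputFormat.class);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(BytesRefArrayWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}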

Added: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java (added)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileMapReduceRecordReader.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.rcfile;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.RCFile;
+import org.apache.hadoop.hive.ql.io.RCFile.Reader;
+import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+
+public class RCFileMapReduceRecordReader<K extends LongWritable, V extends BytesRefArrayWritable>
+  extends RecordReader<LongWritable,BytesRefArrayWritable>{
+
+  private Reader in;
+  private long start;
+  private long end;
+  private boolean more = true;
+
+  // key and value objects are created once in initialize() and then reused
+  // for every getCurrentKey() and getCurrentValue() call. This is important
+  // since RCFile makes an assumption of this fact.
+
+  private LongWritable key;
+  private BytesRefArrayWritable value;
+
+  @Override
+  public void close() throws IOException {
+    in.close();
+  }
+
+  @Override
+  public LongWritable getCurrentKey() throws IOException, InterruptedException {
+    return key;
+  }
+
+  @Override
+  public BytesRefArrayWritable getCurrentValue() throws IOException, InterruptedException {
+    return value;
+  }
+
+  @Override
+  public float getProgress() throws IOException, InterruptedException {
+    if (end == start) {
+      return 0.0f;
+    } else {
+      return Math.min(1.0f, (in.getPosition() - start) / (float) (end - start));
+    }
+  }
+
+  @Override
+  public boolean nextKeyValue() throws IOException, InterruptedException {
+
+    more = next(key);
+    if (more) {
+      in.getCurrentRow(value);
+    }
+
+    return more;
+  }
+
+  private boolean next(LongWritable key) throws IOException {
+    if (!more) {
+      return false;
+    }
+
+    more = in.next(key);
+    if (!more) {
+      return false;
+    }
+
+    if (in.lastSeenSyncPos() >= end) {
+      more = false;
+      return more;
+    }
+    return more;
+  }
+
+  @Override
+  public void initialize(InputSplit split, TaskAttemptContext context) throws IOException,
+      InterruptedException {
+
+    FileSplit fSplit = (FileSplit)split;
+    Path path = fSplit.getPath();
+    Configuration conf = context.getConfiguration();
+    this.in = new RCFile.Reader(path.getFileSystem(conf), path, conf);
+    this.end = fSplit.getStart() + fSplit.getLength();
+
+    if(fSplit.getStart() > in.getPosition()) {
+      in.sync(fSplit.getStart());
+    }
+
+    this.start = in.getPosition();
+    more = start < end;
+
+    key = new LongWritable();
+    value = new BytesRefArrayWritable();
+  }
+}

Added: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileOutputDriver.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileOutputDriver.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileOutputDriver.java (added)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/rcfile/RCFileOutputDriver.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.rcfile;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.serde.Constants;
+import org.apache.hadoop.hive.serde2.SerDe;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
+import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hcatalog.common.HCatUtil;
+import org.apache.hcatalog.data.HCatRecord;
+import org.apache.hcatalog.data.schema.HCatFieldSchema;
+import org.apache.hcatalog.data.schema.HCatSchema;
+import org.apache.hcatalog.mapreduce.HCatOutputStorageDriver;
+
+/**
+ * The storage driver for writing RCFile data through HCatOutputFormat.
+ */
+ public class RCFileOutputDriver extends HCatOutputStorageDriver {
+
+   /** The SerDe for serializing the HCatRecord into a BytesRefArrayWritable */
+   private SerDe serde;
+
+   /** The object inspector for the given schema */
+   private StructObjectInspector objectInspector;
+
+   /** The schema for the output data */
+   private HCatSchema outputSchema;
+
+   /** The cached RCFile output format instance */
+   private OutputFormat outputFormat = null;
+
+  /* (non-Javadoc)
+   * @see org.apache.hcatalog.mapreduce.HCatOutputStorageDriver#convertValue(org.apache.hcatalog.data.HCatRecord)
+   */
+  @Override
+  public Writable convertValue(HCatRecord value) throws IOException {
+    try {
+
+      return serde.serialize(value.getAll(), objectInspector);
+    } catch(SerDeException e) {
+      throw new IOException(e);
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hcatalog.mapreduce.HCatOutputStorageDriver#generateKey(org.apache.hcatalog.data.HCatRecord)
+   */
+  @Override
+  public WritableComparable<?> generateKey(HCatRecord value) throws IOException {
+    //key is not used for RCFile output
+    return null;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hcatalog.mapreduce.HCatOutputStorageDriver#getOutputFormat(java.util.Properties)
+   */
+  @SuppressWarnings("unchecked")
+  @Override
+  public OutputFormat<? super WritableComparable<?>, ? super Writable> getOutputFormat() throws IOException {
+    if( outputFormat == null ) {
+      outputFormat = new RCFileMapReduceOutputFormat();
+    }
+
+    return outputFormat;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hcatalog.mapreduce.HCatOutputStorageDriver#setOutputPath(org.apache.hadoop.mapreduce.JobContext, java.lang.String)
+   */
+  @Override
+  public void setOutputPath(JobContext jobContext, String location) throws IOException {
+    //Not calling FileOutputFormat.setOutputPath since that requires a Job instead of JobContext
+    jobContext.getConfiguration().set("mapred.output.dir", location);
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hcatalog.mapreduce.HCatOutputStorageDriver#setPartitionValues(org.apache.hadoop.mapreduce.JobContext, java.util.Map)
+   */
+  @Override
+  public void setPartitionValues(JobContext jobContext, Map<String, String> partitionValues)
+      throws IOException {
+    //default implementation of HCatOutputStorageDriver.getPartitionLocation will use the partition
+    //values to generate the data location, so partition values not used here
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hcatalog.mapreduce.HCatOutputStorageDriver#setSchema(org.apache.hadoop.mapreduce.JobContext, org.apache.hadoop.hive.metastore.api.Schema)
+   */
+  @Override
+  public void setSchema(JobContext jobContext, HCatSchema schema) throws IOException {
+    outputSchema = schema;
+    RCFileMapReduceOutputFormat.setColumnNumber(
+        jobContext.getConfiguration(), schema.getFields().size());
+  }
+
+  @Override
+  public void initialize(JobContext context,Properties hcatProperties) throws IOException {
+
+    super.initialize(context, hcatProperties);
+
+    List<FieldSchema> fields = HCatUtil.getFieldSchemaList(outputSchema.getFields());
+    hcatProperties.setProperty(Constants.LIST_COLUMNS,
+          MetaStoreUtils.getColumnNamesFromFieldSchema(fields));
+    hcatProperties.setProperty(Constants.LIST_COLUMN_TYPES,
+          MetaStoreUtils.getColumnTypesFromFieldSchema(fields));
+
+    // Set these properties to match LazySimpleSerDe.
+    hcatProperties.setProperty(Constants.SERIALIZATION_NULL_FORMAT, "\\N");
+    hcatProperties.setProperty(Constants.SERIALIZATION_FORMAT, "1");
+
+    try {
+      serde = new ColumnarSerDe();
+      serde.initialize(context.getConfiguration(), hcatProperties);
+      objectInspector = createStructObjectInspector();
+
+    } catch (SerDeException e) {
+      throw new IOException(e);
+    }
+  }
+
+  public StructObjectInspector createStructObjectInspector() throws IOException {
+
+    if( outputSchema == null ) {
+      throw new IOException("Invalid output schema specified");
+    }
+
+    List<ObjectInspector> fieldInspectors = new ArrayList<ObjectInspector>();
+    List<String> fieldNames = new ArrayList<String>();
+
+    for(HCatFieldSchema hcatFieldSchema : outputSchema.getFields()) {
+      TypeInfo type = TypeInfoUtils.getTypeInfoFromTypeString(hcatFieldSchema.getTypeString());
+
+      fieldNames.add(hcatFieldSchema.getName());
+      fieldInspectors.add(getObjectInspector(type));
+    }
+
+    StructObjectInspector structInspector = ObjectInspectorFactory.
+        getStandardStructObjectInspector(fieldNames, fieldInspectors);
+    return structInspector;
+  }
+
+  public ObjectInspector getObjectInspector(TypeInfo type) throws IOException {
+
+    switch(type.getCategory()) {
+
+    case PRIMITIVE :
+      PrimitiveTypeInfo primitiveType = (PrimitiveTypeInfo) type;
+      return PrimitiveObjectInspectorFactory.
+        getPrimitiveJavaObjectInspector(primitiveType.getPrimitiveCategory());
+
+    case MAP :
+      MapTypeInfo mapType = (MapTypeInfo) type;
+      MapObjectInspector mapInspector = ObjectInspectorFactory.getStandardMapObjectInspector(
+          getObjectInspector(mapType.getMapKeyTypeInfo()), getObjectInspector(mapType.getMapValueTypeInfo()));
+      return mapInspector;
+
+    case LIST :
+      ListTypeInfo listType = (ListTypeInfo) type;
+      ListObjectInspector listInspector = ObjectInspectorFactory.getStandardListObjectInspector(
+          getObjectInspector(listType.getListElementTypeInfo()));
+      return listInspector;
+
+    case STRUCT :
+      StructTypeInfo structType = (StructTypeInfo) type;
+      List<TypeInfo> fieldTypes = structType.getAllStructFieldTypeInfos();
+
+      List<ObjectInspector> fieldInspectors = new ArrayList<ObjectInspector>();
+      for(TypeInfo fieldType : fieldTypes) {
+        fieldInspectors.add(getObjectInspector(fieldType));
+      }
+
+      StructObjectInspector structInspector = ObjectInspectorFactory.getStandardStructObjectInspector(
+          structType.getAllStructFieldNames(), fieldInspectors);
+      return structInspector;
+
+    default :
+      throw new IOException("Unknown field schema type");
+    }
+  }
+
+}

Added: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/ExitException.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/ExitException.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/ExitException.java (added)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/ExitException.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hcatalog;
+
+public  class ExitException extends SecurityException {
+  private static final long serialVersionUID = -1982617086752946683L;
+  private final int status;
+
+  /**
+   * @return the status
+   */
+  public int getStatus() {
+    return status;
+  }
+
+  public ExitException(int status) {
+
+    super("Raising exception, instead of System.exit(). Return code was: "+status);
+    this.status = status;
+  }
+}
\ No newline at end of file

Added: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/MiniCluster.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/MiniCluster.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/MiniCluster.java (added)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/MiniCluster.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,193 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MiniMRCluster;
+
+/**
+ * This class builds a single instance of itself with the Singleton
+ * design pattern. While building the single instance, it sets up a
+ * mini cluster that actually consists of a mini DFS cluster and a
+ * mini MapReduce cluster on the local machine and also sets up the
+ * environment for Pig to run on top of the mini cluster.
+ */
+public class MiniCluster {
+  private MiniDFSCluster m_dfs = null;
+  private MiniMRCluster m_mr = null;
+  private FileSystem m_fileSys = null;
+  private JobConf m_conf = null;
+
+  private final static MiniCluster INSTANCE = new MiniCluster();
+  private static boolean isSetup = true;
+
+  private MiniCluster() {
+    setupMiniDfsAndMrClusters();
+  }
+
+  private void setupMiniDfsAndMrClusters() {
+    try {
+      final int dataNodes = 1;     // Number of data nodes in the mini DFS cluster
+      final int taskTrackers = 1;  // Number of task trackers in the mini MR cluster
+      Configuration config = new Configuration();
+
+      // Builds and starts the mini dfs and mapreduce clusters
+      System.setProperty("hadoop.log.dir", ".");
+      m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
+
+      m_fileSys = m_dfs.getFileSystem();
+      m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);
+
+      // Create the configuration hadoop-site.xml file
+      File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
+      conf_dir.mkdirs();
+      File conf_file = new File(conf_dir, "hadoop-site.xml");
+
+      // Write the necessary config info to hadoop-site.xml
+      m_conf = m_mr.createJobConf();
+      m_conf.setInt("mapred.submit.replication", 1);
+      m_conf.set("dfs.datanode.address", "0.0.0.0:0");
+      m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
+      m_conf.writeXml(new FileOutputStream(conf_file));
+
+      // Set the system properties needed by Pig
+      System.setProperty("cluster", m_conf.get("mapred.job.tracker"));
+      System.setProperty("namenode", m_conf.get("fs.default.name"));
+      System.setProperty("junit.hadoop.conf", conf_dir.getPath());
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Returns the single instance of class MiniCluster that
+   * represents the resources for a mini DFS cluster and a mini
+   * MapReduce cluster.
+   */
+  public static MiniCluster buildCluster() {
+    if(! isSetup){
+      INSTANCE.setupMiniDfsAndMrClusters();
+      isSetup = true;
+    }
+    return INSTANCE;
+  }
+
+  public void shutDown(){
+    INSTANCE.shutdownMiniDfsAndMrClusters();
+  }
+
+  @Override
+  protected void finalize() {
+    shutdownMiniDfsAndMrClusters();
+  }
+
+  private void shutdownMiniDfsAndMrClusters() {
+    isSetup = false;
+    try {
+      if (m_fileSys != null) { m_fileSys.close(); }
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    if (m_dfs != null) { m_dfs.shutdown(); }
+    if (m_mr != null) { m_mr.shutdown(); }
+    m_fileSys = null;
+    m_dfs = null;
+    m_mr = null;
+  }
+
+  public Properties getProperties() {
+    errorIfNotSetup();
+    Properties properties = new Properties();
+    assert m_conf != null;
+    Iterator<Map.Entry<String, String>> iter = m_conf.iterator();
+    while (iter.hasNext()) {
+        Map.Entry<String, String> entry = iter.next();
+        properties.put(entry.getKey(), entry.getValue());
+    }
+    return properties;
+  }
+
+  public void setProperty(String name, String value) {
+    errorIfNotSetup();
+    m_conf.set(name, value);
+  }
+
+  public FileSystem getFileSystem() {
+    errorIfNotSetup();
+    return m_fileSys;
+  }
+
+  /**
+   * Throws a RuntimeException if isSetup is false.
+   */
+   private void errorIfNotSetup(){
+     if(isSetup) {
+       return;
+     }
+     String msg = "function called on MiniCluster that has been shutdown";
+     throw new RuntimeException(msg);
+   }
+
+   static public void createInputFile(MiniCluster miniCluster, String fileName,
+       String[] inputData)
+   throws IOException {
+     FileSystem fs = miniCluster.getFileSystem();
+     createInputFile(fs, fileName, inputData);
+   }
+
+   static public void createInputFile(FileSystem fs, String fileName,
+       String[] inputData) throws IOException {
+     Path path = new Path(fileName);
+     if(fs.exists(path)) {
+       throw new IOException("File " + fileName + " already exists on the minicluster");
+     }
+     FSDataOutputStream stream = fs.create(path);
+     PrintWriter pw = new PrintWriter(new OutputStreamWriter(stream, "UTF-8"));
+     for (int i=0; i<inputData.length; i++){
+       pw.println(inputData[i]);
+     }
+     pw.close();
+
+   }
+   /**
+    * Helper to remove a dfs file from the minicluster DFS
+    *
+    * @param miniCluster reference to the Minicluster where the file should be deleted
+    * @param fileName pathname of the file to be deleted
+    * @throws IOException
+    */
+   static public void deleteFile(MiniCluster miniCluster, String fileName)
+   throws IOException {
+     FileSystem fs = miniCluster.getFileSystem();
+     fs.delete(new Path(fileName), true);
+   }
+}
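
For reference, a test that wants the shared mini DFS/MR environment would typically use this helper roughly as in the hypothetical sketch below (the file name and contents are illustrative):

import java.util.Properties;
import org.apache.hcatalog.MiniCluster;

public class MiniClusterUsageSketch {
  public static void main(String[] args) throws Exception {
    // Starts (or reuses) the singleton mini DFS + MapReduce clusters.
    MiniCluster cluster = MiniCluster.buildCluster();

    // Hadoop/Pig configuration pointing at the mini cluster
    // (fs.default.name, mapred.job.tracker, ...).
    Properties props = cluster.getProperties();

    String file = "minicluster-demo.txt";               // illustrative file name
    MiniCluster.createInputFile(cluster, file, new String[] {"row one", "row two"});

    // ... run the code under test against props / cluster.getFileSystem() ...

    MiniCluster.deleteFile(cluster, file);
    cluster.shutDown();
  }
}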

Added: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/NoExitSecurityManager.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/NoExitSecurityManager.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/NoExitSecurityManager.java (added)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/NoExitSecurityManager.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hcatalog;
+
+import java.security.Permission;
+
+public class NoExitSecurityManager extends SecurityManager {
+
+  @Override
+  public void checkPermission(Permission perm) {
+    // allow anything.
+  }
+
+  @Override
+  public void checkPermission(Permission perm, Object context) {
+    // allow anything.
+  }
+
+  @Override
+  public void checkExit(int status) {
+
+    super.checkExit(status);
+    throw new ExitException(status);
+  }
+}
\ No newline at end of file

Added: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestPermsGrp.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestPermsGrp.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestPermsGrp.java (added)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestPermsGrp.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.cli;
+
+import java.io.FileNotFoundException;
+import java.util.ArrayList;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.serde.Constants;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hcatalog.ExitException;
+import org.apache.hcatalog.NoExitSecurityManager;
+import org.apache.hcatalog.cli.HCatCli;
+import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
+import org.apache.hcatalog.common.HCatConstants;
+import org.apache.thrift.TException;
+
+public class TestPermsGrp extends TestCase {
+
+  private boolean isServerRunning = false;
+  private static final String msPort = "20101";
+  private HiveConf howlConf;
+  private Warehouse clientWH;
+  private Thread t;
+  private HiveMetaStoreClient msc;
+
+  private static class RunMS implements Runnable {
+
+    @Override
+    public void run() {
+      HiveMetaStore.main(new String[]{msPort});
+    }
+
+  }
+
+  @Override
+  protected void tearDown() throws Exception {
+    System.setSecurityManager(securityManager);
+  }
+
+  @Override
+  protected void setUp() throws Exception {
+
+    if(isServerRunning) {
+      return;
+    }
+
+    t = new Thread(new RunMS());
+    t.start();
+    Thread.sleep(40000);
+
+    isServerRunning = true;
+
+    securityManager = System.getSecurityManager();
+    System.setSecurityManager(new NoExitSecurityManager());
+
+    howlConf = new HiveConf(this.getClass());
+    howlConf.set("hive.metastore.local", "false");
+    howlConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
+    howlConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTRETRIES, 3);
+
+    howlConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
+    howlConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+    howlConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+    howlConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+    clientWH = new Warehouse(howlConf);
+    msc = new HiveMetaStoreClient(howlConf,null);
+    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
+    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
+  }
+
+
+  public void testCustomPerms() throws Exception {
+
+    String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+    String tblName = "simptbl";
+    String typeName = "Person";
+
+    try {
+
+      // Let's first test the default permissions; this is the case when the user specified nothing.
+      Table tbl = getTable(dbName,tblName,typeName);
+      msc.createTable(tbl);
+      Path dfsPath = clientWH.getDefaultTablePath(dbName, tblName);
+      assertTrue(dfsPath.getFileSystem(howlConf).getFileStatus(dfsPath).getPermission().equals(FsPermission.getDefault()));
+      cleanupTbl(dbName, tblName, typeName);
+
+      // Next, the user did specify perms.
+      try{
+        HCatCli.main(new String[]{"-e","create table simptbl (name string) stored as RCFILE", "-p","rwx-wx---"});
+      }
+      catch(Exception e){
+        assertTrue(e instanceof ExitException);
+        assertEquals(((ExitException)e).getStatus(), 0);
+      }
+      dfsPath = clientWH.getDefaultTablePath(dbName, tblName);
+      assertTrue(dfsPath.getFileSystem(howlConf).getFileStatus(dfsPath).getPermission().equals(FsPermission.valueOf("drwx-wx---")));
+
+      cleanupTbl(dbName, tblName, typeName);
+
+      // User specified perms in invalid format.
+      howlConf.set(HCatConstants.HCAT_PERMS, "rwx");
+      // make sure create table fails.
+      try{
+        HCatCli.main(new String[]{"-e","create table simptbl (name string) stored as RCFILE", "-p","rwx"});
+        assert false;
+      }catch(Exception me){
+        assertTrue(me instanceof ExitException);
+      }
+      // No physical dir gets created.
+      dfsPath = clientWH.getDefaultTablePath(MetaStoreUtils.DEFAULT_DATABASE_NAME,tblName);
+      try{
+        dfsPath.getFileSystem(howlConf).getFileStatus(dfsPath);
+        assert false;
+      } catch(Exception fnfe){
+        assertTrue(fnfe instanceof FileNotFoundException);
+      }
+
+      // And no metadata gets created.
+      try{
+        msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
+        assert false;
+      }catch (Exception e){
+        assertTrue(e instanceof NoSuchObjectException);
+        assertEquals("default.simptbl table not found", e.getMessage());
+      }
+
+      // test for invalid group name
+      howlConf.set(HCatConstants.HCAT_PERMS, "drw-rw-rw-");
+      howlConf.set(HCatConstants.HCAT_GROUP, "THIS_CANNOT_BE_A_VALID_GRP_NAME_EVER");
+
+      try{
+        // create table must fail.
+        HCatCli.main(new String[]{"-e","create table simptbl (name string) stored as RCFILE", "-p","rw-rw-rw-","-g","THIS_CANNOT_BE_A_VALID_GRP_NAME_EVER"});
+        assert false;
+      }catch (Exception me){
+        assertTrue(me instanceof SecurityException);
+      }
+
+      try{
+        // no metadata should get created.
+        msc.getTable(dbName, tblName);
+        assert false;
+      }catch (Exception e){
+        assertTrue(e instanceof NoSuchObjectException);
+        assertEquals("default.simptbl table not found", e.getMessage());
+      }
+      try{
+        // neither dir should get created.
+        dfsPath.getFileSystem(howlConf).getFileStatus(dfsPath);
+        assert false;
+      } catch(Exception e){
+        assertTrue(e instanceof FileNotFoundException);
+      }
+
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testCustomPerms failed.");
+      throw e;
+    }
+  }
+
+  private void silentDropDatabase(String dbName) throws MetaException, TException {
+    try {
+      for (String tableName : msc.getTables(dbName, "*")) {
+        msc.dropTable(dbName, tableName);
+      }
+
+    } catch (NoSuchObjectException e) {
+    }
+  }
+
+  private void cleanupTbl(String dbName, String tblName, String typeName) throws NoSuchObjectException, MetaException, TException, InvalidOperationException{
+
+    msc.dropTable(dbName, tblName);
+    msc.dropType(typeName);
+  }
+
+  private Table getTable(String dbName, String tblName, String typeName) throws NoSuchObjectException, MetaException, TException, AlreadyExistsException, InvalidObjectException{
+
+    msc.dropTable(dbName, tblName);
+    silentDropDatabase(dbName);
+
+
+    msc.dropType(typeName);
+    Type typ1 = new Type();
+    typ1.setName(typeName);
+    typ1.setFields(new ArrayList<FieldSchema>(1));
+    typ1.getFields().add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+    msc.createType(typ1);
+
+    Table tbl = new Table();
+    tbl.setDbName(dbName);
+    tbl.setTableName(tblName);
+    StorageDescriptor sd = new StorageDescriptor();
+    tbl.setSd(sd);
+    sd.setCols(typ1.getFields());
+
+    sd.setSerdeInfo(new SerDeInfo());
+    return tbl;
+  }
+
+
+
+  private SecurityManager securityManager;
+
+}

Added: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestSemanticAnalysis.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestSemanticAnalysis.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestSemanticAnalysis.java (added)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestSemanticAnalysis.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,414 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.cli;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
+import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
+import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.mapred.TextInputFormat;
+import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
+import org.apache.hcatalog.common.HCatConstants;
+import org.apache.hcatalog.rcfile.RCFileInputDriver;
+import org.apache.hcatalog.rcfile.RCFileOutputDriver;
+import org.apache.thrift.TException;
+
+public class TestSemanticAnalysis extends TestCase{
+
+  private Driver howlDriver;
+  private Driver hiveDriver;
+  private HiveMetaStoreClient msc;
+
+  @Override
+  protected void setUp() throws Exception {
+
+    HiveConf howlConf = new HiveConf(this.getClass());
+    howlConf.set(ConfVars.PREEXECHOOKS.varname, "");
+    howlConf.set(ConfVars.POSTEXECHOOKS.varname, "");
+    howlConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+
+    HiveConf hiveConf = new HiveConf(howlConf,this.getClass());
+    hiveDriver = new Driver(hiveConf);
+
+    howlConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
+    howlDriver = new Driver(howlConf);
+
+    msc = new HiveMetaStoreClient(howlConf);
+    SessionState.start(new CliSessionState(howlConf));
+  }
+
+  String query;
+  private final String tblName = "junit_sem_analysis";
+
+  public void testAlterTblFFpart() throws MetaException, TException, NoSuchObjectException {
+
+    hiveDriver.run("drop table junit_sem_analysis");
+    hiveDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as TEXTFILE");
+    hiveDriver.run("alter table junit_sem_analysis add partition (b='2010-10-10')");
+    howlDriver.run("alter table junit_sem_analysis partition (b='2010-10-10') set fileformat RCFILE");
+
+    Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
+    assertEquals(TextInputFormat.class.getName(),tbl.getSd().getInputFormat());
+    assertEquals(HiveIgnoreKeyTextOutputFormat.class.getName(),tbl.getSd().getOutputFormat());
+    Map<String, String> tblParams = tbl.getParameters();
+    assertNull(tblParams.get(HCatConstants.HCAT_ISD_CLASS));
+    assertNull(tblParams.get(HCatConstants.HCAT_OSD_CLASS));
+
+    List<String> partVals = new ArrayList<String>(1);
+    partVals.add("2010-10-10");
+    Partition part = msc.getPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, partVals);
+
+    assertEquals(RCFileInputFormat.class.getName(),part.getSd().getInputFormat());
+    assertEquals(RCFileOutputFormat.class.getName(),part.getSd().getOutputFormat());
+
+    Map<String,String> partParams = part.getParameters();
+    assertEquals(RCFileInputDriver.class.getName(), partParams.get(HCatConstants.HCAT_ISD_CLASS));
+    assertEquals(RCFileOutputDriver.class.getName(), partParams.get(HCatConstants.HCAT_OSD_CLASS));
+
+    howlDriver.run("drop table junit_sem_analysis");
+  }
+
+  public void testDatabaseOperations() throws MetaException {
+
+    List<String> dbs = msc.getAllDatabases();
+    String testDb1 = "testdatabaseoperatons1";
+    String testDb2 = "testdatabaseoperatons2";
+
+    if (dbs.contains(testDb1.toLowerCase())){
+      assertEquals(0,howlDriver.run("drop database "+testDb1).getResponseCode());
+    }
+
+    if (dbs.contains(testDb2.toLowerCase())){
+      assertEquals(0,howlDriver.run("drop database "+testDb2).getResponseCode());
+    }
+
+    assertEquals(0,howlDriver.run("create database "+testDb1).getResponseCode());
+    assertTrue(msc.getAllDatabases().contains(testDb1));
+    assertEquals(0,howlDriver.run("create database if not exists "+testDb1).getResponseCode());
+    assertTrue(msc.getAllDatabases().contains(testDb1));
+    assertEquals(0,howlDriver.run("create database if not exists "+testDb2).getResponseCode());
+    assertTrue(msc.getAllDatabases().contains(testDb2));
+
+    assertEquals(0,howlDriver.run("drop database "+testDb1).getResponseCode());
+    assertEquals(0,howlDriver.run("drop database "+testDb2).getResponseCode());
+    assertFalse(msc.getAllDatabases().contains(testDb1));
+    assertFalse(msc.getAllDatabases().contains(testDb2));
+  }
+
+  public void testCreateTableIfNotExists() throws MetaException, TException, NoSuchObjectException{
+
+    howlDriver.run("drop table "+tblName);
+    howlDriver.run("create table junit_sem_analysis (a int) stored as RCFILE");
+    Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
+    List<FieldSchema> cols = tbl.getSd().getCols();
+    assertEquals(1, cols.size());
+    assertTrue(cols.get(0).equals(new FieldSchema("a", "int", null)));
+    assertEquals(RCFileInputFormat.class.getName(),tbl.getSd().getInputFormat());
+    assertEquals(RCFileOutputFormat.class.getName(),tbl.getSd().getOutputFormat());
+    Map<String, String> tblParams = tbl.getParameters();
+    assertEquals(RCFileInputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_ISD_CLASS));
+    assertEquals(RCFileOutputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_OSD_CLASS));
+
+    CommandProcessorResponse resp = howlDriver.run("create table if not exists junit_sem_analysis (a int) stored as RCFILE");
+    assertEquals(0, resp.getResponseCode());
+    assertNull(resp.getErrorMessage());
+    tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
+    cols = tbl.getSd().getCols();
+    assertEquals(1, cols.size());
+    assertTrue(cols.get(0).equals(new FieldSchema("a", "int",null)));
+    assertEquals(RCFileInputFormat.class.getName(),tbl.getSd().getInputFormat());
+    assertEquals(RCFileOutputFormat.class.getName(),tbl.getSd().getOutputFormat());
+
+    tblParams = tbl.getParameters();
+    assertEquals(RCFileInputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_ISD_CLASS));
+    assertEquals(RCFileOutputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_OSD_CLASS));
+    howlDriver.run("drop table junit_sem_analysis");
+  }
+
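+  // ALTER TABLE ... TOUCH is not supported by HCatalog, at either the table or the partition level.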
+  public void testAlterTblTouch(){
+
+    howlDriver.run("drop table junit_sem_analysis");
+    howlDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+    CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis touch");
+    assertEquals(10, response.getResponseCode());
+    assertTrue(response.getErrorMessage().contains("Operation not supported."));
+
+    howlDriver.run("alter table junit_sem_analysis touch partition (b='12')");
+    assertEquals(10, response.getResponseCode());
+    assertTrue(response.getErrorMessage().contains("Operation not supported."));
+
+    howlDriver.run("drop table junit_sem_analysis");
+  }
+
+  public void testChangeColumns(){
+    howlDriver.run("drop table junit_sem_analysis");
+    howlDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as RCFILE");
+    CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis change a a1 int");
+    assertEquals(10, response.getResponseCode());
+    assertTrue(response.getErrorMessage().contains("Operation not supported."));
+
+    response = howlDriver.run("alter table junit_sem_analysis change a a string");
+    assertEquals(10, response.getResponseCode());
+    assertTrue(response.getErrorMessage().contains("Operation not supported."));
+
+    response = howlDriver.run("alter table junit_sem_analysis change a a int after c");
+    assertEquals(10, response.getResponseCode());
+    assertTrue(response.getErrorMessage().contains("Operation not supported."));
+    howlDriver.run("drop table junit_sem_analysis");
+  }
+
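+  // REPLACE COLUMNS is rejected, while ADD COLUMNS succeeds and appends the new column.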
+  public void testAddReplaceCols() throws IOException, MetaException, TException, NoSuchObjectException{
+
+    howlDriver.run("drop table junit_sem_analysis");
+    howlDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as RCFILE");
+    CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis replace columns (a1 tinyint)");
+    assertEquals(10, response.getResponseCode());
+    assertTrue(response.getErrorMessage().contains("Operation not supported."));
+
+    response = howlDriver.run("alter table junit_sem_analysis add columns (d tinyint)");
+    assertEquals(0, response.getResponseCode());
+    assertNull(response.getErrorMessage());
+    Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
+    List<FieldSchema> cols = tbl.getSd().getCols();
+    assertEquals(3, cols.size());
+    assertTrue(cols.get(0).equals(new FieldSchema("a", "int", "from deserializer")));
+    assertTrue(cols.get(1).equals(new FieldSchema("c", "string", "from deserializer")));
+    assertTrue(cols.get(2).equals(new FieldSchema("d", "tinyint", null)));
+    howlDriver.run("drop table junit_sem_analysis");
+  }
+
+  public void testAlterTblClusteredBy(){
+
+    howlDriver.run("drop table junit_sem_analysis");
+    howlDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+    CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis clustered by (a) into 7 buckets");
+    assertEquals(10, response.getResponseCode());
+    assertTrue(response.getErrorMessage().contains("Operation not supported."));
+    howlDriver.run("drop table junit_sem_analysis");
+  }
+
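+  // ALTER TABLE ... SET FILEFORMAT with explicit inputdriver/outputdriver should overwrite the stored driver parameters.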
+  public void testAlterTableSetFF() throws IOException, MetaException, TException, NoSuchObjectException{
+
+    howlDriver.run("drop table junit_sem_analysis");
+    howlDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+
+    Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
+    assertEquals(RCFileInputFormat.class.getName(),tbl.getSd().getInputFormat());
+    assertEquals(RCFileOutputFormat.class.getName(),tbl.getSd().getOutputFormat());
+
+    Map<String,String> tblParams = tbl.getParameters();
+    assertEquals(RCFileInputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_ISD_CLASS));
+    assertEquals(RCFileOutputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_OSD_CLASS));
+
+    howlDriver.run("alter table junit_sem_analysis set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " +
+        "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'");
+    howlDriver.run("desc extended junit_sem_analysis");
+
+    tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
+    assertEquals(RCFileInputFormat.class.getName(),tbl.getSd().getInputFormat());
+    assertEquals(RCFileOutputFormat.class.getName(),tbl.getSd().getOutputFormat());
+    tblParams = tbl.getParameters();
+    assertEquals("mydriver", tblParams.get(HCatConstants.HCAT_ISD_CLASS));
+    assertEquals("yourdriver", tblParams.get(HCatConstants.HCAT_OSD_CLASS));
+
+    howlDriver.run("drop table junit_sem_analysis");
+  }
+
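+  // Adding a partition to a table that was not created through HCatalog must fail.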
+  public void testAddPartFail(){
+
+    hiveDriver.run("drop table junit_sem_analysis");
+    hiveDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+    CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis add partition (b='2') location '/some/path'");
+    assertEquals(10, response.getResponseCode());
+    assertTrue(response.getErrorMessage().contains("FAILED: Error in semantic analysis: Operation not supported. Partitions can be added only in a table created through HCatalog. " +
+    		"It seems table junit_sem_analysis was not created through HCatalog."));
+    hiveDriver.run("drop table junit_sem_analysis");
+  }
+
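+  // Adding a partition to a table created through HCatalog should succeed.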
+  public void testAddPartPass() throws IOException{
+
+    howlDriver.run("drop table junit_sem_analysis");
+    howlDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+    CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis add partition (b='2') location '/tmp'");
+    assertEquals(0, response.getResponseCode());
+    assertNull(response.getErrorMessage());
+    howlDriver.run("drop table junit_sem_analysis");
+  }
+
+  public void testCTAS(){
+    howlDriver.run("drop table junit_sem_analysis");
+    query = "create table junit_sem_analysis (a int) as select * from tbl2";
+    CommandProcessorResponse response = howlDriver.run(query);
+    assertEquals(10, response.getResponseCode());
+    assertTrue(response.getErrorMessage().contains("FAILED: Error in semantic analysis: Operation not supported. Create table as Select is not a valid operation."));
+    howlDriver.run("drop table junit_sem_analysis");
+  }
+
+  public void testStoredAs(){
+    howlDriver.run("drop table junit_sem_analysis");
+    query = "create table junit_sem_analysis (a int)";
+    CommandProcessorResponse response = howlDriver.run(query);
+    assertEquals(10, response.getResponseCode());
+    assertTrue(response.getErrorMessage().contains("FAILED: Error in semantic analysis: STORED AS specification is either incomplete or incorrect."));
+    howlDriver.run("drop table junit_sem_analysis");
+  }
+
+  public void testAddDriverInfo() throws IOException, MetaException, TException, NoSuchObjectException{
+
+    howlDriver.run("drop table junit_sem_analysis");
+    query =  "create table junit_sem_analysis (a int) partitioned by (b string)  stored as " +
+    		"INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " +
+    		"'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver' ";
+    assertEquals(0,howlDriver.run(query).getResponseCode());
+
+    Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
+    assertEquals(RCFileInputFormat.class.getName(),tbl.getSd().getInputFormat());
+    assertEquals(RCFileOutputFormat.class.getName(),tbl.getSd().getOutputFormat());
+    Map<String, String> tblParams = tbl.getParameters();
+    assertEquals("mydriver", tblParams.get(HCatConstants.HCAT_ISD_CLASS));
+    assertEquals("yourdriver", tblParams.get(HCatConstants.HCAT_OSD_CLASS));
+
+    howlDriver.run("drop table junit_sem_analysis");
+  }
+
+  public void testInvalidateNonStringPartition() throws IOException{
+
+    howlDriver.run("drop table junit_sem_analysis");
+    query =  "create table junit_sem_analysis (a int) partitioned by (b int)  stored as RCFILE";
+
+    CommandProcessorResponse response = howlDriver.run(query);
+    assertEquals(10,response.getResponseCode());
+    assertEquals("FAILED: Error in semantic analysis: Operation not supported. HCatalog only supports partition columns of type string. For column: b Found type: int",
+        response.getErrorMessage());
+
+  }
+
+  public void testInvalidateSeqFileStoredAs() throws IOException{
+
+    howlDriver.run("drop table junit_sem_analysis");
+    query =  "create table junit_sem_analysis (a int) partitioned by (b string)  stored as SEQUENCEFILE";
+
+    CommandProcessorResponse response = howlDriver.run(query);
+    assertEquals(10,response.getResponseCode());
+    assertEquals("FAILED: Error in semantic analysis: Operation not supported. HCatalog doesn't support Sequence File by default yet. You may specify it through INPUT/OUTPUT storage drivers.",
+        response.getErrorMessage());
+
+  }
+
+  public void testInvalidateTextFileStoredAs() throws IOException{
+
+    howlDriver.run("drop table junit_sem_analysis");
+    query =  "create table junit_sem_analysis (a int) partitioned by (b string)  stored as TEXTFILE";
+
+    CommandProcessorResponse response = howlDriver.run(query);
+    assertEquals(10,response.getResponseCode());
+    assertEquals("FAILED: Error in semantic analysis: Operation not supported. HCatalog doesn't support Text File by default yet. You may specify it through INPUT/OUTPUT storage drivers.",
+        response.getErrorMessage());
+
+  }
+
+  public void testInvalidateClusteredBy() throws IOException{
+
+    howlDriver.run("drop table junit_sem_analysis");
+    query =  "create table junit_sem_analysis (a int) partitioned by (b string) clustered by (a) into 10 buckets stored as TEXTFILE";
+
+    CommandProcessorResponse response = howlDriver.run(query);
+    assertEquals(10,response.getResponseCode());
+    assertEquals("FAILED: Error in semantic analysis: Operation not supported. HCatalog doesn't allow Clustered By in create table.",
+        response.getErrorMessage());
+  }
+
+  public void testCTLFail() throws IOException{
+
+    hiveDriver.run("drop table junit_sem_analysis");
+    query =  "create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE";
+
+    hiveDriver.run(query);
+    query = "create table like_table like junit_sem_analysis";
+    CommandProcessorResponse response = howlDriver.run(query);
+    assertEquals(10,response.getResponseCode());
+    assertEquals("FAILED: Error in semantic analysis: Operation not supported. CREATE TABLE LIKE is not supported.", response.getErrorMessage());
+  }
+
+  public void testCTLPass() throws IOException, MetaException, TException, NoSuchObjectException{
+
+    try{
+      howlDriver.run("drop table junit_sem_analysis");
+    }
+    catch( Exception e){
+      System.err.println(e.getMessage());
+    }
+    query =  "create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE";
+
+    howlDriver.run(query);
+    String likeTbl = "like_table";
+    howlDriver.run("drop table "+likeTbl);
+    query = "create table like_table like junit_sem_analysis";
+    CommandProcessorResponse resp = howlDriver.run(query);
+    assertEquals(10, resp.getResponseCode());
+    assertEquals("FAILED: Error in semantic analysis: Operation not supported. CREATE TABLE LIKE is not supported.", resp.getErrorMessage());
+//    Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, likeTbl);
+//    assertEquals(likeTbl,tbl.getTableName());
+//    List<FieldSchema> cols = tbl.getSd().getCols();
+//    assertEquals(1, cols.size());
+//    assertEquals(new FieldSchema("a", "int", null), cols.get(0));
+//    assertEquals("org.apache.hadoop.hive.ql.io.RCFileInputFormat",tbl.getSd().getInputFormat());
+//    assertEquals("org.apache.hadoop.hive.ql.io.RCFileOutputFormat",tbl.getSd().getOutputFormat());
+//    Map<String, String> tblParams = tbl.getParameters();
+//    assertEquals("org.apache.hadoop.hive.howl.rcfile.RCFileInputStorageDriver", tblParams.get("howl.isd"));
+//    assertEquals("org.apache.hadoop.hive.howl.rcfile.RCFileOutputStorageDriver", tblParams.get("howl.osd"));
+//
+//    howlDriver.run("drop table junit_sem_analysis");
+//    howlDriver.run("drop table "+likeTbl);
+  }
+
+// This test case currently fails because newly added partitions do not inherit storage-driver parameters from the table.
+
+//  public void testAddPartInheritDrivers() throws MetaException, TException, NoSuchObjectException{
+//
+//    howlDriver.run("drop table "+tblName);
+//    howlDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
+//    howlDriver.run("alter table "+tblName+" add partition (b='2010-10-10')");
+//
+//    List<String> partVals = new ArrayList<String>(1);
+//    partVals.add("2010-10-10");
+//
+//    Map<String,String> map = msc.getPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName, partVals).getParameters();
+//    assertEquals(map.get(InitializeInput.HOWL_ISD_CLASS), RCFileInputStorageDriver.class.getName());
+//    assertEquals(map.get(InitializeInput.HOWL_OSD_CLASS), RCFileOutputStorageDriver.class.getName());
+//  }
+}

Added: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestUseDatabase.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestUseDatabase.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestUseDatabase.java (added)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/cli/TestUseDatabase.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.cli;
+
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
+
+/* Unit test for GitHub Howl issue #3 */
+public class TestUseDatabase extends TestCase{
+
+  private Driver howlDriver;
+
+  @Override
+  protected void setUp() throws Exception {
+
+    HiveConf howlConf = new HiveConf(this.getClass());
+    howlConf.set(ConfVars.PREEXECHOOKS.varname, "");
+    howlConf.set(ConfVars.POSTEXECHOOKS.varname, "");
+    howlConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+
+    howlConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
+    howlDriver = new Driver(howlConf);
+    SessionState.start(new CliSessionState(howlConf));
+  }
+
+  String query;
+  private final String dbName = "testUseDatabase_db";
+  private final String tblName = "testUseDatabase_tbl";
+
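+  // ALTER TABLE statements on a table addressed via USE <db> should pass the HCatalog semantic analyzer.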
+  public void testAlterTablePass() throws IOException{
+
+    howlDriver.run("create database " + dbName);
+    howlDriver.run("use " + dbName);
+    howlDriver.run("create table " + tblName + " (a int) partitioned by (b string) stored as RCFILE");
+
+    CommandProcessorResponse response;
+
+    response = howlDriver.run("alter table " + tblName + " add partition (b='2') location '/tmp'");
+    assertEquals(0, response.getResponseCode());
+    assertNull(response.getErrorMessage());
+
+    response = howlDriver.run("alter table " + tblName + " set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " +
+        "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'");
+    assertEquals(0, response.getResponseCode());
+    assertNull(response.getErrorMessage());
+
+    howlDriver.run("drop table " + tblName);
+    howlDriver.run("drop database " + dbName);
+  }
+
+}

Added: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/common/TestHCatUtil.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/common/TestHCatUtil.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/common/TestHCatUtil.java (added)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/common/TestHCatUtil.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.common;
+
+import java.util.HashMap;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hcatalog.common.HCatUtil;
+
+public class TestHCatUtil extends TestCase{
+
+
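+  // Enumerates all 512 octal permission codes, checks that FsPermission string conversion round-trips,
+  // and that no two distinct codes map to the same permission string.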
+  public void testFsPermissionOperation(){
+
+    HashMap<String,Integer> permsCode = new HashMap<String,Integer>();
+
+    for (int i = 0; i < 8; i++){
+      for (int j = 0; j < 8; j++){
+        for (int k = 0; k < 8; k++){
+          StringBuilder sb = new StringBuilder();
+          sb.append("0");
+          sb.append(i);
+          sb.append(j);
+          sb.append(k);
+          Integer code = (((i*8)+j)*8)+k;
+          String perms = (new FsPermission(Short.decode(sb.toString()))).toString();
+          if (permsCode.containsKey(perms)){
+            assertEquals("permissions(" + perms + ") mapped to multiple codes",code,permsCode.get(perms));
+          }
+          permsCode.put(perms, code);
+          assertFsPermissionTransformationIsGood(perms);
+        }
+      }
+    }
+  }
+
+  private void assertFsPermissionTransformationIsGood(String perms) {
+    assertEquals(perms,FsPermission.valueOf("-"+perms).toString());
+  }
+
+  public void testValidateMorePermissive(){
+    assertConsistentFsPermissionBehaviour(FsAction.ALL,true,true,true,true,true,true,true,true);
+    assertConsistentFsPermissionBehaviour(FsAction.READ,false,true,false,true,false,false,false,false);
+    assertConsistentFsPermissionBehaviour(FsAction.WRITE,false,true,false,false,true,false,false,false);
+    assertConsistentFsPermissionBehaviour(FsAction.EXECUTE,false,true,true,false,false,false,false,false);
+    assertConsistentFsPermissionBehaviour(FsAction.READ_EXECUTE,false,true,true,true,false,true,false,false);
+    assertConsistentFsPermissionBehaviour(FsAction.READ_WRITE,false,true,false,true,true,false,true,false);
+    assertConsistentFsPermissionBehaviour(FsAction.WRITE_EXECUTE,false,true,true,false,true,false,false,true);
+    assertConsistentFsPermissionBehaviour(FsAction.NONE,false,true,false,false,false,false,false,false);
+  }
+
+
+  private void assertConsistentFsPermissionBehaviour(
+      FsAction base, boolean versusAll, boolean versusNone,
+      boolean versusX, boolean versusR, boolean versusW,
+      boolean versusRX, boolean versusRW,  boolean versusWX){
+
+    assertTrue(versusAll == HCatUtil.validateMorePermissive(base, FsAction.ALL));
+    assertTrue(versusX == HCatUtil.validateMorePermissive(base, FsAction.EXECUTE));
+    assertTrue(versusNone == HCatUtil.validateMorePermissive(base, FsAction.NONE));
+    assertTrue(versusR == HCatUtil.validateMorePermissive(base, FsAction.READ));
+    assertTrue(versusRX == HCatUtil.validateMorePermissive(base, FsAction.READ_EXECUTE));
+    assertTrue(versusRW == HCatUtil.validateMorePermissive(base, FsAction.READ_WRITE));
+    assertTrue(versusW == HCatUtil.validateMorePermissive(base, FsAction.WRITE));
+    assertTrue(versusWX == HCatUtil.validateMorePermissive(base, FsAction.WRITE_EXECUTE));
+  }
+
+  public void testExecutePermissionsCheck(){
+    assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.ALL));
+    assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.NONE));
+    assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.EXECUTE));
+    assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.READ_EXECUTE));
+    assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.WRITE_EXECUTE));
+
+    assertFalse(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.READ));
+    assertFalse(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.WRITE));
+    assertFalse(HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.READ_WRITE));
+
+  }
+
+}

Added: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/data/TestDefaultHCatRecord.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/data/TestDefaultHCatRecord.java?rev=1091509&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/data/TestDefaultHCatRecord.java (added)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/data/TestDefaultHCatRecord.java Tue Apr 12 17:30:08 2011
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.data;
+
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hcatalog.data.DefaultHCatRecord;
+import org.apache.hcatalog.data.HCatRecord;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+public class TestDefaultHCatRecord extends TestCase{
+
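+  // Round trip ("read your writes"): serialize a set of records to a file, read them back, and verify equality.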
+  public void testRYW() throws IOException{
+
+    File f = new File("binary.dat");
+    f.delete();
+    f.createNewFile();
+    f.deleteOnExit();
+
+    OutputStream fileOutStream = new FileOutputStream(f);
+    DataOutput outStream = new DataOutputStream(fileOutStream);
+
+    HCatRecord[]  recs = getHCatRecords();
+    for(int i =0; i < recs.length; i++){
+      recs[i].write(outStream);
+    }
+    fileOutStream.flush();
+    fileOutStream.close();
+
+    InputStream fInStream = new FileInputStream(f);
+    DataInput inpStream = new DataInputStream(fInStream);
+
+    for(int i =0; i < recs.length; i++){
+      HCatRecord rec = new DefaultHCatRecord();
+      rec.readFields(inpStream);
+      Assert.assertEquals(recs[i],rec);
+    }
+
+    Assert.assertEquals(fInStream.available(), 0);
+    fInStream.close();
+
+  }
+
+  public void testCompareTo() {
+    HCatRecord[] recs = getHCatRecords();
+    Assert.assertEquals(recs[0].compareTo(recs[1]),0);
+  }
+
+  public void testEqualsObject() {
+
+    HCatRecord[] recs = getHCatRecords();
+    Assert.assertTrue(recs[0].equals(recs[1]));
+  }
+
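+  // Builds four records: two identical flat records (used by the compareTo/equals tests) and two records
+  // that add nested list and map fields in different orders.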
+  private HCatRecord[] getHCatRecords(){
+
+    List<Object> rec_1 = new ArrayList<Object>(8);
+    rec_1.add(new Byte("123"));
+    rec_1.add(new Short("456"));
+    rec_1.add( new Integer(789));
+    rec_1.add( new Long(1000L));
+    rec_1.add( new Double(5.3D));
+    rec_1.add( new String("howl and hadoop"));
+    rec_1.add( null);
+    rec_1.add( "null");
+
+    HCatRecord tup_1 = new DefaultHCatRecord(rec_1);
+
+    List<Object> rec_2 = new ArrayList<Object>(8);
+    rec_2.add( new Byte("123"));
+    rec_2.add( new Short("456"));
+    rec_2.add( new Integer(789));
+    rec_2.add( new Long(1000L));
+    rec_2.add( new Double(5.3D));
+    rec_2.add( new String("howl and hadoop"));
+    rec_2.add( null);
+    rec_2.add( "null");
+    HCatRecord tup_2 = new DefaultHCatRecord(rec_2);
+
+    List<Object> rec_3 = new ArrayList<Object>(10);
+    rec_3.add(new Byte("123"));
+    rec_3.add(new Short("456"));
+    rec_3.add( new Integer(789));
+    rec_3.add( new Long(1000L));
+    rec_3.add( new Double(5.3D));
+    rec_3.add( new String("howl and hadoop"));
+    rec_3.add( null);
+    List<Integer> innerList = new ArrayList<Integer>();
+    innerList.add(314);
+    innerList.add(7);
+    rec_3.add( innerList);
+    Map<Short, String> map = new HashMap<Short, String>(3);
+    map.put(new Short("2"), "howl is cool");
+    map.put(new Short("3"), "is it?");
+    map.put(new Short("4"), "or is it not?");
+    rec_3.add(map);
+
+    HCatRecord tup_3 = new DefaultHCatRecord(rec_3);
+
+    List<Object> rec_4 = new ArrayList<Object>(8);
+    rec_4.add( new Byte("123"));
+    rec_4.add( new Short("456"));
+    rec_4.add( new Integer(789));
+    rec_4.add( new Long(1000L));
+    rec_4.add( new Double(5.3D));
+    rec_4.add( new String("howl and hadoop"));
+    rec_4.add( null);
+    rec_4.add( "null");
+
+    Map<Short, String> map2 = new HashMap<Short, String>(3);
+    map2.put(new Short("2"), "howl is cool");
+    map2.put(new Short("3"), "is it?");
+    map2.put(new Short("4"), "or is it not?");
+    rec_4.add(map2);
+    List<Integer> innerList2 = new ArrayList<Integer>();
+    innerList2.add(314);
+    innerList2.add(7);
+    rec_4.add( innerList2);
+    HCatRecord tup_4 = new DefaultHCatRecord(rec_4);
+
+    return  new HCatRecord[]{tup_1,tup_2,tup_3,tup_4};
+
+  }
+}


