incubator-hcatalog-commits mailing list archives

From tra...@apache.org
Subject svn commit: r1383152 [15/27] - in /incubator/hcatalog/trunk: ./ hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/ hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/ hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/ ...
Date Mon, 10 Sep 2012 23:29:03 GMT
Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreNumbers.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreNumbers.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreNumbers.java (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/StoreNumbers.java Mon Sep 10 23:28:55 2012
@@ -52,12 +52,12 @@ import org.apache.hcatalog.mapreduce.Out
  * 'numbers_nopart_pig_empty_initially' (unpartitioned) table with the tinyint
  * and smallint columns in "numbers" being stored as "int" (since pig cannot handle
  * tinyint and smallint)
- * 
+ *
  * Usage: hadoop jar storenumbers <serveruri> <part|nopart|nopart_pig> <-libjars hive-hcat jar>
-        If the second argument is "part" data is written to datestamp = '2010101' partition of the numbers_part_empty_initially table.
-        If the second argument is "nopart", data is written to the unpartitioned numbers_nopart_empty_initially table.
-        If the second argument is "nopart_pig", data is written to the unpartitioned numbers_nopart_pig_empty_initially table.
-        The hcat jar location should be specified as file://<full path to jar>
+ If the second argument is "part" data is written to datestamp = '2010101' partition of the numbers_part_empty_initially table.
+ If the second argument is "nopart", data is written to the unpartitioned numbers_nopart_empty_initially table.
+ If the second argument is "nopart_pig", data is written to the unpartitioned numbers_nopart_pig_empty_initially table.
+ The hcat jar location should be specified as file://<full path to jar>
  */
 public class StoreNumbers {
 
@@ -66,167 +66,167 @@ public class StoreNumbers {
     private static final String NUMBERS_NON_PARTITIONED_TABLE_NAME = "numbers_nopart_empty_initially";
     private static final String NUMBERS_NON_PARTITIONED_PIG_TABLE_NAME = "numbers_nopart_pig_empty_initially";
     private static final String IS_PIG_NON_PART_TABLE = "is.pig.non.part.table";
-    
-  public static class SumMapper 
-       extends Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>{
-      
-      Integer intnum1000;
-      // though id is given as a Short by hcat, the map will emit it as an
-      // IntWritable so we can just sum in the reduce
-      Short id;
-      
-      // though intnum5 is handed as a Byte by hcat, the map() will emit it as
-      // an IntWritable so we can just sum in the reduce
-      Byte intnum5;
-      Integer intnum100;
-      Integer intnum;
-      Long longnum;
-      Float floatnum;
-      Double doublenum;
-    @Override
-  protected void map(WritableComparable key, HCatRecord value, 
-          org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,
-          WritableComparable,HCatRecord>.Context context) 
-    throws IOException ,InterruptedException {
-        boolean isnoPartPig = context.getConfiguration().getBoolean(IS_PIG_NON_PART_TABLE, false);
-        intnum1000 = ((Integer)value.get(0));
-        id = ((Short) value.get(1));
-        intnum5 = (((Byte)value.get(2)));
-        intnum100 = (((Integer) value.get(3)));
-        intnum = ((Integer) value.get(4));
-        longnum = ((Long) value.get(5));
-        floatnum = ((Float) value.get(6));
-        doublenum = ((Double) value.get(7));
-        HCatRecord output = new DefaultHCatRecord(8);
-        output.set(0, intnum1000 + 10);
-        if(isnoPartPig)
-        {
-            output.set(1, ((int)(id + 10)));
-        } else {
-            output.set(1, ((short)(id + 10)));
-        }
-        if(isnoPartPig) {
-            output.set(2,  (int)(intnum5 + 10));
-        } else {
-            output.set(2, (byte) (intnum5 + 10));
-        }
-        
-        output.set(3, intnum100 + 10);
-        output.set(4, intnum + 10);
-        output.set(5, (long) (longnum + 10));
-        output.set(6, (float) (floatnum + 10));
-        output.set(7, (double) (doublenum + 10));
-        for(int i = 0; i < 8; i++) {
-            System.err.println("XXX: class:" + output.get(i).getClass());
-        }
-        context.write(new IntWritable(0), output);
 
-    }
-  }
-  
-  
-   public static void main(String[] args) throws Exception {
-    Configuration conf = new Configuration();
-    args = new GenericOptionsParser(conf, args).getRemainingArgs();
-    String[] otherArgs = new String[2];
-    int j = 0;
-    for(int i = 0; i < args.length; i++) {
-        if(args[i].equals("-libjars")) {
-            // generic options parser doesn't seem to work!
-            conf.set("tmpjars", args[i+1]);
-            i = i+1; // skip it , the for loop will skip its value                
-        } else {
-            otherArgs[j++] = args[i];
+    public static class SumMapper
+        extends Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+
+        Integer intnum1000;
+        // though id is given as a Short by hcat, the map will emit it as an
+        // IntWritable so we can just sum in the reduce
+        Short id;
+
+        // though intnum5 is handed as a Byte by hcat, the map() will emit it as
+        // an IntWritable so we can just sum in the reduce
+        Byte intnum5;
+        Integer intnum100;
+        Integer intnum;
+        Long longnum;
+        Float floatnum;
+        Double doublenum;
+
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord,
+                               WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            boolean isnoPartPig = context.getConfiguration().getBoolean(IS_PIG_NON_PART_TABLE, false);
+            intnum1000 = ((Integer) value.get(0));
+            id = ((Short) value.get(1));
+            intnum5 = (((Byte) value.get(2)));
+            intnum100 = (((Integer) value.get(3)));
+            intnum = ((Integer) value.get(4));
+            longnum = ((Long) value.get(5));
+            floatnum = ((Float) value.get(6));
+            doublenum = ((Double) value.get(7));
+            HCatRecord output = new DefaultHCatRecord(8);
+            output.set(0, intnum1000 + 10);
+            if (isnoPartPig) {
+                output.set(1, ((int) (id + 10)));
+            } else {
+                output.set(1, ((short) (id + 10)));
+            }
+            if (isnoPartPig) {
+                output.set(2, (int) (intnum5 + 10));
+            } else {
+                output.set(2, (byte) (intnum5 + 10));
+            }
+
+            output.set(3, intnum100 + 10);
+            output.set(4, intnum + 10);
+            output.set(5, (long) (longnum + 10));
+            output.set(6, (float) (floatnum + 10));
+            output.set(7, (double) (doublenum + 10));
+            for (int i = 0; i < 8; i++) {
+                System.err.println("XXX: class:" + output.get(i).getClass());
+            }
+            context.write(new IntWritable(0), output);
+
         }
     }
-    if (otherArgs.length != 2) {
-      usage();
-    }
-    String serverUri = otherArgs[0];
-    if(otherArgs[1] == null || (
+
+
+    public static void main(String[] args) throws Exception {
+        Configuration conf = new Configuration();
+        args = new GenericOptionsParser(conf, args).getRemainingArgs();
+        String[] otherArgs = new String[2];
+        int j = 0;
+        for (int i = 0; i < args.length; i++) {
+            if (args[i].equals("-libjars")) {
+                // generic options parser doesn't seem to work!
+                conf.set("tmpjars", args[i + 1]);
+                i = i + 1; // skip it , the for loop will skip its value
+            } else {
+                otherArgs[j++] = args[i];
+            }
+        }
+        if (otherArgs.length != 2) {
+            usage();
+        }
+        String serverUri = otherArgs[0];
+        if (otherArgs[1] == null || (
             !otherArgs[1].equalsIgnoreCase("part") && !otherArgs[1].equalsIgnoreCase("nopart"))
             && !otherArgs[1].equalsIgnoreCase("nopart_pig")) {
-        usage();
-    }
-    boolean writeToPartitionedTable = (otherArgs[1].equalsIgnoreCase("part"));
-    boolean writeToNonPartPigTable = (otherArgs[1].equalsIgnoreCase("nopart_pig"));
-    String tableName = NUMBERS_TABLE_NAME;
-    String dbName = "default";
-    Map<String, String> outputPartitionKvps = new HashMap<String, String>();
-    String outputTableName = null;
-    conf.set(IS_PIG_NON_PART_TABLE, "false");
-    if(writeToPartitionedTable) {
-        outputTableName = NUMBERS_PARTITIONED_TABLE_NAME;
-        outputPartitionKvps.put("datestamp", "20100101");
-    } else {
-        if(writeToNonPartPigTable) {
-            conf.set(IS_PIG_NON_PART_TABLE, "true");
-            outputTableName = NUMBERS_NON_PARTITIONED_PIG_TABLE_NAME;
-        } else {
-            outputTableName = NUMBERS_NON_PARTITIONED_TABLE_NAME;
+            usage();
         }
-        // test with null or empty randomly
-        if(new Random().nextInt(2) == 0) {
-            outputPartitionKvps = null;
+        boolean writeToPartitionedTable = (otherArgs[1].equalsIgnoreCase("part"));
+        boolean writeToNonPartPigTable = (otherArgs[1].equalsIgnoreCase("nopart_pig"));
+        String tableName = NUMBERS_TABLE_NAME;
+        String dbName = "default";
+        Map<String, String> outputPartitionKvps = new HashMap<String, String>();
+        String outputTableName = null;
+        conf.set(IS_PIG_NON_PART_TABLE, "false");
+        if (writeToPartitionedTable) {
+            outputTableName = NUMBERS_PARTITIONED_TABLE_NAME;
+            outputPartitionKvps.put("datestamp", "20100101");
+        } else {
+            if (writeToNonPartPigTable) {
+                conf.set(IS_PIG_NON_PART_TABLE, "true");
+                outputTableName = NUMBERS_NON_PARTITIONED_PIG_TABLE_NAME;
+            } else {
+                outputTableName = NUMBERS_NON_PARTITIONED_TABLE_NAME;
+            }
+            // test with null or empty randomly
+            if (new Random().nextInt(2) == 0) {
+                outputPartitionKvps = null;
+            }
         }
-    }
-    
-    String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-    if(principalID != null)
-    conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
-    Job job = new Job(conf, "storenumbers");
-    
-    // initialize HCatInputFormat
-    HCatInputFormat.setInput(job, InputJobInfo.create(
-    		dbName, tableName, null));
-    // initialize HCatOutputFormat
-    HCatOutputFormat.setOutput(job, OutputJobInfo.create(
+
+        String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+        if (principalID != null)
+            conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+        Job job = new Job(conf, "storenumbers");
+
+        // initialize HCatInputFormat
+        HCatInputFormat.setInput(job, InputJobInfo.create(
+            dbName, tableName, null));
+        // initialize HCatOutputFormat
+        HCatOutputFormat.setOutput(job, OutputJobInfo.create(
             dbName, outputTableName, outputPartitionKvps));
-    // test with and without specifying schema randomly
-    HCatSchema s = HCatInputFormat.getTableSchema(job);
-    if(writeToNonPartPigTable) {
-        List<HCatFieldSchema> newHfsList = new ArrayList<HCatFieldSchema>();
-        // change smallint and tinyint to int
-        for(HCatFieldSchema hfs: s.getFields()){
-            if(hfs.getTypeString().equals("smallint")) {
-                newHfsList.add(new HCatFieldSchema(hfs.getName(), 
+        // test with and without specifying schema randomly
+        HCatSchema s = HCatInputFormat.getTableSchema(job);
+        if (writeToNonPartPigTable) {
+            List<HCatFieldSchema> newHfsList = new ArrayList<HCatFieldSchema>();
+            // change smallint and tinyint to int
+            for (HCatFieldSchema hfs : s.getFields()) {
+                if (hfs.getTypeString().equals("smallint")) {
+                    newHfsList.add(new HCatFieldSchema(hfs.getName(),
                         HCatFieldSchema.Type.INT, hfs.getComment()));
-            } else if(hfs.getTypeString().equals("tinyint")) {
-                newHfsList.add(new HCatFieldSchema(hfs.getName(), 
+                } else if (hfs.getTypeString().equals("tinyint")) {
+                    newHfsList.add(new HCatFieldSchema(hfs.getName(),
                         HCatFieldSchema.Type.INT, hfs.getComment()));
-            } else {
-                newHfsList.add(hfs);
+                } else {
+                    newHfsList.add(hfs);
+                }
             }
+            s = new HCatSchema(newHfsList);
         }
-        s = new HCatSchema(newHfsList);
-    } 
-    HCatOutputFormat.setSchema(job, s);
-    
-    
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(HCatOutputFormat.class);
-    job.setJarByClass(StoreNumbers.class);
-    job.setMapperClass(SumMapper.class);
-    job.setOutputKeyClass(IntWritable.class);
-    job.setNumReduceTasks(0);
-    job.setOutputValueClass(DefaultHCatRecord.class);
-    System.exit(job.waitForCompletion(true) ? 0 : 1);
-  }
+        HCatOutputFormat.setSchema(job, s);
+
+
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(HCatOutputFormat.class);
+        job.setJarByClass(StoreNumbers.class);
+        job.setMapperClass(SumMapper.class);
+        job.setOutputKeyClass(IntWritable.class);
+        job.setNumReduceTasks(0);
+        job.setOutputValueClass(DefaultHCatRecord.class);
+        System.exit(job.waitForCompletion(true) ? 0 : 1);
+    }
 
 
     /**
-     * 
+     *
      */
     private static void usage() {
         System.err.println("Usage: hadoop jar storenumbers <serveruri> <part|nopart|nopart_pig> <-libjars hive-hcat jar>\n" +
-                "\tIf the second argument is \"part\" data is written to datestamp = '2010101' partition of " +
-                "the numbers_part_empty_initially table.\n\tIf the second argument is \"nopart\", data is written to " +
-                "the unpartitioned numbers_nopart_empty_initially table.\n\tIf the second argument is \"nopart_pig\", " +
-                "data is written to the unpartitioned numbers_nopart_pig_empty_initially table.\nt" +
-        "The hcat jar location should be specified as file://<full path to jar>\n");
-    System.exit(2);
-        
+            "\tIf the second argument is \"part\" data is written to datestamp = '2010101' partition of " +
+            "the numbers_part_empty_initially table.\n\tIf the second argument is \"nopart\", data is written to " +
+            "the unpartitioned numbers_nopart_empty_initially table.\n\tIf the second argument is \"nopart_pig\", " +
+            "data is written to the unpartitioned numbers_nopart_pig_empty_initially table.\nt" +
+            "The hcat jar location should be specified as file://<full path to jar>\n");
+        System.exit(2);
+
     }
-   
+
 
 }
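
For reference, the three modes described in the StoreNumbers Javadoc above correspond to invocations of the following shape. The server URI and jar path remain placeholders, the jar/class naming simply follows the usage string rather than a verified artifact name, and (per the code, not the comment) the "part" mode writes to the datestamp=20100101 partition:

    hadoop jar storenumbers <serveruri> part -libjars file://<full path to hive-hcat jar>        # numbers_part_empty_initially, partition datestamp=20100101
    hadoop jar storenumbers <serveruri> nopart -libjars file://<full path to hive-hcat jar>      # numbers_nopart_empty_initially
    hadoop jar storenumbers <serveruri> nopart_pig -libjars file://<full path to hive-hcat jar>  # numbers_nopart_pig_empty_initially (smallint/tinyint written as int)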

Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SumNumbers.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SumNumbers.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SumNumbers.java (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/SumNumbers.java Mon Sep 10 23:28:55 2012
@@ -47,211 +47,211 @@ import org.apache.hcatalog.mapreduce.Inp
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce program
  * to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat jar>
-            The <tab|ctrla> argument controls the output delimiter
-            The hcat jar location should be specified as file://<full path to jar>
+ The <tab|ctrla> argument controls the output delimiter
+ The hcat jar location should be specified as file://<full path to jar>
  */
 public class SumNumbers {
 
     private static final String NUMBERS_TABLE_NAME = "numbers";
     private static final String TAB = "\t";
-    
-  public static class SumMapper 
-       extends Mapper<WritableComparable, HCatRecord, IntWritable, SumNumbers.ArrayWritable>{
-      
-      IntWritable intnum1000;
-      // though id is given as a Short by hcat, the map will emit it as an
-      // IntWritable so we can just sum in the reduce
-      IntWritable id;
-      
-      // though intnum5 is handed as a Byte by hcat, the map() will emit it as
-      // an IntWritable so we can just sum in the reduce
-      IntWritable intnum5;
-      IntWritable intnum100;
-      IntWritable intnum;
-      LongWritable longnum;
-      FloatWritable floatnum;
-      DoubleWritable doublenum;
-    @Override
-  protected void map(WritableComparable key, HCatRecord value, 
-          org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,
-          IntWritable,SumNumbers.ArrayWritable>.Context context) 
-    throws IOException ,InterruptedException {
-        intnum1000 = new IntWritable((Integer)value.get(0));
-        id = new IntWritable((Short) value.get(1));
-        intnum5 = new IntWritable(((Byte)value.get(2)));
-        intnum100 = new IntWritable(((Integer) value.get(3)));
-        intnum = new IntWritable((Integer) value.get(4));
-        longnum = new LongWritable((Long) value.get(5));
-        floatnum = new FloatWritable((Float) value.get(6));
-        doublenum = new DoubleWritable((Double) value.get(7));
-        SumNumbers.ArrayWritable outputValue = new SumNumbers.ArrayWritable(id, 
+
+    public static class SumMapper
+        extends Mapper<WritableComparable, HCatRecord, IntWritable, SumNumbers.ArrayWritable> {
+
+        IntWritable intnum1000;
+        // though id is given as a Short by hcat, the map will emit it as an
+        // IntWritable so we can just sum in the reduce
+        IntWritable id;
+
+        // though intnum5 is handed as a Byte by hcat, the map() will emit it as
+        // an IntWritable so we can just sum in the reduce
+        IntWritable intnum5;
+        IntWritable intnum100;
+        IntWritable intnum;
+        LongWritable longnum;
+        FloatWritable floatnum;
+        DoubleWritable doublenum;
+
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord,
+                               IntWritable, SumNumbers.ArrayWritable>.Context context)
+            throws IOException, InterruptedException {
+            intnum1000 = new IntWritable((Integer) value.get(0));
+            id = new IntWritable((Short) value.get(1));
+            intnum5 = new IntWritable(((Byte) value.get(2)));
+            intnum100 = new IntWritable(((Integer) value.get(3)));
+            intnum = new IntWritable((Integer) value.get(4));
+            longnum = new LongWritable((Long) value.get(5));
+            floatnum = new FloatWritable((Float) value.get(6));
+            doublenum = new DoubleWritable((Double) value.get(7));
+            SumNumbers.ArrayWritable outputValue = new SumNumbers.ArrayWritable(id,
                 intnum5, intnum100, intnum, longnum, floatnum, doublenum);
-        context.write(intnum1000, outputValue);
+            context.write(intnum1000, outputValue);
 
-    }
-  }
-  
-  public static class SumReducer extends Reducer<IntWritable, SumNumbers.ArrayWritable, 
-  LongWritable, Text> {
-      
-      
-    LongWritable dummyLong = null;
-      @Override
-    protected void reduce(IntWritable key, java.lang.Iterable<ArrayWritable> 
-      values, org.apache.hadoop.mapreduce.Reducer<IntWritable,ArrayWritable,LongWritable,Text>.Context context) 
-      throws IOException ,InterruptedException {
-          String output = key.toString() + TAB;
-          Long sumid = 0l;
-          Long sumintnum5 = 0l;
-          Long sumintnum100 = 0l;
-          Long sumintnum = 0l;
-          Long sumlongnum = 0l;
-          Float sumfloatnum = 0.0f;
-          Double sumdoublenum = 0.0;
-          for (ArrayWritable value : values) {
-            sumid += value.id.get();
-            sumintnum5 += value.intnum5.get();
-            sumintnum100 += value.intnum100.get();
-            sumintnum += value.intnum.get();
-            sumlongnum += value.longnum.get();
-            sumfloatnum += value.floatnum.get();
-            sumdoublenum += value.doublenum.get();
-        }
-          output += sumid + TAB;
-          output += sumintnum5 + TAB;
-          output += sumintnum100 + TAB;
-          output += sumintnum + TAB;
-          output += sumlongnum + TAB;
-          output += sumfloatnum + TAB;
-          output += sumdoublenum + TAB;
-          context.write(dummyLong, new Text(output));
-      }
-  }
-  
-   public static void main(String[] args) throws Exception {
-    Configuration conf = new Configuration();
-    args = new GenericOptionsParser(conf, args).getRemainingArgs();
-    String[] otherArgs = new String[4];
-    int j = 0;
-    for(int i = 0; i < args.length; i++) {
-        if(args[i].equals("-libjars")) {
-            // generic options parser doesn't seem to work!
-            conf.set("tmpjars", args[i+1]);
-            i = i+1; // skip it , the for loop will skip its value                
-        } else {
-            otherArgs[j++] = args[i];
         }
     }
-    if (otherArgs.length != 4) {
-      System.err.println("Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat jar>\n" +
-            "The <tab|ctrla> argument controls the output delimiter.\n" +
-            "The hcat jar location should be specified as file://<full path to jar>\n");
-      System.exit(2);
-    }
-    String serverUri = otherArgs[0];
-    String tableName = NUMBERS_TABLE_NAME;
-    String outputDir = otherArgs[1];
-    String dbName = "default";
-    
-    String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-    if(principalID != null)
-    conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
-    Job job = new Job(conf, "sumnumbers");
-    HCatInputFormat.setInput(job, InputJobInfo.create(
-    		dbName, tableName, null));
-    // initialize HCatOutputFormat
-    
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
-    job.setJarByClass(SumNumbers.class);
-    job.setMapperClass(SumMapper.class);
-    job.setReducerClass(SumReducer.class);
-    job.setMapOutputKeyClass(IntWritable.class);
-    job.setMapOutputValueClass(ArrayWritable.class);
-    job.setOutputKeyClass(LongWritable.class);
-    job.setOutputValueClass(Text.class);
-    FileOutputFormat.setOutputPath(job, new Path(outputDir));
-    System.exit(job.waitForCompletion(true) ? 0 : 1);
-  }
-   
-   public static class ArrayWritable implements Writable {
-
-       // though id is given as a Short by hcat, the map will emit it as an
-       // IntWritable so we can just sum in the reduce
-       IntWritable id;
-       
-       // though intnum5 is handed as a Byte by hcat, the map() will emit it as
-       // an IntWritable so we can just sum in the reduce
-       IntWritable intnum5;
-       
-       IntWritable intnum100;
-       IntWritable intnum;
-       LongWritable longnum;
-       FloatWritable floatnum;
-       DoubleWritable doublenum;
-       
-       /**
-     * 
-     */
-    public ArrayWritable() {
-        id = new IntWritable();
-        intnum5 = new IntWritable();
-        intnum100 = new IntWritable();
-        intnum = new IntWritable();
-        longnum = new LongWritable();
-        floatnum = new FloatWritable();
-        doublenum = new DoubleWritable();
-    }
-    
-    
-       
-    /**
-     * @param id
-     * @param intnum5
-     * @param intnum100
-     * @param intnum
-     * @param longnum
-     * @param floatnum
-     * @param doublenum
-     */
-    public ArrayWritable(IntWritable id, IntWritable intnum5,
-            IntWritable intnum100, IntWritable intnum, LongWritable longnum,
-            FloatWritable floatnum, DoubleWritable doublenum) {
-        this.id = id;
-        this.intnum5 = intnum5;
-        this.intnum100 = intnum100;
-        this.intnum = intnum;
-        this.longnum = longnum;
-        this.floatnum = floatnum;
-        this.doublenum = doublenum;
-    }
 
+    public static class SumReducer extends Reducer<IntWritable, SumNumbers.ArrayWritable,
+        LongWritable, Text> {
 
 
-    @Override
-    public void readFields(DataInput in) throws IOException {
-        id.readFields(in);
-        intnum5.readFields(in);
-        intnum100.readFields(in);
-        intnum.readFields(in);
-        longnum.readFields(in);
-        floatnum.readFields(in);
-        doublenum.readFields(in);
+        LongWritable dummyLong = null;
+
+        @Override
+        protected void reduce(IntWritable key, java.lang.Iterable<ArrayWritable>
+            values, org.apache.hadoop.mapreduce.Reducer<IntWritable, ArrayWritable, LongWritable, Text>.Context context)
+            throws IOException, InterruptedException {
+            String output = key.toString() + TAB;
+            Long sumid = 0l;
+            Long sumintnum5 = 0l;
+            Long sumintnum100 = 0l;
+            Long sumintnum = 0l;
+            Long sumlongnum = 0l;
+            Float sumfloatnum = 0.0f;
+            Double sumdoublenum = 0.0;
+            for (ArrayWritable value : values) {
+                sumid += value.id.get();
+                sumintnum5 += value.intnum5.get();
+                sumintnum100 += value.intnum100.get();
+                sumintnum += value.intnum.get();
+                sumlongnum += value.longnum.get();
+                sumfloatnum += value.floatnum.get();
+                sumdoublenum += value.doublenum.get();
+            }
+            output += sumid + TAB;
+            output += sumintnum5 + TAB;
+            output += sumintnum100 + TAB;
+            output += sumintnum + TAB;
+            output += sumlongnum + TAB;
+            output += sumfloatnum + TAB;
+            output += sumdoublenum + TAB;
+            context.write(dummyLong, new Text(output));
+        }
     }
 
-    @Override
-    public void write(DataOutput out) throws IOException {
-        id.write(out);
-        intnum5.write(out);
-        intnum100.write(out);
-        intnum.write(out);
-        longnum.write(out);
-        floatnum.write(out);
-        doublenum.write(out);
-        
+    public static void main(String[] args) throws Exception {
+        Configuration conf = new Configuration();
+        args = new GenericOptionsParser(conf, args).getRemainingArgs();
+        String[] otherArgs = new String[4];
+        int j = 0;
+        for (int i = 0; i < args.length; i++) {
+            if (args[i].equals("-libjars")) {
+                // generic options parser doesn't seem to work!
+                conf.set("tmpjars", args[i + 1]);
+                i = i + 1; // skip it , the for loop will skip its value
+            } else {
+                otherArgs[j++] = args[i];
+            }
+        }
+        if (otherArgs.length != 4) {
+            System.err.println("Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat jar>\n" +
+                "The <tab|ctrla> argument controls the output delimiter.\n" +
+                "The hcat jar location should be specified as file://<full path to jar>\n");
+            System.exit(2);
+        }
+        String serverUri = otherArgs[0];
+        String tableName = NUMBERS_TABLE_NAME;
+        String outputDir = otherArgs[1];
+        String dbName = "default";
+
+        String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+        if (principalID != null)
+            conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+        Job job = new Job(conf, "sumnumbers");
+        HCatInputFormat.setInput(job, InputJobInfo.create(
+            dbName, tableName, null));
+        // initialize HCatOutputFormat
+
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
+        job.setJarByClass(SumNumbers.class);
+        job.setMapperClass(SumMapper.class);
+        job.setReducerClass(SumReducer.class);
+        job.setMapOutputKeyClass(IntWritable.class);
+        job.setMapOutputValueClass(ArrayWritable.class);
+        job.setOutputKeyClass(LongWritable.class);
+        job.setOutputValueClass(Text.class);
+        FileOutputFormat.setOutputPath(job, new Path(outputDir));
+        System.exit(job.waitForCompletion(true) ? 0 : 1);
+    }
+
+    public static class ArrayWritable implements Writable {
+
+        // though id is given as a Short by hcat, the map will emit it as an
+        // IntWritable so we can just sum in the reduce
+        IntWritable id;
+
+        // though intnum5 is handed as a Byte by hcat, the map() will emit it as
+        // an IntWritable so we can just sum in the reduce
+        IntWritable intnum5;
+
+        IntWritable intnum100;
+        IntWritable intnum;
+        LongWritable longnum;
+        FloatWritable floatnum;
+        DoubleWritable doublenum;
+
+        /**
+         *
+         */
+        public ArrayWritable() {
+            id = new IntWritable();
+            intnum5 = new IntWritable();
+            intnum100 = new IntWritable();
+            intnum = new IntWritable();
+            longnum = new LongWritable();
+            floatnum = new FloatWritable();
+            doublenum = new DoubleWritable();
+        }
+
+
+        /**
+         * @param id
+         * @param intnum5
+         * @param intnum100
+         * @param intnum
+         * @param longnum
+         * @param floatnum
+         * @param doublenum
+         */
+        public ArrayWritable(IntWritable id, IntWritable intnum5,
+                             IntWritable intnum100, IntWritable intnum, LongWritable longnum,
+                             FloatWritable floatnum, DoubleWritable doublenum) {
+            this.id = id;
+            this.intnum5 = intnum5;
+            this.intnum100 = intnum100;
+            this.intnum = intnum;
+            this.longnum = longnum;
+            this.floatnum = floatnum;
+            this.doublenum = doublenum;
+        }
+
+
+        @Override
+        public void readFields(DataInput in) throws IOException {
+            id.readFields(in);
+            intnum5.readFields(in);
+            intnum100.readFields(in);
+            intnum.readFields(in);
+            longnum.readFields(in);
+            floatnum.readFields(in);
+            doublenum.readFields(in);
+        }
+
+        @Override
+        public void write(DataOutput out) throws IOException {
+            id.write(out);
+            intnum5.write(out);
+            intnum100.write(out);
+            intnum.write(out);
+            longnum.write(out);
+            floatnum.write(out);
+            doublenum.write(out);
+
+        }
+
     }
-       
-   }
 }
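
The corresponding SumNumbers invocation, again with placeholder values (the output directory shown is only illustrative), writes one tab-delimited line of per-group sums under the given output directory:

    hadoop jar sumnumbers <serveruri> <output dir, e.g. /tmp/sumnumbers.out> -libjars file://<full path to hive-hcat jar>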

Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/TypeDataCheck.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/TypeDataCheck.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/TypeDataCheck.java (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/TypeDataCheck.java Mon Sep 10 23:28:55 2012
@@ -44,138 +44,139 @@ import org.apache.hcatalog.mapreduce.Inp
  * objective by checking the type of the Objects representing the columns against
  * the schema provided as a cmdline arg. It achieves the second objective by
  * writing the data as Text to be compared against golden results.
- * 
+ *
  * The schema specification consists of the types as given by "describe <table>"
  * with each column's type separated from the next column's type by a '+'
- * 
+ *
  * Can be used against "numbers" and "complex" tables.
- * 
+ *
  * Usage: hadoop jar testudf.jar typedatacheck <serveruri> <tablename> 
  * <hive types of cols + delimited> <output dir> <tab|ctrla> <-libjars hive-hcat jar>
-            The <tab|ctrla> argument controls the output delimiter.
-            The hcat jar location should be specified as file://<full path to jar>
+ The <tab|ctrla> argument controls the output delimiter.
+ The hcat jar location should be specified as file://<full path to jar>
  */
-public class TypeDataCheck implements Tool{
+public class TypeDataCheck implements Tool {
 
-	static String SCHEMA_KEY = "schema";
-	static String DELIM = "delim";
-	private static Configuration conf = new Configuration();
-
-	public static class TypeDataCheckMapper 
-	extends Mapper<WritableComparable, HCatRecord, Long, Text>{
-
-		Long dummykey = null;
-		String[] types;
-		String delim = "\u0001";      
-		@Override
-		protected void setup(org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,Long,Text>.Context context) 
-		throws IOException ,InterruptedException {
-			String typesStr = context.getConfiguration().get(SCHEMA_KEY);
-			delim = context.getConfiguration().get(DELIM);
-			if(delim.equals("tab")) {
-				delim = "\t";
-			} else if (delim.equals("ctrla")) {
-				delim = "\u0001";
-			}
-			types = typesStr.split("\\+");
-			for(int i = 0; i < types.length; i++) {
-				types[i] = types[i].toLowerCase();
-			}
-
-
-		}
-
-		String check(HCatRecord r) throws IOException {
-			String s = "";
-			for(int i = 0; i < r.size(); i++) {
-				s += Util.check(types[i], r.get(i));
-				if(i != r.size() - 1) {
-					s += delim;
-				}
-			}
-			return s;
-		}
-
-		@Override
-		protected void map(WritableComparable key, HCatRecord value, 
-				org.apache.hadoop.mapreduce.Mapper<WritableComparable,HCatRecord,Long,Text>.Context context) 
-		throws IOException ,InterruptedException {
-			context.write(dummykey, new Text(check(value)));
-		}
-	}
-
-	public static void main(String[] args) throws Exception {
-		TypeDataCheck self = new TypeDataCheck();
-		System.exit(ToolRunner.run(conf, self, args));
-	}
-
-	public int run(String[] args) {
-		try {
-			args = new GenericOptionsParser(conf, args).getRemainingArgs();
-			String[] otherArgs = new String[5];
-			int j = 0;
-			for(int i = 0; i < args.length; i++) {
-				if(args[i].equals("-libjars")) {
-                                        conf.set("tmpjars",args[i+1]);
-					i = i+1; // skip it , the for loop will skip its value                
-				} else {
-					otherArgs[j++] = args[i];
-				}
-			}
-			if (otherArgs.length !=5 ) {
-				System.err.println("Other args:" + Arrays.asList(otherArgs));
-				System.err.println("Usage: hadoop jar testudf.jar typedatacheck " +
-						"<serveruri> <tablename> <hive types of cols + delimited> " +
-						"<output dir> <tab|ctrla> <-libjars hive-hcat jar>\n" +
-						"The <tab|ctrla> argument controls the output delimiter.\n" +
-				"The hcat jar location should be specified as file://<full path to jar>\n");
-				System.err.println(" The <tab|ctrla> argument controls the output delimiter.");
-				System.exit(2);
-			}
-			String serverUri = otherArgs[0];
-			String tableName = otherArgs[1];
-			String schemaStr = otherArgs[2];
-			String outputDir = otherArgs[3];
-			String outputdelim = otherArgs[4];
-			if(!outputdelim.equals("tab") && !outputdelim.equals("ctrla")) {
-				System.err.println("ERROR: Specify 'tab' or 'ctrla' for output delimiter");
-			}
-			String dbName = "default";
-
-			String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
-			if(principalID != null){
-				conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);				
-			}
-			Job job = new Job(conf, "typedatacheck");
-			// initialize HCatInputFormat
-			HCatInputFormat.setInput(job, InputJobInfo.create(
-					dbName, tableName, null));
-			HCatSchema s = HCatInputFormat.getTableSchema(job);
-			job.getConfiguration().set(SCHEMA_KEY, schemaStr);
-			job.getConfiguration().set(DELIM, outputdelim);
-			job.setInputFormatClass(HCatInputFormat.class);
-			job.setOutputFormatClass(TextOutputFormat.class);
-			job.setJarByClass(TypeDataCheck.class);
-			job.setMapperClass(TypeDataCheckMapper.class);
-			job.setNumReduceTasks(0);
-			job.setOutputKeyClass(Long.class);
-			job.setOutputValueClass(Text.class);
-			FileOutputFormat.setOutputPath(job, new Path(outputDir));
-			System.exit(job.waitForCompletion(true) ? 0 : 1);
-			return 0;
-		} catch (Exception e) {
-			throw new RuntimeException(e);
-		}
-	}
-
-	@Override
-	public Configuration getConf() {
-		return conf;
-	}
-
-	@Override
-	public void setConf(Configuration conf) {
-		TypeDataCheck.conf = conf;
-	}
+    static String SCHEMA_KEY = "schema";
+    static String DELIM = "delim";
+    private static Configuration conf = new Configuration();
+
+    public static class TypeDataCheckMapper
+        extends Mapper<WritableComparable, HCatRecord, Long, Text> {
+
+        Long dummykey = null;
+        String[] types;
+        String delim = "\u0001";
+
+        @Override
+        protected void setup(org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, Long, Text>.Context context)
+            throws IOException, InterruptedException {
+            String typesStr = context.getConfiguration().get(SCHEMA_KEY);
+            delim = context.getConfiguration().get(DELIM);
+            if (delim.equals("tab")) {
+                delim = "\t";
+            } else if (delim.equals("ctrla")) {
+                delim = "\u0001";
+            }
+            types = typesStr.split("\\+");
+            for (int i = 0; i < types.length; i++) {
+                types[i] = types[i].toLowerCase();
+            }
+
+
+        }
+
+        String check(HCatRecord r) throws IOException {
+            String s = "";
+            for (int i = 0; i < r.size(); i++) {
+                s += Util.check(types[i], r.get(i));
+                if (i != r.size() - 1) {
+                    s += delim;
+                }
+            }
+            return s;
+        }
+
+        @Override
+        protected void map(WritableComparable key, HCatRecord value,
+                           org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, Long, Text>.Context context)
+            throws IOException, InterruptedException {
+            context.write(dummykey, new Text(check(value)));
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        TypeDataCheck self = new TypeDataCheck();
+        System.exit(ToolRunner.run(conf, self, args));
+    }
+
+    public int run(String[] args) {
+        try {
+            args = new GenericOptionsParser(conf, args).getRemainingArgs();
+            String[] otherArgs = new String[5];
+            int j = 0;
+            for (int i = 0; i < args.length; i++) {
+                if (args[i].equals("-libjars")) {
+                    conf.set("tmpjars", args[i + 1]);
+                    i = i + 1; // skip it , the for loop will skip its value
+                } else {
+                    otherArgs[j++] = args[i];
+                }
+            }
+            if (otherArgs.length != 5) {
+                System.err.println("Other args:" + Arrays.asList(otherArgs));
+                System.err.println("Usage: hadoop jar testudf.jar typedatacheck " +
+                    "<serveruri> <tablename> <hive types of cols + delimited> " +
+                    "<output dir> <tab|ctrla> <-libjars hive-hcat jar>\n" +
+                    "The <tab|ctrla> argument controls the output delimiter.\n" +
+                    "The hcat jar location should be specified as file://<full path to jar>\n");
+                System.err.println(" The <tab|ctrla> argument controls the output delimiter.");
+                System.exit(2);
+            }
+            String serverUri = otherArgs[0];
+            String tableName = otherArgs[1];
+            String schemaStr = otherArgs[2];
+            String outputDir = otherArgs[3];
+            String outputdelim = otherArgs[4];
+            if (!outputdelim.equals("tab") && !outputdelim.equals("ctrla")) {
+                System.err.println("ERROR: Specify 'tab' or 'ctrla' for output delimiter");
+            }
+            String dbName = "default";
+
+            String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            if (principalID != null) {
+                conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
+            }
+            Job job = new Job(conf, "typedatacheck");
+            // initialize HCatInputFormat
+            HCatInputFormat.setInput(job, InputJobInfo.create(
+                dbName, tableName, null));
+            HCatSchema s = HCatInputFormat.getTableSchema(job);
+            job.getConfiguration().set(SCHEMA_KEY, schemaStr);
+            job.getConfiguration().set(DELIM, outputdelim);
+            job.setInputFormatClass(HCatInputFormat.class);
+            job.setOutputFormatClass(TextOutputFormat.class);
+            job.setJarByClass(TypeDataCheck.class);
+            job.setMapperClass(TypeDataCheckMapper.class);
+            job.setNumReduceTasks(0);
+            job.setOutputKeyClass(Long.class);
+            job.setOutputValueClass(Text.class);
+            FileOutputFormat.setOutputPath(job, new Path(outputDir));
+            System.exit(job.waitForCompletion(true) ? 0 : 1);
+            return 0;
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    public Configuration getConf() {
+        return conf;
+    }
+
+    @Override
+    public void setConf(Configuration conf) {
+        TypeDataCheck.conf = conf;
+    }
 
 }
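
Combining the '+'-delimited schema convention described in the TypeDataCheck Javadoc with the column layout the jobs above read from the numbers table (int, smallint, tinyint, int, int, bigint, float, double), an invocation would look roughly as follows; the server URI, output directory and jar path are placeholders:

    hadoop jar testudf.jar typedatacheck <serveruri> numbers int+smallint+tinyint+int+int+bigint+float+double <output dir> tab -libjars file://<full path to hive-hcat jar>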

Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/Util.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/Util.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/Util.java (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/Util.java Mon Sep 10 23:28:55 2012
@@ -27,8 +27,8 @@ import java.util.Map.Entry;
 public class Util {
 
     static Map<String, Class<?>> typeMap = new HashMap<String, Class<?>>();
-    
-    static{
+
+    static {
         typeMap.put("tinyint", Byte.class);
         typeMap.put("smallint", Short.class);
         typeMap.put("int", Integer.class);
@@ -41,31 +41,31 @@ public class Util {
         typeMap.put("map<string,string>", Map.class);
         typeMap.put("array<map<string,string>>", List.class);
     }
-    
+
     public static void die(String expectedType, Object o) throws IOException {
-        throw new IOException("Expected " + expectedType + ", got " +  
-              o.getClass().getName());
+        throw new IOException("Expected " + expectedType + ", got " +
+            o.getClass().getName());
     }
-    
-    
+
+
     public static String check(String type, Object o) throws IOException {
-        if(o == null) {
+        if (o == null) {
             return "null";
         }
-        if(check(typeMap.get(type), o)) {
-            if(type.equals("map<string,string>")) {
+        if (check(typeMap.get(type), o)) {
+            if (type.equals("map<string,string>")) {
                 Map<String, String> m = (Map<String, String>) o;
                 check(m);
-            } else if(type.equals("array<map<string,string>>")) {
+            } else if (type.equals("array<map<string,string>>")) {
                 List<Map<String, String>> listOfMaps = (List<Map<String, String>>) o;
-                for(Map<String, String> m: listOfMaps) {
+                for (Map<String, String> m : listOfMaps) {
                     check(m);
                 }
-            } else if(type.equals("struct<num:int,str:string,dbl:double>")) {
+            } else if (type.equals("struct<num:int,str:string,dbl:double>")) {
                 List<Object> l = (List<Object>) o;
-                if(!check(Integer.class, l.get(0)) ||
-                        !check(String.class, l.get(1)) ||
-                                !check(Double.class, l.get(2))) {
+                if (!check(Integer.class, l.get(0)) ||
+                    !check(String.class, l.get(1)) ||
+                    !check(Double.class, l.get(2))) {
                     die("struct<num:int,str:string,dbl:double>", l);
                 }
             }
@@ -74,32 +74,32 @@ public class Util {
         }
         return o.toString();
     }
-    
+
     /**
-   * @param m
-   * @throws IOException 
-   */
-  public static void check(Map<String, String> m) throws IOException {
-      if(m == null) {
-          return;
-      }
-      for(Entry<String, String> e: m.entrySet()) {
-          // just access key and value to ensure they are correct
-          if(!check(String.class, e.getKey())) {
-              die("String", e.getKey());
-          }
-          if(!check(String.class, e.getValue())) {
-              die("String", e.getValue());
-          }
-      }
-      
-  }
+     * @param m
+     * @throws IOException
+     */
+    public static void check(Map<String, String> m) throws IOException {
+        if (m == null) {
+            return;
+        }
+        for (Entry<String, String> e : m.entrySet()) {
+            // just access key and value to ensure they are correct
+            if (!check(String.class, e.getKey())) {
+                die("String", e.getKey());
+            }
+            if (!check(String.class, e.getValue())) {
+                die("String", e.getValue());
+            }
+        }
 
-  public static boolean check(Class<?> expected, Object actual) {
-        if(actual == null) {
+    }
+
+    public static boolean check(Class<?> expected, Object actual) {
+        if (actual == null) {
             return true;
         }
         return expected.isAssignableFrom(actual.getClass());
     }
-    
+
 }

Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteJson.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteJson.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteJson.java (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteJson.java Mon Sep 10 23:28:55 2012
@@ -42,7 +42,7 @@ import org.apache.hcatalog.mapreduce.Out
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce
  * program to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat
  * jar> The <tab|ctrla> argument controls the output delimiter The hcat jar
  * location should be specified as file://<full path to jar>
@@ -50,7 +50,7 @@ import org.apache.hcatalog.mapreduce.Out
 public class WriteJson extends Configured implements Tool {
 
     public static class Map extends
-            Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+        Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
 
         String s;
         Integer i;
@@ -58,19 +58,19 @@ public class WriteJson extends Configure
 
         @Override
         protected void map(
-                WritableComparable key,
-                HCatRecord value,
-                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
-                throws IOException, InterruptedException {
-            s = value.get(0)==null?null:(String)value.get(0);
-            i = value.get(1)==null?null:(Integer)value.get(1);
-            d = value.get(2)==null?null:(Double)value.get(2);
-            
+            WritableComparable key,
+            HCatRecord value,
+            org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            s = value.get(0) == null ? null : (String) value.get(0);
+            i = value.get(1) == null ? null : (Integer) value.get(1);
+            d = value.get(2) == null ? null : (Double) value.get(2);
+
             HCatRecord record = new DefaultHCatRecord(5);
             record.set(0, s);
             record.set(1, i);
             record.set(2, d);
-            
+
             context.write(null, record);
 
         }
@@ -86,12 +86,12 @@ public class WriteJson extends Configure
         String dbName = null;
 
         String principalID = System
-                .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
         if (principalID != null)
             conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
         Job job = new Job(conf, "WriteJson");
         HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
-                inputTableName, null));
+            inputTableName, null));
         // initialize HCatOutputFormat
 
         job.setInputFormatClass(HCatInputFormat.class);
@@ -101,10 +101,10 @@ public class WriteJson extends Configure
         job.setOutputValueClass(DefaultHCatRecord.class);
         job.setNumReduceTasks(0);
         HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
-                outputTableName, null));
+            outputTableName, null));
         HCatSchema s = HCatInputFormat.getTableSchema(job);
         System.err.println("INFO: output schema explicitly set for writing:"
-                + s);
+            + s);
         HCatOutputFormat.setSchema(job, s);
         job.setOutputFormatClass(HCatOutputFormat.class);
         return (job.waitForCompletion(true) ? 0 : 1);

Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteRC.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteRC.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteRC.java (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteRC.java Mon Sep 10 23:28:55 2012
@@ -42,7 +42,7 @@ import org.apache.hcatalog.mapreduce.Out
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce
  * program to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat
  * jar> The <tab|ctrla> argument controls the output delimiter The hcat jar
  * location should be specified as file://<full path to jar>
@@ -50,29 +50,29 @@ import org.apache.hcatalog.mapreduce.Out
 public class WriteRC extends Configured implements Tool {
 
     public static class Map extends
-            Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+        Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
 
         String name;
         Integer age;
         Double gpa;
-        
+
         @Override
         protected void map(
-                WritableComparable key,
-                HCatRecord value,
-                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
-                throws IOException, InterruptedException {
-            name = value.get(0)==null?null:(String)value.get(0);
-            age = value.get(1)==null?null:(Integer)value.get(1);
-            gpa = value.get(2)==null?null:(Double)value.get(2);
+            WritableComparable key,
+            HCatRecord value,
+            org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            name = value.get(0) == null ? null : (String) value.get(0);
+            age = value.get(1) == null ? null : (Integer) value.get(1);
+            gpa = value.get(2) == null ? null : (Double) value.get(2);
 
             if (gpa != null) gpa = Math.floor(gpa) + 0.1;
-            
+
             HCatRecord record = new DefaultHCatRecord(5);
             record.set(0, name);
             record.set(1, age);
             record.set(2, gpa);
-            
+
             context.write(null, record);
 
         }
@@ -88,12 +88,12 @@ public class WriteRC extends Configured 
         String dbName = null;
 
         String principalID = System
-                .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
         if (principalID != null)
             conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
         Job job = new Job(conf, "WriteRC");
         HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
-                inputTableName, null));
+            inputTableName, null));
         // initialize HCatOutputFormat
 
         job.setInputFormatClass(HCatInputFormat.class);
@@ -103,10 +103,10 @@ public class WriteRC extends Configured 
         job.setOutputValueClass(DefaultHCatRecord.class);
         job.setNumReduceTasks(0);
         HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
-                outputTableName, null));
+            outputTableName, null));
         HCatSchema s = HCatInputFormat.getTableSchema(job);
         System.err.println("INFO: output schema explicitly set for writing:"
-                + s);
+            + s);
         HCatOutputFormat.setSchema(job, s);
         job.setOutputFormatClass(HCatOutputFormat.class);
         return (job.waitForCompletion(true) ? 0 : 1);

Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteText.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteText.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteText.java (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteText.java Mon Sep 10 23:28:55 2012
@@ -42,7 +42,7 @@ import org.apache.hcatalog.mapreduce.Out
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce
  * program to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar sumnumbers <serveruri> <output dir> <-libjars hive-hcat
  * jar> The <tab|ctrla> argument controls the output delimiter The hcat jar
  * location should be specified as file://<full path to jar>
@@ -50,7 +50,7 @@ import org.apache.hcatalog.mapreduce.Out
 public class WriteText extends Configured implements Tool {
 
     public static class Map extends
-            Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+        Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
 
         byte t;
         short si;
@@ -62,18 +62,18 @@ public class WriteText extends Configure
 
         @Override
         protected void map(
-                WritableComparable key,
-                HCatRecord value,
-                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
-                throws IOException, InterruptedException {
-            t = (Byte)value.get(0);
-            si = (Short)value.get(1);
-            i = (Integer)value.get(2);
-            b = (Long)value.get(3);
-            f = (Float)value.get(4);
-            d = (Double)value.get(5);
-            s = (String)value.get(6);
-            
+            WritableComparable key,
+            HCatRecord value,
+            org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            t = (Byte) value.get(0);
+            si = (Short) value.get(1);
+            i = (Integer) value.get(2);
+            b = (Long) value.get(3);
+            f = (Float) value.get(4);
+            d = (Double) value.get(5);
+            s = (String) value.get(6);
+
             HCatRecord record = new DefaultHCatRecord(7);
             record.set(0, t);
             record.set(1, si);
@@ -82,7 +82,7 @@ public class WriteText extends Configure
             record.set(4, f);
             record.set(5, d);
             record.set(6, s);
-            
+
             context.write(null, record);
 
         }
@@ -98,12 +98,12 @@ public class WriteText extends Configure
         String dbName = null;
 
         String principalID = System
-                .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
         if (principalID != null)
             conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
         Job job = new Job(conf, "WriteText");
         HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
-                inputTableName, null));
+            inputTableName, null));
         // initialize HCatOutputFormat
 
         job.setInputFormatClass(HCatInputFormat.class);
@@ -113,10 +113,10 @@ public class WriteText extends Configure
         job.setOutputValueClass(DefaultHCatRecord.class);
         job.setNumReduceTasks(0);
         HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
-                outputTableName, null));
+            outputTableName, null));
         HCatSchema s = HCatInputFormat.getTableSchema(job);
         System.err.println("INFO: output schema explicitly set for writing:"
-                + s);
+            + s);
         HCatOutputFormat.setSchema(job, s);
         job.setOutputFormatClass(HCatOutputFormat.class);
         return (job.waitForCompletion(true) ? 0 : 1);
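
WriteText, like the other utilities in this directory, extends Configured and implements Tool, so it would normally be launched through Hadoop's ToolRunner, which also handles the -libjars option mentioned in the usage string. A minimal, illustrative driver (the driver class name is invented; the utility's real main() and argument handling may differ) looks like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ToolRunner;
    import org.apache.hcatalog.utils.WriteText;

    public class WriteTextDriver {
        public static void main(String[] args) throws Exception {
            // ToolRunner parses generic options such as -libjars before calling run().
            int exitCode = ToolRunner.run(new Configuration(), new WriteText(), args);
            System.exit(exitCode);
        }
    }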

Modified: incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteTextPartitioned.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteTextPartitioned.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteTextPartitioned.java (original)
+++ incubator/hcatalog/trunk/src/test/e2e/hcatalog/udfs/java/org/apache/hcatalog/utils/WriteTextPartitioned.java Mon Sep 10 23:28:55 2012
@@ -46,7 +46,7 @@ import org.apache.hcatalog.mapreduce.Out
  * table. It performs a group by on the first column and a SUM operation on the
  * other columns. This is to simulate a typical operation in a map reduce
  * program to test that hcat hands the right data to the map reduce program
- * 
+ *
  * Usage: hadoop jar org.apache.hcatalog.utils.HBaseReadWrite -libjars
  * &lt;hcat_jar&gt; * &lt;serveruri&gt; &lt;input_tablename&gt; &lt;output_tablename&gt; [filter]
  * If filter is given it will be provided as the partition to write to.
@@ -56,23 +56,23 @@ public class WriteTextPartitioned extend
     static String filter = null;
 
     public static class Map extends
-            Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
+        Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord> {
 
         @Override
         protected void map(
-                WritableComparable key,
-                HCatRecord value,
-                org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
-                throws IOException, InterruptedException {
-            String name = (String)value.get(0);
-            int age = (Integer)value.get(1);
-            String ds = (String)value.get(3);
-            
+            WritableComparable key,
+            HCatRecord value,
+            org.apache.hadoop.mapreduce.Mapper<WritableComparable, HCatRecord, WritableComparable, HCatRecord>.Context context)
+            throws IOException, InterruptedException {
+            String name = (String) value.get(0);
+            int age = (Integer) value.get(1);
+            String ds = (String) value.get(3);
+
             HCatRecord record = (filter == null ? new DefaultHCatRecord(3) : new DefaultHCatRecord(2));
             record.set(0, name);
             record.set(1, age);
             if (filter == null) record.set(2, ds);
-            
+
             context.write(null, record);
 
         }
@@ -89,12 +89,12 @@ public class WriteTextPartitioned extend
         String dbName = null;
 
         String principalID = System
-                .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
+            .getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
         if (principalID != null)
             conf.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
         Job job = new Job(conf, "WriteTextPartitioned");
         HCatInputFormat.setInput(job, InputJobInfo.create(dbName,
-                inputTableName, filter));
+            inputTableName, filter));
         // initialize HCatOutputFormat
 
         job.setInputFormatClass(HCatInputFormat.class);
@@ -112,7 +112,7 @@ public class WriteTextPartitioned extend
             partitionVals.put(s[0], val);
         }
         HCatOutputFormat.setOutput(job, OutputJobInfo.create(dbName,
-                outputTableName, partitionVals));
+            outputTableName, partitionVals));
         HCatSchema s = HCatInputFormat.getTableSchema(job);
         // Build the schema for this table, which is slightly different than the
         // schema for the input table
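
The run() method above turns an optional filter into the partition map handed to OutputJobInfo.create, which is what directs the write to a single partition. Taken in isolation, that step looks roughly like the sketch below; the table name and the "ds" partition key and value are illustrative only.

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hcatalog.mapreduce.HCatOutputFormat;
    import org.apache.hcatalog.mapreduce.OutputJobInfo;

    public class PartitionedOutputSketch {
        // Configure the job to write into one partition of an HCat table.
        public static void setPartitionedOutput(Job job) throws IOException {
            Map<String, String> partitionVals = new HashMap<String, String>();
            partitionVals.put("ds", "20110924");   // hypothetical partition key and value
            HCatOutputFormat.setOutput(job, OutputJobInfo.create(
                null /* default database */, "my_partitioned_table", partitionVals));
        }
    }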

Modified: incubator/hcatalog/trunk/src/test/e2e/templeton/build.xml
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/e2e/templeton/build.xml?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/e2e/templeton/build.xml (original)
+++ incubator/hcatalog/trunk/src/test/e2e/templeton/build.xml Mon Sep 10 23:28:55 2012
@@ -17,77 +17,76 @@
 
 <project name="TestHarnessTempletonTests" default="test">
 
-  <!-- Separate property name for udfs' build.xml -->
-  <property name="e2e.lib.dir" value="${basedir}/lib"/>
-
-  <property name="test.src" value="${basedir}/tests"/>
-  <property name="driver.src" value="${basedir}/drivers"/>
-  <property name="harness.dir" value="${basedir}/../harness"/>
-  <property name="inpdir.local" value="${basedir}/inpdir/"/>
-  <property name="test.location" value="${basedir}/testWorkDir"/>
-  <property name="driver.src" value="${basedir}/drivers"/>
-
-  <!-- Check that the necessary properties are setup -->
-  <target name="property-check">
-    <fail message="Please set the property harness.webhdfs.url to the namenode base url of the cluster"
-      unless="harness.webhdfs.url"/>
-    <fail message="Please set the property harness.templeton.url to the templeton server base url of the cluster"
-      unless="harness.templeton.url"/>
-    <fail message="Please set the property inpdir.hdfs to the test input directory on hdfs"
-      unless="inpdir.hdfs"/>
-  </target>
-
-  <!-- Prep the test area -->
-  <target name="init-test">
-    <mkdir dir="${test.location}"/>
-  </target>
-
-  <target name="test" depends="property-check, init-test" >
-    <property name="tests.to.run" value=""/>
-    <exec executable="${harness.dir}/test_harness.pl" dir="${test.location}" failonerror="true">
-      <env key="HARNESS_ROOT" value="${harness.dir}"/>
-      <env key="DRIVER_ROOT" value="${basedir}/drivers"/>
-      <env key="TH_WORKING_DIR" value="${test.location}"/>
-      <env key="TH_INPDIR_LOCAL" value="${inpdir.local}"/>
-      <env key="TH_INPDIR_HDFS" value="${inpdir.hdfs}"/>
-      <env key="TH_OUT" value="."/>
-      <env key="TH_ROOT" value="."/>
-      <env key="WEBHDFS_URL" value="${harness.webhdfs.url}"/>
-      <env key="TEMPLETON_URL" value="${harness.templeton.url}"/>
-      <env key="USER_NAME" value="${test.user.name}"/>
-      <env key="HARNESS_CONF" value="${basedir}/conf/default.conf"/>
-      <env key="SECURE_MODE" value="${secure.mode}"/>
-      <arg line="${tests.to.run}"/>
-      <arg value="${basedir}/tests/serverstatus.conf"/>
-      <arg value="${basedir}/tests/ddl.conf"/>
-      <arg value="${basedir}/tests/jobsubmission.conf"/>
-    </exec>
-  </target>
-
-  <target name="test-hcat-authorization" depends="property-check, init-test" >
-    <property name="tests.to.run" value=""/>
-    <exec executable="${harness.dir}/test_harness.pl" dir="${test.location}" failonerror="true">
-      <env key="HARNESS_ROOT" value="${harness.dir}"/>
-      <env key="DRIVER_ROOT" value="${basedir}/drivers"/>
-      <env key="TH_WORKING_DIR" value="${test.location}"/>
-      <env key="TH_INPDIR_LOCAL" value="${inpdir.local}"/>
-      <env key="TH_INPDIR_HDFS" value="${inpdir.hdfs}"/>
-      <env key="TH_OUT" value="."/>
-      <env key="TH_ROOT" value="."/>
-      <env key="WEBHDFS_URL" value="${harness.webhdfs.url}"/>
-      <env key="TEMPLETON_URL" value="${harness.templeton.url}"/>
-      <env key="USER_NAME" value="${test.user.name}"/>
-      <env key="GROUP_NAME" value="${test.group.name}"/>
-      <env key="GROUP_USER_NAME" value="${test.group.user.name}"/>
-      <env key="OTHER_USER_NAME" value="${test.other.user.name}"/>
-      <env key="HARNESS_CONF" value="${basedir}/conf/default.conf"/>
-      <env key="SECURE_MODE" value="${secure.mode}"/>
-      <env key="KEYTAB_DIR" value="${keytab.dir}"/>
-      <arg line="${tests.to.run}"/>
-      <arg value="${basedir}/tests/hcatperms.conf"/>
-    </exec>
-  </target>
+    <!-- Separate property name for udfs' build.xml -->
+    <property name="e2e.lib.dir" value="${basedir}/lib"/>
 
+    <property name="test.src" value="${basedir}/tests"/>
+    <property name="driver.src" value="${basedir}/drivers"/>
+    <property name="harness.dir" value="${basedir}/../harness"/>
+    <property name="inpdir.local" value="${basedir}/inpdir/"/>
+    <property name="test.location" value="${basedir}/testWorkDir"/>
+    <property name="driver.src" value="${basedir}/drivers"/>
+
+    <!-- Check that the necessary properties are setup -->
+    <target name="property-check">
+        <fail message="Please set the property harness.webhdfs.url to the namenode base url of the cluster"
+              unless="harness.webhdfs.url"/>
+        <fail message="Please set the property harness.templeton.url to the templeton server base url of the cluster"
+              unless="harness.templeton.url"/>
+        <fail message="Please set the property inpdir.hdfs to the test input directory on hdfs"
+              unless="inpdir.hdfs"/>
+    </target>
+
+    <!-- Prep the test area -->
+    <target name="init-test">
+        <mkdir dir="${test.location}"/>
+    </target>
+
+    <target name="test" depends="property-check, init-test">
+        <property name="tests.to.run" value=""/>
+        <exec executable="${harness.dir}/test_harness.pl" dir="${test.location}" failonerror="true">
+            <env key="HARNESS_ROOT" value="${harness.dir}"/>
+            <env key="DRIVER_ROOT" value="${basedir}/drivers"/>
+            <env key="TH_WORKING_DIR" value="${test.location}"/>
+            <env key="TH_INPDIR_LOCAL" value="${inpdir.local}"/>
+            <env key="TH_INPDIR_HDFS" value="${inpdir.hdfs}"/>
+            <env key="TH_OUT" value="."/>
+            <env key="TH_ROOT" value="."/>
+            <env key="WEBHDFS_URL" value="${harness.webhdfs.url}"/>
+            <env key="TEMPLETON_URL" value="${harness.templeton.url}"/>
+            <env key="USER_NAME" value="${test.user.name}"/>
+            <env key="HARNESS_CONF" value="${basedir}/conf/default.conf"/>
+            <env key="SECURE_MODE" value="${secure.mode}"/>
+            <arg line="${tests.to.run}"/>
+            <arg value="${basedir}/tests/serverstatus.conf"/>
+            <arg value="${basedir}/tests/ddl.conf"/>
+            <arg value="${basedir}/tests/jobsubmission.conf"/>
+        </exec>
+    </target>
+
+    <target name="test-hcat-authorization" depends="property-check, init-test">
+        <property name="tests.to.run" value=""/>
+        <exec executable="${harness.dir}/test_harness.pl" dir="${test.location}" failonerror="true">
+            <env key="HARNESS_ROOT" value="${harness.dir}"/>
+            <env key="DRIVER_ROOT" value="${basedir}/drivers"/>
+            <env key="TH_WORKING_DIR" value="${test.location}"/>
+            <env key="TH_INPDIR_LOCAL" value="${inpdir.local}"/>
+            <env key="TH_INPDIR_HDFS" value="${inpdir.hdfs}"/>
+            <env key="TH_OUT" value="."/>
+            <env key="TH_ROOT" value="."/>
+            <env key="WEBHDFS_URL" value="${harness.webhdfs.url}"/>
+            <env key="TEMPLETON_URL" value="${harness.templeton.url}"/>
+            <env key="USER_NAME" value="${test.user.name}"/>
+            <env key="GROUP_NAME" value="${test.group.name}"/>
+            <env key="GROUP_USER_NAME" value="${test.group.user.name}"/>
+            <env key="OTHER_USER_NAME" value="${test.other.user.name}"/>
+            <env key="HARNESS_CONF" value="${basedir}/conf/default.conf"/>
+            <env key="SECURE_MODE" value="${secure.mode}"/>
+            <env key="KEYTAB_DIR" value="${keytab.dir}"/>
+            <arg line="${tests.to.run}"/>
+            <arg value="${basedir}/tests/hcatperms.conf"/>
+        </exec>
+    </target>
 
 
 </project>

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/ExitException.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/ExitException.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/ExitException.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/ExitException.java Mon Sep 10 23:28:55 2012
@@ -18,20 +18,20 @@
 
 package org.apache.hcatalog;
 
-public  class ExitException extends SecurityException {
-  private static final long serialVersionUID = -1982617086752946683L;
-  private final int status;
+public class ExitException extends SecurityException {
+    private static final long serialVersionUID = -1982617086752946683L;
+    private final int status;
 
-  /**
-   * @return the status
-   */
-  public int getStatus() {
-    return status;
-  }
+    /**
+     * @return the status
+     */
+    public int getStatus() {
+        return status;
+    }
 
-  public ExitException(int status) {
+    public ExitException(int status) {
 
-    super("Raising exception, instead of System.exit(). Return code was: "+status);
-    this.status = status;
-  }
+        super("Raising exception, instead of System.exit(). Return code was: " + status);
+        this.status = status;
+    }
 }
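
ExitException exists so tests can trap System.exit() instead of letting the JVM terminate. The usual wiring is a SecurityManager whose checkExit throws it; the sketch below shows that pattern with an invented manager class name, and the actual harness setup may differ. A test would install it with System.setSecurityManager(new NoExitSecurityManager()) and then assert on getStatus() of the caught exception.

    import java.security.Permission;
    import org.apache.hcatalog.ExitException;

    public class NoExitSecurityManager extends SecurityManager {
        @Override
        public void checkPermission(Permission perm) {
            // Allow everything else so normal operations keep working.
        }

        @Override
        public void checkExit(int status) {
            // Turn the exit request into a catchable exception carrying the status code.
            throw new ExitException(status);
        }
    }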

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/HcatTestUtils.java Mon Sep 10 23:28:55 2012
@@ -33,74 +33,74 @@ import org.apache.hadoop.hive.ql.metadat
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/** 
+/**
  * Utility methods for tests
  */
 public class HcatTestUtils {
-  private static final Logger LOG = LoggerFactory.getLogger(HcatTestUtils.class);
+    private static final Logger LOG = LoggerFactory.getLogger(HcatTestUtils.class);
 
-  public static FsPermission perm007 = FsPermission.createImmutable((short) 0007); // -------rwx
-  public static FsPermission perm070 = FsPermission.createImmutable((short) 0070); // ----rwx---
-  public static FsPermission perm700 = FsPermission.createImmutable((short) 0700); // -rwx------
-  public static FsPermission perm755 = FsPermission.createImmutable((short) 0755); // -rwxr-xr-x
-  public static FsPermission perm777 = FsPermission.createImmutable((short) 0777); // -rwxrwxrwx
-  public static FsPermission perm300 = FsPermission.createImmutable((short) 0300); // --wx------
-  public static FsPermission perm500 = FsPermission.createImmutable((short) 0500); // -r-x------
-  public static FsPermission perm555 = FsPermission.createImmutable((short) 0555); // -r-xr-xr-x
-  
-  /** 
-   * Returns the database path.
-   */
-  public static Path getDbPath(Hive hive, Warehouse wh, String dbName) throws MetaException, HiveException {
-    return wh.getDatabasePath(hive.getDatabase(dbName)); 
-  }
-  
-  /** 
-   * Removes all databases and tables from the metastore
-   */
-  public static void cleanupHMS(Hive hive, Warehouse wh, FsPermission defaultPerm) 
-      throws HiveException, MetaException, NoSuchObjectException {
-    for (String dbName : hive.getAllDatabases()) {
-      if (dbName.equals("default")) {
-        continue;
-      }
-      try {
-        Path path = getDbPath(hive, wh, dbName);
-        FileSystem whFs = path.getFileSystem(hive.getConf());
-        whFs.setPermission(path, defaultPerm);
-      } catch(IOException ex) {
-        //ignore
-      }
-      hive.dropDatabase(dbName, true, true, true);
-    }
-    
-    //clean tables in default db
-    for (String tablename : hive.getAllTables("default")) {
-      hive.dropTable("default", tablename, true, true);
+    public static FsPermission perm007 = FsPermission.createImmutable((short) 0007); // -------rwx
+    public static FsPermission perm070 = FsPermission.createImmutable((short) 0070); // ----rwx---
+    public static FsPermission perm700 = FsPermission.createImmutable((short) 0700); // -rwx------
+    public static FsPermission perm755 = FsPermission.createImmutable((short) 0755); // -rwxr-xr-x
+    public static FsPermission perm777 = FsPermission.createImmutable((short) 0777); // -rwxrwxrwx
+    public static FsPermission perm300 = FsPermission.createImmutable((short) 0300); // --wx------
+    public static FsPermission perm500 = FsPermission.createImmutable((short) 0500); // -r-x------
+    public static FsPermission perm555 = FsPermission.createImmutable((short) 0555); // -r-xr-xr-x
+
+    /**
+     * Returns the database path.
+     */
+    public static Path getDbPath(Hive hive, Warehouse wh, String dbName) throws MetaException, HiveException {
+        return wh.getDatabasePath(hive.getDatabase(dbName));
     }
-  }
 
-  public static void createTestDataFile(String filename, String[] lines) throws IOException {
-    FileWriter writer = null;
-    try {
-      File file = new File(filename);
-      file.deleteOnExit();
-      writer = new FileWriter(file);
-      for (String line : lines) {
-        writer.write(line + "\n");
-      }
-    } finally {
-      if (writer != null) {
-        writer.close();
-      }
+    /**
+     * Removes all databases and tables from the metastore
+     */
+    public static void cleanupHMS(Hive hive, Warehouse wh, FsPermission defaultPerm)
+        throws HiveException, MetaException, NoSuchObjectException {
+        for (String dbName : hive.getAllDatabases()) {
+            if (dbName.equals("default")) {
+                continue;
+            }
+            try {
+                Path path = getDbPath(hive, wh, dbName);
+                FileSystem whFs = path.getFileSystem(hive.getConf());
+                whFs.setPermission(path, defaultPerm);
+            } catch (IOException ex) {
+                //ignore
+            }
+            hive.dropDatabase(dbName, true, true, true);
+        }
+
+        //clean tables in default db
+        for (String tablename : hive.getAllTables("default")) {
+            hive.dropTable("default", tablename, true, true);
+        }
     }
 
-  }
+    public static void createTestDataFile(String filename, String[] lines) throws IOException {
+        FileWriter writer = null;
+        try {
+            File file = new File(filename);
+            file.deleteOnExit();
+            writer = new FileWriter(file);
+            for (String line : lines) {
+                writer.write(line + "\n");
+            }
+        } finally {
+            if (writer != null) {
+                writer.close();
+            }
+        }
 
-  public static boolean isHadoop23() {
-      String version = org.apache.hadoop.util.VersionInfo.getVersion();
-      if (version.matches("\\b0\\.23\\..+\\b"))
-          return true;
-      return false;
-  }
+    }
+
+    public static boolean isHadoop23() {
+        String version = org.apache.hadoop.util.VersionInfo.getVersion();
+        if (version.matches("\\b0\\.23\\..+\\b"))
+            return true;
+        return false;
+    }
 }
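
As a quick illustration of the createTestDataFile helper above (the file name and rows are invented), a test can stage its local input like this:

    import java.io.IOException;
    import org.apache.hcatalog.HcatTestUtils;

    public class TestDataFileExample {
        public static void main(String[] args) throws IOException {
            String[] rows = {"alice\t20\t3.1", "bob\t21\t3.4"};
            // Writes one line per element; the file is registered for deleteOnExit().
            HcatTestUtils.createTestDataFile("/tmp/hcat_test_input.txt", rows);
        }
    }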

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/MiniCluster.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/MiniCluster.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/MiniCluster.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/MiniCluster.java Mon Sep 10 23:28:55 2012
@@ -42,152 +42,159 @@ import org.apache.hadoop.mapred.MiniMRCl
  * environment for Pig to run on top of the mini cluster.
  */
 public class MiniCluster {
-  private MiniDFSCluster m_dfs = null;
-  private MiniMRCluster m_mr = null;
-  private FileSystem m_fileSys = null;
-  private JobConf m_conf = null;
-
-  private final static MiniCluster INSTANCE = new MiniCluster();
-  private static boolean isSetup = true;
-
-  private MiniCluster() {
-    setupMiniDfsAndMrClusters();
-  }
-
-  private void setupMiniDfsAndMrClusters() {
-    try {
-      final int dataNodes = 1;     // There will be 4 data nodes
-      final int taskTrackers = 1;  // There will be 4 task tracker nodes
-      Configuration config = new Configuration();
-
-      // Builds and starts the mini dfs and mapreduce clusters
-      System.setProperty("hadoop.log.dir", ".");
-      m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
-
-      m_fileSys = m_dfs.getFileSystem();
-      m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);
-
-      // Create the configuration hadoop-site.xml file
-      File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
-      conf_dir.mkdirs();
-      File conf_file = new File(conf_dir, "hadoop-site.xml");
-
-      // Write the necessary config info to hadoop-site.xml
-      m_conf = m_mr.createJobConf();
-      m_conf.setInt("mapred.submit.replication", 1);
-      m_conf.set("dfs.datanode.address", "0.0.0.0:0");
-      m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
-      m_conf.writeXml(new FileOutputStream(conf_file));
-
-      // Set the system properties needed by Pig
-      System.setProperty("cluster", m_conf.get("mapred.job.tracker"));
-      System.setProperty("namenode", m_conf.get("fs.default.name"));
-      System.setProperty("junit.hadoop.conf", conf_dir.getPath());
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  /**
-   * Returns the single instance of class MiniClusterBuilder that
-   * represents the resouces for a mini dfs cluster and a mini
-   * mapreduce cluster.
-   */
-  public static MiniCluster buildCluster() {
-    if(! isSetup){
-      INSTANCE.setupMiniDfsAndMrClusters();
-      isSetup = true;
-    }
-    return INSTANCE;
-  }
-
-  public void shutDown(){
-    INSTANCE.shutdownMiniDfsAndMrClusters();
-  }
-
-  @Override
-  protected void finalize() {
-    shutdownMiniDfsAndMrClusters();
-  }
-
-  private void shutdownMiniDfsAndMrClusters() {
-    isSetup = false;
-    try {
-      if (m_fileSys != null) { m_fileSys.close(); }
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
-    if (m_dfs != null) { m_dfs.shutdown(); }
-    if (m_mr != null) { m_mr.shutdown(); }
-    m_fileSys = null;
-    m_dfs = null;
-    m_mr = null;
-  }
-
-  public Properties getProperties() {
-    errorIfNotSetup();
-    Properties properties = new Properties();
-    assert m_conf != null;
-    Iterator<Map.Entry<String, String>> iter = m_conf.iterator();
-    while (iter.hasNext()) {
-        Map.Entry<String, String> entry = iter.next();
-        properties.put(entry.getKey(), entry.getValue());
-    }
-    return properties;
-  }
-
-  public void setProperty(String name, String value) {
-    errorIfNotSetup();
-    m_conf.set(name, value);
-  }
-
-  public FileSystem getFileSystem() {
-    errorIfNotSetup();
-    return m_fileSys;
-  }
-
-  /**
-   * Throw RunTimeException if isSetup is false
-   */
-   private void errorIfNotSetup(){
-     if(isSetup) {
-       return;
-     }
-     String msg = "function called on MiniCluster that has been shutdown";
-     throw new RuntimeException(msg);
-   }
-
-   static public void createInputFile(MiniCluster miniCluster, String fileName,
-       String[] inputData)
-   throws IOException {
-     FileSystem fs = miniCluster.getFileSystem();
-     createInputFile(fs, fileName, inputData);
-   }
-
-   static public void createInputFile(FileSystem fs, String fileName,
-       String[] inputData) throws IOException {
-     Path path = new Path(fileName);
-     if(fs.exists(path)) {
-       throw new IOException("File " + fileName + " already exists on the minicluster");
-     }
-     FSDataOutputStream stream = fs.create(path);
-     PrintWriter pw = new PrintWriter(new OutputStreamWriter(stream, "UTF-8"));
-     for (int i=0; i<inputData.length; i++){
-       pw.println(inputData[i]);
-     }
-     pw.close();
-
-   }
-   /**
-    * Helper to remove a dfs file from the minicluster DFS
-    *
-    * @param miniCluster reference to the Minicluster where the file should be deleted
-    * @param fileName pathname of the file to be deleted
-    * @throws IOException
-    */
-   static public void deleteFile(MiniCluster miniCluster, String fileName)
-   throws IOException {
-     FileSystem fs = miniCluster.getFileSystem();
-     fs.delete(new Path(fileName), true);
-   }
+    private MiniDFSCluster m_dfs = null;
+    private MiniMRCluster m_mr = null;
+    private FileSystem m_fileSys = null;
+    private JobConf m_conf = null;
+
+    private final static MiniCluster INSTANCE = new MiniCluster();
+    private static boolean isSetup = true;
+
+    private MiniCluster() {
+        setupMiniDfsAndMrClusters();
+    }
+
+    private void setupMiniDfsAndMrClusters() {
+        try {
+            final int dataNodes = 1;     // There will be 1 data node
+            final int taskTrackers = 1;  // There will be 1 task tracker node
+            Configuration config = new Configuration();
+
+            // Builds and starts the mini dfs and mapreduce clusters
+            System.setProperty("hadoop.log.dir", ".");
+            m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
+
+            m_fileSys = m_dfs.getFileSystem();
+            m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);
+
+            // Create the configuration hadoop-site.xml file
+            File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
+            conf_dir.mkdirs();
+            File conf_file = new File(conf_dir, "hadoop-site.xml");
+
+            // Write the necessary config info to hadoop-site.xml
+            m_conf = m_mr.createJobConf();
+            m_conf.setInt("mapred.submit.replication", 1);
+            m_conf.set("dfs.datanode.address", "0.0.0.0:0");
+            m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
+            m_conf.writeXml(new FileOutputStream(conf_file));
+
+            // Set the system properties needed by Pig
+            System.setProperty("cluster", m_conf.get("mapred.job.tracker"));
+            System.setProperty("namenode", m_conf.get("fs.default.name"));
+            System.setProperty("junit.hadoop.conf", conf_dir.getPath());
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * Returns the single instance of class MiniCluster that
+     * represents the resources for a mini dfs cluster and a mini
+     * mapreduce cluster.
+     */
+    public static MiniCluster buildCluster() {
+        if (!isSetup) {
+            INSTANCE.setupMiniDfsAndMrClusters();
+            isSetup = true;
+        }
+        return INSTANCE;
+    }
+
+    public void shutDown() {
+        INSTANCE.shutdownMiniDfsAndMrClusters();
+    }
+
+    @Override
+    protected void finalize() {
+        shutdownMiniDfsAndMrClusters();
+    }
+
+    private void shutdownMiniDfsAndMrClusters() {
+        isSetup = false;
+        try {
+            if (m_fileSys != null) {
+                m_fileSys.close();
+            }
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+        if (m_dfs != null) {
+            m_dfs.shutdown();
+        }
+        if (m_mr != null) {
+            m_mr.shutdown();
+        }
+        m_fileSys = null;
+        m_dfs = null;
+        m_mr = null;
+    }
+
+    public Properties getProperties() {
+        errorIfNotSetup();
+        Properties properties = new Properties();
+        assert m_conf != null;
+        Iterator<Map.Entry<String, String>> iter = m_conf.iterator();
+        while (iter.hasNext()) {
+            Map.Entry<String, String> entry = iter.next();
+            properties.put(entry.getKey(), entry.getValue());
+        }
+        return properties;
+    }
+
+    public void setProperty(String name, String value) {
+        errorIfNotSetup();
+        m_conf.set(name, value);
+    }
+
+    public FileSystem getFileSystem() {
+        errorIfNotSetup();
+        return m_fileSys;
+    }
+
+    /**
+     * Throw RuntimeException if isSetup is false
+     */
+    private void errorIfNotSetup() {
+        if (isSetup) {
+            return;
+        }
+        String msg = "function called on MiniCluster that has been shutdown";
+        throw new RuntimeException(msg);
+    }
+
+    static public void createInputFile(MiniCluster miniCluster, String fileName,
+                                       String[] inputData)
+        throws IOException {
+        FileSystem fs = miniCluster.getFileSystem();
+        createInputFile(fs, fileName, inputData);
+    }
+
+    static public void createInputFile(FileSystem fs, String fileName,
+                                       String[] inputData) throws IOException {
+        Path path = new Path(fileName);
+        if (fs.exists(path)) {
+            throw new IOException("File " + fileName + " already exists on the minicluster");
+        }
+        FSDataOutputStream stream = fs.create(path);
+        PrintWriter pw = new PrintWriter(new OutputStreamWriter(stream, "UTF-8"));
+        for (int i = 0; i < inputData.length; i++) {
+            pw.println(inputData[i]);
+        }
+        pw.close();
+
+    }
+
+    /**
+     * Helper to remove a dfs file from the minicluster DFS
+     *
+     * @param miniCluster reference to the Minicluster where the file should be deleted
+     * @param fileName pathname of the file to be deleted
+     * @throws IOException
+     */
+    static public void deleteFile(MiniCluster miniCluster, String fileName)
+        throws IOException {
+        FileSystem fs = miniCluster.getFileSystem();
+        fs.delete(new Path(fileName), true);
+    }
 }
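
A typical use of this helper (the file name and sample rows are invented) is to stand up the shared mini cluster, stage an input file on its DFS, and tear everything down at the end:

    import java.io.IOException;
    import java.util.Properties;
    import org.apache.hcatalog.MiniCluster;

    public class MiniClusterExample {
        public static void main(String[] args) throws IOException {
            MiniCluster cluster = MiniCluster.buildCluster();
            try {
                MiniCluster.createInputFile(cluster, "input/sample.txt",
                    new String[]{"1\tone", "2\ttwo"});
                // Settings captured from the mini DFS/MR clusters, e.g. for running Pig.
                Properties props = cluster.getProperties();
                System.out.println(props.getProperty("mapred.job.tracker"));
                MiniCluster.deleteFile(cluster, "input/sample.txt");
            } finally {
                cluster.shutDown();
            }
        }
    }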


