incubator-hcatalog-commits mailing list archives

From: tra...@apache.org
Subject: svn commit: r1383152 [19/27] - in /incubator/hcatalog/trunk: ./ hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/ hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/ hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/ ...
Date: Mon, 10 Sep 2012 23:29:03 GMT
Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatOutputFormat.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatOutputFormat.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatOutputFormat.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatOutputFormat.java Mon Sep 10 23:28:55 2012
@@ -46,120 +46,122 @@ import org.slf4j.LoggerFactory;
 
 public class TestHCatOutputFormat extends TestCase {
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestHCatOutputFormat.class);
-  private HiveMetaStoreClient client;
-  private HiveConf hiveConf;
-
-  private static final String dbName = "hcatOutputFormatTestDB";
-  private static final String tblName = "hcatOutputFormatTestTable";
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    hiveConf = new HiveConf(this.getClass());
-
-    try {
-      client = new HiveMetaStoreClient(hiveConf, null);
-
-      initTable();
-    } catch (Throwable e) {
-      LOG.error("Unable to open the metastore", e);
-      throw new Exception(e);
+    private static final Logger LOG = LoggerFactory.getLogger(TestHCatOutputFormat.class);
+    private HiveMetaStoreClient client;
+    private HiveConf hiveConf;
+
+    private static final String dbName = "hcatOutputFormatTestDB";
+    private static final String tblName = "hcatOutputFormatTestTable";
+
+    @Override
+    protected void setUp() throws Exception {
+        super.setUp();
+        hiveConf = new HiveConf(this.getClass());
+
+        try {
+            client = new HiveMetaStoreClient(hiveConf, null);
+
+            initTable();
+        } catch (Throwable e) {
+            LOG.error("Unable to open the metastore", e);
+            throw new Exception(e);
+        }
     }
-  }
 
-  @Override
-  protected void tearDown() throws Exception {
-    try {
-      super.tearDown();
-      client.dropTable(dbName, tblName);
-      client.dropDatabase(dbName);
-
-      client.close();
-    } catch (Throwable e) {
-        LOG.error("Unable to close metastore", e);
-      throw new Exception(e);
+    @Override
+    protected void tearDown() throws Exception {
+        try {
+            super.tearDown();
+            client.dropTable(dbName, tblName);
+            client.dropDatabase(dbName);
+
+            client.close();
+        } catch (Throwable e) {
+            LOG.error("Unable to close metastore", e);
+            throw new Exception(e);
+        }
     }
-  }
 
-  private void initTable() throws Exception {
+    private void initTable() throws Exception {
 
-    try {
-      client.dropTable(dbName, tblName);
-    } catch(Exception e) {}
-    try {
-      client.dropDatabase(dbName);
-    } catch(Exception e) {}
-    client.createDatabase(new Database(dbName, "", null,null));
-    assertNotNull((client.getDatabase(dbName).getLocationUri()));
-
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    fields.add(new FieldSchema("colname", Constants.STRING_TYPE_NAME, ""));
-
-    Table tbl = new Table();
-    tbl.setDbName(dbName);
-    tbl.setTableName(tblName);
-    StorageDescriptor sd = new StorageDescriptor();
-    sd.setCols(fields);
-    tbl.setSd(sd);
-
-    //sd.setLocation("hdfs://tmp");
-    sd.setInputFormat(RCFileInputFormat.class.getName());
-    sd.setOutputFormat(RCFileOutputFormat.class.getName());
-    sd.setParameters(new HashMap<String, String>());
-    sd.getParameters().put("test_param_1", "Use this for comments etc");
-    //sd.setBucketCols(new ArrayList<String>(2));
-    //sd.getBucketCols().add("name");
-    sd.setSerdeInfo(new SerDeInfo());
-    sd.getSerdeInfo().setName(tbl.getTableName());
-    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-    sd.getSerdeInfo().getParameters().put(
-        org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
-    sd.getSerdeInfo().setSerializationLib(
-        org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
-    tbl.setPartitionKeys(fields);
-
-    Map<String, String> tableParams = new HashMap<String, String>();
-    tableParams.put("hcat.testarg", "testArgValue");
-
-    tbl.setParameters(tableParams);
-
-    client.createTable(tbl);
-    Path tblPath = new Path(client.getTable(dbName, tblName).getSd().getLocation());
-    assertTrue(tblPath.getFileSystem(hiveConf).mkdirs(new Path(tblPath,"colname=p1")));
-
-  }
-
-  public void testSetOutput() throws Exception {
-    Configuration conf = new Configuration();
-    Job job = new Job(conf, "test outputformat");
-
-    Map<String, String> partitionValues = new HashMap<String, String>();
-    partitionValues.put("colname", "p1");
-    //null server url means local mode
-    OutputJobInfo info = OutputJobInfo.create(dbName, tblName, partitionValues);
-
-    HCatOutputFormat.setOutput(job, info);
-    OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(job);
-
-    assertNotNull(jobInfo.getTableInfo());
-    assertEquals(1, jobInfo.getPartitionValues().size());
-    assertEquals("p1", jobInfo.getPartitionValues().get("colname"));
-    assertEquals(1, jobInfo.getTableInfo().getDataColumns().getFields().size());
-    assertEquals("colname", jobInfo.getTableInfo().getDataColumns().getFields().get(0).getName());
-
-    publishTest(job);
-  }
-
-  public void publishTest(Job job) throws Exception {
-    OutputCommitter committer = new FileOutputCommitterContainer(job,null);
-    committer.cleanupJob(job);
-
-    Partition part = client.getPartition(dbName, tblName, Arrays.asList("p1"));
-    assertNotNull(part);
-
-    StorerInfo storer = InternalUtil.extractStorerInfo(part.getSd(),part.getParameters());
-    assertEquals(storer.getProperties().get("hcat.testarg"), "testArgValue");
-    assertTrue(part.getSd().getLocation().indexOf("p1") != -1);
-  }
+        try {
+            client.dropTable(dbName, tblName);
+        } catch (Exception e) {
+        }
+        try {
+            client.dropDatabase(dbName);
+        } catch (Exception e) {
+        }
+        client.createDatabase(new Database(dbName, "", null, null));
+        assertNotNull((client.getDatabase(dbName).getLocationUri()));
+
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        fields.add(new FieldSchema("colname", Constants.STRING_TYPE_NAME, ""));
+
+        Table tbl = new Table();
+        tbl.setDbName(dbName);
+        tbl.setTableName(tblName);
+        StorageDescriptor sd = new StorageDescriptor();
+        sd.setCols(fields);
+        tbl.setSd(sd);
+
+        //sd.setLocation("hdfs://tmp");
+        sd.setInputFormat(RCFileInputFormat.class.getName());
+        sd.setOutputFormat(RCFileOutputFormat.class.getName());
+        sd.setParameters(new HashMap<String, String>());
+        sd.getParameters().put("test_param_1", "Use this for comments etc");
+        //sd.setBucketCols(new ArrayList<String>(2));
+        //sd.getBucketCols().add("name");
+        sd.setSerdeInfo(new SerDeInfo());
+        sd.getSerdeInfo().setName(tbl.getTableName());
+        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+        sd.getSerdeInfo().getParameters().put(
+                org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+        sd.getSerdeInfo().setSerializationLib(
+                org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
+        tbl.setPartitionKeys(fields);
+
+        Map<String, String> tableParams = new HashMap<String, String>();
+        tableParams.put("hcat.testarg", "testArgValue");
+
+        tbl.setParameters(tableParams);
+
+        client.createTable(tbl);
+        Path tblPath = new Path(client.getTable(dbName, tblName).getSd().getLocation());
+        assertTrue(tblPath.getFileSystem(hiveConf).mkdirs(new Path(tblPath, "colname=p1")));
+
+    }
+
+    public void testSetOutput() throws Exception {
+        Configuration conf = new Configuration();
+        Job job = new Job(conf, "test outputformat");
+
+        Map<String, String> partitionValues = new HashMap<String, String>();
+        partitionValues.put("colname", "p1");
+        //null server url means local mode
+        OutputJobInfo info = OutputJobInfo.create(dbName, tblName, partitionValues);
+
+        HCatOutputFormat.setOutput(job, info);
+        OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(job);
+
+        assertNotNull(jobInfo.getTableInfo());
+        assertEquals(1, jobInfo.getPartitionValues().size());
+        assertEquals("p1", jobInfo.getPartitionValues().get("colname"));
+        assertEquals(1, jobInfo.getTableInfo().getDataColumns().getFields().size());
+        assertEquals("colname", jobInfo.getTableInfo().getDataColumns().getFields().get(0).getName());
+
+        publishTest(job);
+    }
+
+    public void publishTest(Job job) throws Exception {
+        OutputCommitter committer = new FileOutputCommitterContainer(job, null);
+        committer.cleanupJob(job);
+
+        Partition part = client.getPartition(dbName, tblName, Arrays.asList("p1"));
+        assertNotNull(part);
+
+        StorerInfo storer = InternalUtil.extractStorerInfo(part.getSd(), part.getParameters());
+        assertEquals(storer.getProperties().get("hcat.testarg"), "testArgValue");
+        assertTrue(part.getSd().getLocation().indexOf("p1") != -1);
+    }
 }
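
[Editor's note] As context for reviewers, the output-format configuration pattern exercised by testSetOutput() above can be sketched roughly as follows. This is an illustrative snippet only, not part of the commit: it uses only calls visible in the test (OutputJobInfo.create, HCatOutputFormat.setOutput/getJobInfo), and the class name HCatOutputSetupSketch plus the partition key/value strings are placeholders.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hcatalog.mapreduce.HCatOutputFormat;
    import org.apache.hcatalog.mapreduce.OutputJobInfo;

    public class HCatOutputSetupSketch {
        // Configures 'job' to write one partition of an HCatalog table, mirroring testSetOutput().
        public static void configureForWrite(Job job, String dbName, String tblName) throws Exception {
            // Target partition; the key must match the table's partition column ("colname" in the test).
            Map<String, String> partitionValues = new HashMap<String, String>();
            partitionValues.put("colname", "p1");

            // Metastore connection settings come from the Hive/Hadoop configuration carried by the job.
            OutputJobInfo info = OutputJobInfo.create(dbName, tblName, partitionValues);
            HCatOutputFormat.setOutput(job, info);

            // setOutput resolves the table metadata and serializes it into the job configuration;
            // the assertions on HCatOutputFormat.getJobInfo(job) in the test verify exactly this.
            OutputJobInfo resolved = HCatOutputFormat.getJobInfo(job);
            System.out.println("Writing to partition: " + resolved.getPartitionValues());
        }
    }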

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatPartitioned.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatPartitioned.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatPartitioned.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatPartitioned.java Mon Sep 10 23:28:55 2012
@@ -36,309 +36,309 @@ import org.apache.hcatalog.data.schema.H
 
 public class TestHCatPartitioned extends HCatMapReduceTest {
 
-  private List<HCatRecord> writeRecords;
-  private List<HCatFieldSchema> partitionColumns;
+    private List<HCatRecord> writeRecords;
+    private List<HCatFieldSchema> partitionColumns;
 
-  @Override
-  protected void initialize() throws Exception {
+    @Override
+    protected void initialize() throws Exception {
 
-    tableName = "testHCatPartitionedTable";
-    writeRecords = new ArrayList<HCatRecord>();
+        tableName = "testHCatPartitionedTable";
+        writeRecords = new ArrayList<HCatRecord>();
 
-    for(int i = 0;i < 20;i++) {
-      List<Object> objList = new ArrayList<Object>();
+        for (int i = 0; i < 20; i++) {
+            List<Object> objList = new ArrayList<Object>();
 
-      objList.add(i);
-      objList.add("strvalue" + i);
-      writeRecords.add(new DefaultHCatRecord(objList));
-    }
+            objList.add(i);
+            objList.add("strvalue" + i);
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
 
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
-  }
-
-
-  @Override
-  protected List<FieldSchema> getPartitionKeys() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    //Defining partition names in unsorted order
-    fields.add(new FieldSchema("PaRT1", Constants.STRING_TYPE_NAME, ""));
-    fields.add(new FieldSchema("part0", Constants.STRING_TYPE_NAME, ""));
-    return fields;
-  }
-
-  @Override
-  protected List<FieldSchema> getTableColumns() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
-    fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
-    return fields;
-  }
-
-
-  public void testHCatPartitionedTable() throws Exception {
-
-    Map<String, String> partitionMap = new HashMap<String, String>();
-    partitionMap.put("part1", "p1value1");
-    partitionMap.put("part0", "p0value1");
-
-    runMRCreate(partitionMap, partitionColumns, writeRecords, 10,true);
-
-    partitionMap.clear();
-    partitionMap.put("PART1", "p1value2");
-    partitionMap.put("PART0", "p0value2");
-
-    runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-
-    //Test for duplicate publish
-    IOException exc = null;
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
     }
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_DUPLICATE_PARTITION, ((HCatException) exc).getErrorType());
-
-    //Test for publish with invalid partition key name
-    exc = null;
-    partitionMap.clear();
-    partitionMap.put("px1", "p1value2");
-    partitionMap.put("px0", "p0value2");
-
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
+
+    @Override
+    protected List<FieldSchema> getPartitionKeys() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        //Defining partition names in unsorted order
+        fields.add(new FieldSchema("PaRT1", Constants.STRING_TYPE_NAME, ""));
+        fields.add(new FieldSchema("part0", Constants.STRING_TYPE_NAME, ""));
+        return fields;
     }
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_MISSING_PARTITION_KEY, ((HCatException) exc).getErrorType());
-
-    //Test for publish with missing partition key values
-    exc = null;
-    partitionMap.clear();
-    partitionMap.put("px", "p1value2");
-
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
+    @Override
+    protected List<FieldSchema> getTableColumns() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
+        fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
+        return fields;
     }
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_INVALID_PARTITION_VALUES, ((HCatException) exc).getErrorType());
 
+    public void testHCatPartitionedTable() throws Exception {
 
-    //Test for null partition value map
-    exc = null;
-    try {
-      runMRCreate(null, partitionColumns, writeRecords, 20,false);
-    } catch(IOException e) {
-      exc = e;
-    }
+        Map<String, String> partitionMap = new HashMap<String, String>();
+        partitionMap.put("part1", "p1value1");
+        partitionMap.put("part0", "p0value1");
+
+        runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);
+
+        partitionMap.clear();
+        partitionMap.put("PART1", "p1value2");
+        partitionMap.put("PART0", "p0value2");
+
+        runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+
+        //Test for duplicate publish
+        IOException exc = null;
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_DUPLICATE_PARTITION, ((HCatException) exc).getErrorType());
+
+        //Test for publish with invalid partition key name
+        exc = null;
+        partitionMap.clear();
+        partitionMap.put("px1", "p1value2");
+        partitionMap.put("px0", "p0value2");
 
-    assertTrue(exc == null);
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_MISSING_PARTITION_KEY, ((HCatException) exc).getErrorType());
+
+        //Test for publish with missing partition key values
+        exc = null;
+        partitionMap.clear();
+        partitionMap.put("px", "p1value2");
+
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_INVALID_PARTITION_VALUES, ((HCatException) exc).getErrorType());
+
+
+        //Test for null partition value map
+        exc = null;
+        try {
+            runMRCreate(null, partitionColumns, writeRecords, 20, false);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc == null);
 //    assertTrue(exc instanceof HCatException);
 //    assertEquals(ErrorType.ERROR_PUBLISHING_PARTITION, ((HCatException) exc).getErrorType());
-    // With Dynamic partitioning, this isn't an error that the keyValues specified didn't values
+        // With Dynamic partitioning, this isn't an error that the keyValues specified didn't values
 
-    //Read should get 10 + 20 rows
-    runMRRead(30);
+        //Read should get 10 + 20 rows
+        runMRRead(30);
 
-    //Read with partition filter
-    runMRRead(10, "part1 = \"p1value1\"");
-    runMRRead(20, "part1 = \"p1value2\"");
-    runMRRead(30, "part1 = \"p1value1\" or part1 = \"p1value2\"");
-    runMRRead(10, "part0 = \"p0value1\"");
-    runMRRead(20, "part0 = \"p0value2\"");
-    runMRRead(30, "part0 = \"p0value1\" or part0 = \"p0value2\"");
+        //Read with partition filter
+        runMRRead(10, "part1 = \"p1value1\"");
+        runMRRead(20, "part1 = \"p1value2\"");
+        runMRRead(30, "part1 = \"p1value1\" or part1 = \"p1value2\"");
+        runMRRead(10, "part0 = \"p0value1\"");
+        runMRRead(20, "part0 = \"p0value2\"");
+        runMRRead(30, "part0 = \"p0value1\" or part0 = \"p0value2\"");
 
-    tableSchemaTest();
-    columnOrderChangeTest();
-    hiveReadTest();
-  }
+        tableSchemaTest();
+        columnOrderChangeTest();
+        hiveReadTest();
+    }
 
 
-  //test that new columns gets added to table schema
-  private void tableSchemaTest() throws Exception {
+    //test that new columns gets added to table schema
+    private void tableSchemaTest() throws Exception {
 
-    HCatSchema tableSchema = getTableSchema();
+        HCatSchema tableSchema = getTableSchema();
 
-    assertEquals(4, tableSchema.getFields().size());
+        assertEquals(4, tableSchema.getFields().size());
 
-    //Update partition schema to have 3 fields
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
+        //Update partition schema to have 3 fields
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
 
-    writeRecords = new ArrayList<HCatRecord>();
+        writeRecords = new ArrayList<HCatRecord>();
 
-    for(int i = 0;i < 20;i++) {
-      List<Object> objList = new ArrayList<Object>();
+        for (int i = 0; i < 20; i++) {
+            List<Object> objList = new ArrayList<Object>();
 
-      objList.add(i);
-      objList.add("strvalue" + i);
-      objList.add("str2value" + i);
+            objList.add(i);
+            objList.add("strvalue" + i);
+            objList.add("str2value" + i);
 
-      writeRecords.add(new DefaultHCatRecord(objList));
-    }
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
 
-    Map<String, String> partitionMap = new HashMap<String, String>();
-    partitionMap.put("part1", "p1value5");
-    partitionMap.put("part0", "p0value5");
-
-    runMRCreate(partitionMap, partitionColumns, writeRecords, 10,true);
-
-    tableSchema = getTableSchema();
-
-    //assert that c3 has got added to table schema
-    assertEquals(5, tableSchema.getFields().size());
-    assertEquals("c1", tableSchema.getFields().get(0).getName());
-    assertEquals("c2", tableSchema.getFields().get(1).getName());
-    assertEquals("c3", tableSchema.getFields().get(2).getName());
-    assertEquals("part1", tableSchema.getFields().get(3).getName());
-    assertEquals("part0", tableSchema.getFields().get(4).getName());
-
-    //Test that changing column data type fails
-    partitionMap.clear();
-    partitionMap.put("part1", "p1value6");
-    partitionMap.put("part0", "p0value6");
-
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.INT_TYPE_NAME, "")));
-
-    IOException exc = null;
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
-    }
+        Map<String, String> partitionMap = new HashMap<String, String>();
+        partitionMap.put("part1", "p1value5");
+        partitionMap.put("part0", "p0value5");
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_SCHEMA_TYPE_MISMATCH, ((HCatException) exc).getErrorType());
-
-    //Test that partition key is not allowed in data
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("part1", Constants.STRING_TYPE_NAME, "")));
-
-    List<HCatRecord> recordsContainingPartitionCols = new ArrayList<HCatRecord>(20);
-    for(int i = 0;i < 20;i++) {
-      List<Object> objList = new ArrayList<Object>();
-
-      objList.add(i);
-      objList.add("c2value" + i);
-      objList.add("c3value" + i);
-      objList.add("p1value6");
+        runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);
 
-      recordsContainingPartitionCols.add(new DefaultHCatRecord(objList));
-    }
+        tableSchema = getTableSchema();
 
-    exc = null;
-    try {
-      runMRCreate(partitionMap, partitionColumns, recordsContainingPartitionCols, 20,true);
-    } catch(IOException e) {
-      exc = e;
-    }
+        //assert that c3 has got added to table schema
+        assertEquals(5, tableSchema.getFields().size());
+        assertEquals("c1", tableSchema.getFields().get(0).getName());
+        assertEquals("c2", tableSchema.getFields().get(1).getName());
+        assertEquals("c3", tableSchema.getFields().get(2).getName());
+        assertEquals("part1", tableSchema.getFields().get(3).getName());
+        assertEquals("part0", tableSchema.getFields().get(4).getName());
 
-    List<HCatRecord> records= runMRRead(20,"part1 = \"p1value6\"");
-    assertEquals(20, records.size());
-    records= runMRRead(20,"part0 = \"p0value6\"");
-    assertEquals(20, records.size());
-    Integer i =0;
-    for(HCatRecord rec : records){
-      assertEquals(5, rec.size());
-      assertTrue(rec.get(0).equals(i));
-      assertTrue(rec.get(1).equals("c2value"+i));
-      assertTrue(rec.get(2).equals("c3value"+i));
-      assertTrue(rec.get(3).equals("p1value6"));
-      assertTrue(rec.get(4).equals("p0value6"));
-      i++;
-    }
-  }
+        //Test that changing column data type fails
+        partitionMap.clear();
+        partitionMap.put("part1", "p1value6");
+        partitionMap.put("part0", "p0value6");
 
-  //check behavior while change the order of columns
-  private void columnOrderChangeTest() throws Exception {
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.INT_TYPE_NAME, "")));
 
-    HCatSchema tableSchema = getTableSchema();
+        IOException exc = null;
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
 
-    assertEquals(5, tableSchema.getFields().size());
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_SCHEMA_TYPE_MISMATCH, ((HCatException) exc).getErrorType());
 
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
+        //Test that partition key is not allowed in data
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("part1", Constants.STRING_TYPE_NAME, "")));
 
+        List<HCatRecord> recordsContainingPartitionCols = new ArrayList<HCatRecord>(20);
+        for (int i = 0; i < 20; i++) {
+            List<Object> objList = new ArrayList<Object>();
 
-    writeRecords = new ArrayList<HCatRecord>();
+            objList.add(i);
+            objList.add("c2value" + i);
+            objList.add("c3value" + i);
+            objList.add("p1value6");
 
-    for(int i = 0;i < 10;i++) {
-      List<Object> objList = new ArrayList<Object>();
+            recordsContainingPartitionCols.add(new DefaultHCatRecord(objList));
+        }
 
-      objList.add(i);
-      objList.add("co strvalue" + i);
-      objList.add("co str2value" + i);
+        exc = null;
+        try {
+            runMRCreate(partitionMap, partitionColumns, recordsContainingPartitionCols, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
 
-      writeRecords.add(new DefaultHCatRecord(objList));
+        List<HCatRecord> records = runMRRead(20, "part1 = \"p1value6\"");
+        assertEquals(20, records.size());
+        records = runMRRead(20, "part0 = \"p0value6\"");
+        assertEquals(20, records.size());
+        Integer i = 0;
+        for (HCatRecord rec : records) {
+            assertEquals(5, rec.size());
+            assertTrue(rec.get(0).equals(i));
+            assertTrue(rec.get(1).equals("c2value" + i));
+            assertTrue(rec.get(2).equals("c3value" + i));
+            assertTrue(rec.get(3).equals("p1value6"));
+            assertTrue(rec.get(4).equals("p0value6"));
+            i++;
+        }
     }
 
-    Map<String, String> partitionMap = new HashMap<String, String>();
-    partitionMap.put("part1", "p1value8");
-    partitionMap.put("part0", "p0value8");
-
-    Exception exc = null;
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 10,true);
-    } catch(IOException e) {
-      exc = e;
-    }
+    //check behavior while change the order of columns
+    private void columnOrderChangeTest() throws Exception {
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_SCHEMA_COLUMN_MISMATCH, ((HCatException) exc).getErrorType());
+        HCatSchema tableSchema = getTableSchema();
 
+        assertEquals(5, tableSchema.getFields().size());
 
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c3", Constants.STRING_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
 
-    writeRecords = new ArrayList<HCatRecord>();
 
-    for(int i = 0;i < 10;i++) {
-      List<Object> objList = new ArrayList<Object>();
+        writeRecords = new ArrayList<HCatRecord>();
 
-      objList.add(i);
-      objList.add("co strvalue" + i);
+        for (int i = 0; i < 10; i++) {
+            List<Object> objList = new ArrayList<Object>();
 
-      writeRecords.add(new DefaultHCatRecord(objList));
-    }
+            objList.add(i);
+            objList.add("co strvalue" + i);
+            objList.add("co str2value" + i);
+
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
+
+        Map<String, String> partitionMap = new HashMap<String, String>();
+        partitionMap.put("part1", "p1value8");
+        partitionMap.put("part0", "p0value8");
+
+        Exception exc = null;
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);
+        } catch (IOException e) {
+            exc = e;
+        }
 
-    runMRCreate(partitionMap, partitionColumns, writeRecords, 10,true);
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_SCHEMA_COLUMN_MISMATCH, ((HCatException) exc).getErrorType());
 
-    //Read should get 10 + 20 + 10 + 10 + 20 rows
-    runMRRead(70);
-  }
 
-  //Test that data inserted through hcatoutputformat is readable from hive
-  private void hiveReadTest() throws Exception {
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
 
-    String query = "select * from " + tableName;
-    int retCode = driver.run(query).getResponseCode();
+        writeRecords = new ArrayList<HCatRecord>();
 
-    if( retCode != 0 ) {
-      throw new Exception("Error " + retCode + " running query " + query);
+        for (int i = 0; i < 10; i++) {
+            List<Object> objList = new ArrayList<Object>();
+
+            objList.add(i);
+            objList.add("co strvalue" + i);
+
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
+
+        runMRCreate(partitionMap, partitionColumns, writeRecords, 10, true);
+
+        //Read should get 10 + 20 + 10 + 10 + 20 rows
+        runMRRead(70);
     }
 
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-    assertEquals(70, res.size());
-  }
+    //Test that data inserted through hcatoutputformat is readable from hive
+    private void hiveReadTest() throws Exception {
+
+        String query = "select * from " + tableName;
+        int retCode = driver.run(query).getResponseCode();
+
+        if (retCode != 0) {
+            throw new Exception("Error " + retCode + " running query " + query);
+        }
+
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        assertEquals(70, res.size());
+    }
 }

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestMultiOutputFormat.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestMultiOutputFormat.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestMultiOutputFormat.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestMultiOutputFormat.java Mon Sep 10 23:28:55 2012
@@ -74,7 +74,7 @@ public class TestMultiOutputFormat {
         // LocalJobRunner does not work with mapreduce OutputCommitter. So need
         // to use MiniMRCluster. MAPREDUCE-2350
         mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
-                new JobConf(conf));
+            new JobConf(conf));
         mrConf = mrCluster.createJobConf();
     }
 
@@ -111,7 +111,7 @@ public class TestMultiOutputFormat {
         JobConfigurer configurer = MultiOutputFormat.createConfigurer(job);
         configurer.addOutputFormat("out1", TextOutputFormat.class, IntWritable.class, Text.class);
         configurer.addOutputFormat("out2", SequenceFileOutputFormat.class, Text.class,
-                IntWritable.class);
+            IntWritable.class);
         Path outDir = new Path(workDir.getPath(), job.getJobName());
         FileOutputFormat.setOutputPath(configurer.getJob("out1"), new Path(outDir, "out1"));
         FileOutputFormat.setOutputPath(configurer.getJob("out2"), new Path(outDir, "out2"));
@@ -124,19 +124,19 @@ public class TestMultiOutputFormat {
         DistributedCache.addFileToClassPath(new Path(inputFile), job.getConfiguration(), fs);
         String dummyFile = createInputFile("dummy file");
         DistributedCache.addFileToClassPath(new Path(dummyFile), configurer.getJob("out1")
-                .getConfiguration(), fs);
+            .getConfiguration(), fs);
         // duplicate of the value. Merging should remove duplicates
         DistributedCache.addFileToClassPath(new Path(inputFile), configurer.getJob("out2")
-                .getConfiguration(), fs);
+            .getConfiguration(), fs);
 
         configurer.configure();
 
         // Verify if the configs are merged
         Path[] fileClassPaths = DistributedCache.getFileClassPaths(job.getConfiguration());
-        Assert.assertArrayEquals(new Path[] {new Path(inputFile), new Path(dummyFile)},
-                fileClassPaths);
-        URI[] expectedCacheFiles = new URI[] {new Path(inputFile).makeQualified(fs).toUri(),
-                new Path(dummyFile).makeQualified(fs).toUri()};
+        Assert.assertArrayEquals(new Path[]{new Path(inputFile), new Path(dummyFile)},
+            fileClassPaths);
+        URI[] expectedCacheFiles = new URI[]{new Path(inputFile).makeQualified(fs).toUri(),
+            new Path(dummyFile).makeQualified(fs).toUri()};
         URI[] cacheFiles = DistributedCache.getCacheFiles(job.getConfiguration());
         Assert.assertArrayEquals(expectedCacheFiles, cacheFiles);
 
@@ -180,9 +180,9 @@ public class TestMultiOutputFormat {
 
         configurer.addOutputFormat("out1", TextOutputFormat.class, IntWritable.class, Text.class);
         configurer.addOutputFormat("out2", SequenceFileOutputFormat.class, Text.class,
-                IntWritable.class);
+            IntWritable.class);
         configurer.addOutputFormat("out3", NullOutputFormat.class, Text.class,
-                IntWritable.class);
+            IntWritable.class);
         Path outDir = new Path(workDir.getPath(), job.getJobName());
         FileOutputFormat.setOutputPath(configurer.getJob("out1"), new Path(outDir, "out1"));
         FileOutputFormat.setOutputPath(configurer.getJob("out2"), new Path(outDir, "out2"));
@@ -237,14 +237,14 @@ public class TestMultiOutputFormat {
     }
 
     private static class MultiOutWordIndexMapper extends
-            Mapper<LongWritable, Text, Writable, Writable> {
+        Mapper<LongWritable, Text, Writable, Writable> {
 
         private IntWritable index = new IntWritable(1);
         private Text word = new Text();
 
         @Override
         protected void map(LongWritable key, Text value, Context context)
-                throws IOException, InterruptedException {
+            throws IOException, InterruptedException {
             StringTokenizer itr = new StringTokenizer(value.toString());
             while (itr.hasMoreTokens()) {
                 word.set(itr.nextToken());
@@ -256,14 +256,14 @@ public class TestMultiOutputFormat {
     }
 
     private static class WordCountMapper extends
-            Mapper<LongWritable, Text, Text, IntWritable> {
+        Mapper<LongWritable, Text, Text, IntWritable> {
 
         private final static IntWritable one = new IntWritable(1);
         private Text word = new Text();
 
         @Override
         protected void map(LongWritable key, Text value, Context context)
-                throws IOException, InterruptedException {
+            throws IOException, InterruptedException {
             StringTokenizer itr = new StringTokenizer(value.toString());
             while (itr.hasMoreTokens()) {
                 word.set(itr.nextToken());
@@ -273,13 +273,13 @@ public class TestMultiOutputFormat {
     }
 
     private static class MultiOutWordCountReducer extends
-            Reducer<Text, IntWritable, Writable, Writable> {
+        Reducer<Text, IntWritable, Writable, Writable> {
 
         private IntWritable count = new IntWritable();
 
         @Override
         protected void reduce(Text word, Iterable<IntWritable> values, Context context)
-                throws IOException, InterruptedException {
+            throws IOException, InterruptedException {
             int sum = 0;
             for (IntWritable val : values) {
                 sum += val.get();
@@ -292,23 +292,34 @@ public class TestMultiOutputFormat {
     }
 
     private static class NullOutputFormat<K, V> extends
-            org.apache.hadoop.mapreduce.lib.output.NullOutputFormat<K, V> {
+        org.apache.hadoop.mapreduce.lib.output.NullOutputFormat<K, V> {
 
         @Override
         public OutputCommitter getOutputCommitter(TaskAttemptContext context) {
             return new OutputCommitter() {
-                public void abortTask(TaskAttemptContext taskContext) { }
-                public void cleanupJob(JobContext jobContext) { }
-                public void commitJob(JobContext jobContext) { }
+                public void abortTask(TaskAttemptContext taskContext) {
+                }
+
+                public void cleanupJob(JobContext jobContext) {
+                }
+
+                public void commitJob(JobContext jobContext) {
+                }
+
                 public void commitTask(TaskAttemptContext taskContext) {
                     Assert.fail("needsTaskCommit is false but commitTask was called");
                 }
+
                 public boolean needsTaskCommit(TaskAttemptContext taskContext) {
-                  return false;
+                    return false;
+                }
+
+                public void setupJob(JobContext jobContext) {
+                }
+
+                public void setupTask(TaskAttemptContext taskContext) {
                 }
-                public void setupJob(JobContext jobContext) { }
-                public void setupTask(TaskAttemptContext taskContext) { }
-              };
+            };
         }
     }
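
[Editor's note] As a reading aid, the MultiOutputFormat wiring this test exercises can be sketched roughly as below. It is illustrative only and not part of the commit: the calls (createConfigurer, addOutputFormat, getJob, configure) are taken from the diff context above, while the assumption that JobConfigurer is a nested class of org.apache.hcatalog.mapreduce.MultiOutputFormat, and the class and method names used here, are placeholders.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
    import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
    import org.apache.hcatalog.mapreduce.MultiOutputFormat;
    import org.apache.hcatalog.mapreduce.MultiOutputFormat.JobConfigurer;

    public class MultiOutputSketch {
        // Registers two named outputs on one job, mirroring the test setup above.
        public static void configureOutputs(Job job, Path outDir) throws Exception {
            JobConfigurer configurer = MultiOutputFormat.createConfigurer(job);

            // Each alias gets its own OutputFormat and key/value classes.
            configurer.addOutputFormat("out1", TextOutputFormat.class, IntWritable.class, Text.class);
            configurer.addOutputFormat("out2", SequenceFileOutputFormat.class, Text.class, IntWritable.class);

            // Per-alias settings (such as the output path) are applied to that alias's Job view.
            FileOutputFormat.setOutputPath(configurer.getJob("out1"), new Path(outDir, "out1"));
            FileOutputFormat.setOutputPath(configurer.getJob("out2"), new Path(outDir, "out2"));

            // configure() merges the per-alias settings back into the parent job configuration,
            // de-duplicating entries such as DistributedCache paths, as the assertions above check.
            configurer.configure();
        }
    }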
 

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestPassProperties.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestPassProperties.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestPassProperties.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestPassProperties.java Mon Sep 10 23:28:55 2012
@@ -46,10 +46,10 @@ import org.apache.pig.PigServer;
 import org.junit.Test;
 
 public class TestPassProperties {
-  private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
-      "/build/test/data/" + TestSequenceFileReadWrite.class.getCanonicalName();
-  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
-  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
+    private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+            "/build/test/data/" + TestSequenceFileReadWrite.class.getCanonicalName();
+    private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+    private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
 
     private static Driver driver;
     private static PigServer server;
@@ -79,7 +79,7 @@ public class TestPassProperties {
     }
 
     @Test
-    public void testSequenceTableWriteReadMR() throws Exception{
+    public void testSequenceTableWriteReadMR() throws Exception {
         Initialize();
         String createTable = "CREATE TABLE bad_props_table(a0 int, a1 String, a2 String) STORED AS SEQUENCEFILE";
         driver.run("drop table bad_props_table");
@@ -88,56 +88,55 @@ public class TestPassProperties {
 
         boolean caughtException = false;
         try {
-          Configuration conf = new Configuration();
-          conf.set("hive.metastore.uris", "thrift://no.such.machine:10888");
-          conf.set("hive.metastore.local", "false");
-          Job job = new Job(conf, "Write-hcat-seq-table");
-          job.setJarByClass(TestSequenceFileReadWrite.class);
-  
-          job.setMapperClass(Map.class);
-          job.setOutputKeyClass(NullWritable.class);
-          job.setOutputValueClass(DefaultHCatRecord.class);
-          job.setInputFormatClass(TextInputFormat.class);
-          TextInputFormat.setInputPaths(job, INPUT_FILE_NAME);
-  
-          HCatOutputFormat.setOutput(job, OutputJobInfo.create(
-                  MetaStoreUtils.DEFAULT_DATABASE_NAME, "bad_props_table", null));
-          job.setOutputFormatClass(HCatOutputFormat.class);
-          HCatOutputFormat.setSchema(job, getSchema());
-          job.setNumReduceTasks(0);
-          assertTrue(job.waitForCompletion(true));
-          new FileOutputCommitterContainer(job, null).cleanupJob(job);
+            Configuration conf = new Configuration();
+            conf.set("hive.metastore.uris", "thrift://no.such.machine:10888");
+            conf.set("hive.metastore.local", "false");
+            Job job = new Job(conf, "Write-hcat-seq-table");
+            job.setJarByClass(TestSequenceFileReadWrite.class);
+
+            job.setMapperClass(Map.class);
+            job.setOutputKeyClass(NullWritable.class);
+            job.setOutputValueClass(DefaultHCatRecord.class);
+            job.setInputFormatClass(TextInputFormat.class);
+            TextInputFormat.setInputPaths(job, INPUT_FILE_NAME);
+
+            HCatOutputFormat.setOutput(job, OutputJobInfo.create(
+                    MetaStoreUtils.DEFAULT_DATABASE_NAME, "bad_props_table", null));
+            job.setOutputFormatClass(HCatOutputFormat.class);
+            HCatOutputFormat.setSchema(job, getSchema());
+            job.setNumReduceTasks(0);
+            assertTrue(job.waitForCompletion(true));
+            new FileOutputCommitterContainer(job, null).cleanupJob(job);
         } catch (Exception e) {
             caughtException = true;
             assertTrue(e.getMessage().contains(
-              "Could not connect to meta store using any of the URIs provided"));
+                    "Could not connect to meta store using any of the URIs provided"));
         }
         assertTrue(caughtException);
     }
-    
-    public static class Map extends Mapper<LongWritable, Text, NullWritable, DefaultHCatRecord>{
 
-      public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
-          String[] cols = value.toString().split(",");
-          DefaultHCatRecord record = new DefaultHCatRecord(3);
-          record.set(0,Integer.parseInt(cols[0]));
-          record.set(1,cols[1]);
-          record.set(2,cols[2]);
-          context.write(NullWritable.get(), record);
-      }
-    }
+    public static class Map extends Mapper<LongWritable, Text, NullWritable, DefaultHCatRecord> {
 
-  private HCatSchema getSchema() throws HCatException {
-      HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
-      schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT,
-              ""));
-      schema.append(new HCatFieldSchema("a1",
-              HCatFieldSchema.Type.STRING, ""));
-      schema.append(new HCatFieldSchema("a2",
-              HCatFieldSchema.Type.STRING, ""));
-      return schema;
-  }
+        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
+            String[] cols = value.toString().split(",");
+            DefaultHCatRecord record = new DefaultHCatRecord(3);
+            record.set(0, Integer.parseInt(cols[0]));
+            record.set(1, cols[1]);
+            record.set(2, cols[2]);
+            context.write(NullWritable.get(), record);
+        }
+    }
 
+    private HCatSchema getSchema() throws HCatException {
+        HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
+        schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT,
+                ""));
+        schema.append(new HCatFieldSchema("a1",
+                HCatFieldSchema.Type.STRING, ""));
+        schema.append(new HCatFieldSchema("a2",
+                HCatFieldSchema.Type.STRING, ""));
+        return schema;
+    }
 
 
 }

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java Mon Sep 10 23:28:55 2012
@@ -53,10 +53,10 @@ import org.apache.pig.data.Tuple;
 import org.junit.Test;
 
 public class TestSequenceFileReadWrite extends TestCase {
-  private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
-      "/build/test/data/" + TestSequenceFileReadWrite.class.getCanonicalName();
-  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
-  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
+    private static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+            "/build/test/data/" + TestSequenceFileReadWrite.class.getCanonicalName();
+    private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+    private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
 
     private static Driver driver;
     private static PigServer server;
@@ -86,7 +86,7 @@ public class TestSequenceFileReadWrite e
     }
 
     @Test
-   public void testSequenceTableWriteRead() throws Exception{
+    public void testSequenceTableWriteRead() throws Exception {
         Initialize();
         String createTable = "CREATE TABLE demo_table(a0 int, a1 String, a2 String) STORED AS SEQUENCEFILE";
         driver.run("drop table demo_table");
@@ -112,10 +112,10 @@ public class TestSequenceFileReadWrite e
             numTuplesRead++;
         }
         assertEquals(input.length, numTuplesRead);
-   }
+    }
 
     @Test
-    public void testTextTableWriteRead() throws Exception{
+    public void testTextTableWriteRead() throws Exception {
         Initialize();
         String createTable = "CREATE TABLE demo_table_1(a0 int, a1 String, a2 String) STORED AS TEXTFILE";
         driver.run("drop table demo_table_1");
@@ -144,7 +144,7 @@ public class TestSequenceFileReadWrite e
     }
 
     @Test
-    public void testSequenceTableWriteReadMR() throws Exception{
+    public void testSequenceTableWriteReadMR() throws Exception {
         Initialize();
         String createTable = "CREATE TABLE demo_table_2(a0 int, a1 String, a2 String) STORED AS SEQUENCEFILE";
         driver.run("drop table demo_table_2");
@@ -238,27 +238,27 @@ public class TestSequenceFileReadWrite e
     }
 
 
-  public static class Map extends Mapper<LongWritable, Text, NullWritable, DefaultHCatRecord>{
+    public static class Map extends Mapper<LongWritable, Text, NullWritable, DefaultHCatRecord> {
 
-      public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
-          String[] cols = value.toString().split(",");
-          DefaultHCatRecord record = new DefaultHCatRecord(3);
-          record.set(0,Integer.parseInt(cols[0]));
-          record.set(1,cols[1]);
-          record.set(2,cols[2]);
-          context.write(NullWritable.get(), record);
-      }
-    }
-
-  private HCatSchema getSchema() throws HCatException {
-      HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
-      schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT,
-              ""));
-      schema.append(new HCatFieldSchema("a1",
-              HCatFieldSchema.Type.STRING, ""));
-      schema.append(new HCatFieldSchema("a2",
-              HCatFieldSchema.Type.STRING, ""));
-      return schema;
-  }
+        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
+            String[] cols = value.toString().split(",");
+            DefaultHCatRecord record = new DefaultHCatRecord(3);
+            record.set(0, Integer.parseInt(cols[0]));
+            record.set(1, cols[1]);
+            record.set(2, cols[2]);
+            context.write(NullWritable.get(), record);
+        }
+    }
+
+    private HCatSchema getSchema() throws HCatException {
+        HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
+        schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT,
+                ""));
+        schema.append(new HCatFieldSchema("a1",
+                HCatFieldSchema.Type.STRING, ""));
+        schema.append(new HCatFieldSchema("a2",
+                HCatFieldSchema.Type.STRING, ""));
+        return schema;
+    }
 
 }

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java Mon Sep 10 23:28:55 2012
@@ -46,11 +46,11 @@ import org.apache.hcatalog.shims.HCatHad
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-  /**
-   * TestRCFile.
-   *
-   */
-  public class TestRCFileMapReduceInputFormat extends TestCase {
+/**
+ * TestRCFile.
+ *
+ */
+public class TestRCFileMapReduceInputFormat extends TestCase {
 
     private static final Logger LOG = LoggerFactory.getLogger(TestRCFileMapReduceInputFormat.class);
 
@@ -65,18 +65,18 @@ import org.slf4j.LoggerFactory;
     private static Properties tbl;
 
     static {
-      try {
-        fs = FileSystem.getLocal(conf);
-        Path dir = new Path(System.getProperty("test.data.dir", ".") + "/mapred");
-        file = new Path(dir, "test_rcfile");
-        fs.delete(dir, true);
-        // the SerDe part is from TestLazySimpleSerDe
-        serDe = new ColumnarSerDe();
-        // Create the SerDe
-        tbl = createProperties();
-        serDe.initialize(conf, tbl);
-      } catch (Exception e) {
-      }
+        try {
+            fs = FileSystem.getLocal(conf);
+            Path dir = new Path(System.getProperty("test.data.dir", ".") + "/mapred");
+            file = new Path(dir, "test_rcfile");
+            fs.delete(dir, true);
+            // the SerDe part is from TestLazySimpleSerDe
+            serDe = new ColumnarSerDe();
+            // Create the SerDe
+            tbl = createProperties();
+            serDe.initialize(conf, tbl);
+        } catch (Exception e) {
+        }
     }
 
     private static BytesRefArrayWritable patialS = new BytesRefArrayWritable();
@@ -84,164 +84,164 @@ import org.slf4j.LoggerFactory;
     private static byte[][] bytesArray = null;
 
     private static BytesRefArrayWritable s = null;
+
     static {
-      try {
-        bytesArray = new byte[][] {"123".getBytes("UTF-8"),
-            "456".getBytes("UTF-8"), "789".getBytes("UTF-8"),
-            "1000".getBytes("UTF-8"), "5.3".getBytes("UTF-8"),
-            "hive and hadoop".getBytes("UTF-8"), new byte[0],
-            "NULL".getBytes("UTF-8")};
-        s = new BytesRefArrayWritable(bytesArray.length);
-        s.set(0, new BytesRefWritable("123".getBytes("UTF-8")));
-        s.set(1, new BytesRefWritable("456".getBytes("UTF-8")));
-        s.set(2, new BytesRefWritable("789".getBytes("UTF-8")));
-        s.set(3, new BytesRefWritable("1000".getBytes("UTF-8")));
-        s.set(4, new BytesRefWritable("5.3".getBytes("UTF-8")));
-        s.set(5, new BytesRefWritable("hive and hadoop".getBytes("UTF-8")));
-        s.set(6, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        s.set(7, new BytesRefWritable("NULL".getBytes("UTF-8")));
-
-        // partial test init
-        patialS.set(0, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        patialS.set(1, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        patialS.set(2, new BytesRefWritable("789".getBytes("UTF-8")));
-        patialS.set(3, new BytesRefWritable("1000".getBytes("UTF-8")));
-        patialS.set(4, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        patialS.set(5, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        patialS.set(6, new BytesRefWritable("NULL".getBytes("UTF-8")));
-        patialS.set(7, new BytesRefWritable("NULL".getBytes("UTF-8")));
+        try {
+            bytesArray = new byte[][]{"123".getBytes("UTF-8"),
+                "456".getBytes("UTF-8"), "789".getBytes("UTF-8"),
+                "1000".getBytes("UTF-8"), "5.3".getBytes("UTF-8"),
+                "hive and hadoop".getBytes("UTF-8"), new byte[0],
+                "NULL".getBytes("UTF-8")};
+            s = new BytesRefArrayWritable(bytesArray.length);
+            s.set(0, new BytesRefWritable("123".getBytes("UTF-8")));
+            s.set(1, new BytesRefWritable("456".getBytes("UTF-8")));
+            s.set(2, new BytesRefWritable("789".getBytes("UTF-8")));
+            s.set(3, new BytesRefWritable("1000".getBytes("UTF-8")));
+            s.set(4, new BytesRefWritable("5.3".getBytes("UTF-8")));
+            s.set(5, new BytesRefWritable("hive and hadoop".getBytes("UTF-8")));
+            s.set(6, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            s.set(7, new BytesRefWritable("NULL".getBytes("UTF-8")));
+
+            // partial test init
+            patialS.set(0, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            patialS.set(1, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            patialS.set(2, new BytesRefWritable("789".getBytes("UTF-8")));
+            patialS.set(3, new BytesRefWritable("1000".getBytes("UTF-8")));
+            patialS.set(4, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            patialS.set(5, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            patialS.set(6, new BytesRefWritable("NULL".getBytes("UTF-8")));
+            patialS.set(7, new BytesRefWritable("NULL".getBytes("UTF-8")));
 
-      } catch (UnsupportedEncodingException e) {
-      }
+        } catch (UnsupportedEncodingException e) {
+        }
     }
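
Every encoding used in the static initializer is the literal "UTF-8", so the checked UnsupportedEncodingException that the empty catch block above swallows can be avoided entirely on Java 7+ with java.nio.charset.StandardCharsets. A minimal sketch of the same fixture rows built that way; buildRow is a hypothetical helper, not part of this patch:

    // imports assumed: java.nio.charset.StandardCharsets,
    //                  org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable,
    //                  org.apache.hadoop.hive.serde2.columnar.BytesRefWritable
    static BytesRefArrayWritable buildRow(String... cols) {
        BytesRefArrayWritable row = new BytesRefArrayWritable(cols.length);
        for (int i = 0; i < cols.length; i++) {
            byte[] b = cols[i].getBytes(StandardCharsets.UTF_8);   // no checked exception
            row.set(i, new BytesRefWritable(b, 0, b.length));
        }
        return row;
    }

    // Usage, mirroring the fixtures above:
    // s       = buildRow("123", "456", "789", "1000", "5.3", "hive and hadoop", "NULL", "NULL");
    // patialS = buildRow("NULL", "NULL", "789", "1000", "NULL", "NULL", "NULL", "NULL");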
 
 
     /** For debugging and testing. */
     public static void main(String[] args) throws Exception {
-      int count = 10000;
-      boolean create = true;
+        int count = 10000;
+        boolean create = true;
+
+        String usage = "Usage: RCFile " + "[-count N]" + " file";
+        if (args.length == 0) {
+            LOG.error(usage);
+            System.exit(-1);
+        }
 
-      String usage = "Usage: RCFile " + "[-count N]" + " file";
-      if (args.length == 0) {
-        LOG.error(usage);
-        System.exit(-1);
-      }
-
-      try {
-        for (int i = 0; i < args.length; ++i) { // parse command line
-          if (args[i] == null) {
-            continue;
-          } else if (args[i].equals("-count")) {
-            count = Integer.parseInt(args[++i]);
-          } else {
-            // file is required parameter
-            file = new Path(args[i]);
-          }
-        }
-
-        if (file == null) {
-          LOG.error(usage);
-          System.exit(-1);
-        }
-
-        LOG.info("count = {}", count);
-        LOG.info("create = {}", create);
-        LOG.info("file = {}" ,file);
-
-           // test.performanceTest();
-        LOG.info("Finished.");
-      } finally {
-        fs.close();
-      }
+        try {
+            for (int i = 0; i < args.length; ++i) { // parse command line
+                if (args[i] == null) {
+                    continue;
+                } else if (args[i].equals("-count")) {
+                    count = Integer.parseInt(args[++i]);
+                } else {
+                    // file is required parameter
+                    file = new Path(args[i]);
+                }
+            }
+
+            if (file == null) {
+                LOG.error(usage);
+                System.exit(-1);
+            }
+
+            LOG.info("count = {}", count);
+            LOG.info("create = {}", create);
+            LOG.info("file = {}", file);
+
+            // test.performanceTest();
+            LOG.info("Finished.");
+        } finally {
+            fs.close();
+        }
     }
 
     private static Properties createProperties() {
-      Properties tbl = new Properties();
+        Properties tbl = new Properties();
 
-      // Set the configuration parameters
-      tbl.setProperty(Constants.SERIALIZATION_FORMAT, "9");
-      tbl.setProperty("columns",
-          "abyte,ashort,aint,along,adouble,astring,anullint,anullstring");
-      tbl.setProperty("columns.types",
-          "tinyint:smallint:int:bigint:double:string:int:string");
-      tbl.setProperty(Constants.SERIALIZATION_NULL_FORMAT, "NULL");
-      return tbl;
+        // Set the configuration parameters
+        tbl.setProperty(Constants.SERIALIZATION_FORMAT, "9");
+        tbl.setProperty("columns",
+            "abyte,ashort,aint,along,adouble,astring,anullint,anullstring");
+        tbl.setProperty("columns.types",
+            "tinyint:smallint:int:bigint:double:string:int:string");
+        tbl.setProperty(Constants.SERIALIZATION_NULL_FORMAT, "NULL");
+        return tbl;
     }
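
The properties above declare an eight-column schema (tinyint through string) with "NULL" as the null marker, matching the fixture rows in the static initializer. A minimal sketch of how a ColumnarSerDe initialized with these properties could deserialize one such row, assuming the serDe, s and LOG fields from this class and a caller that declares throws SerDeException; the field walk is illustrative only, not part of this patch:

    // imports assumed: org.apache.hadoop.hive.serde2.objectinspector.StructField,
    //                  org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector
    StructObjectInspector oi = (StructObjectInspector) serDe.getObjectInspector();
    Object row = serDe.deserialize(s);   // s is the BytesRefArrayWritable fixture above
    for (StructField field : oi.getAllStructFieldRefs()) {
        Object value = oi.getStructFieldData(row, field);
        // e.g. anullint and anullstring deserialize to null via the "NULL" null format
        LOG.info("{} = {}", field.getFieldName(), value);
    }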
 
 
-
     public void testSynAndSplit() throws IOException, InterruptedException {
-      splitBeforeSync();
-      splitRightBeforeSync();
-      splitInMiddleOfSync();
-      splitRightAfterSync();
-      splitAfterSync();
+        splitBeforeSync();
+        splitRightBeforeSync();
+        splitInMiddleOfSync();
+        splitRightAfterSync();
+        splitAfterSync();
     }
 
-    private void splitBeforeSync() throws IOException,InterruptedException {
-      writeThenReadByRecordReader(600, 1000, 2, 17684, null);
+    private void splitBeforeSync() throws IOException, InterruptedException {
+        writeThenReadByRecordReader(600, 1000, 2, 17684, null);
     }
 
-    private void splitRightBeforeSync() throws IOException ,InterruptedException{
-      writeThenReadByRecordReader(500, 1000, 2, 17750, null);
+    private void splitRightBeforeSync() throws IOException, InterruptedException {
+        writeThenReadByRecordReader(500, 1000, 2, 17750, null);
     }
 
-    private void splitInMiddleOfSync() throws IOException,InterruptedException {
-      writeThenReadByRecordReader(500, 1000, 2, 17760, null);
+    private void splitInMiddleOfSync() throws IOException, InterruptedException {
+        writeThenReadByRecordReader(500, 1000, 2, 17760, null);
 
     }
 
     private void splitRightAfterSync() throws IOException, InterruptedException {
-      writeThenReadByRecordReader(500, 1000, 2, 17770, null);
+        writeThenReadByRecordReader(500, 1000, 2, 17770, null);
     }
 
-    private void splitAfterSync() throws IOException ,InterruptedException{
-      writeThenReadByRecordReader(500, 1000, 2, 19950, null);
+    private void splitAfterSync() throws IOException, InterruptedException {
+        writeThenReadByRecordReader(500, 1000, 2, 19950, null);
     }
 
     private void writeThenReadByRecordReader(int intervalRecordCount,
-        int writeCount, int splitNumber, long maxSplitSize, CompressionCodec codec)
+                                             int writeCount, int splitNumber, long maxSplitSize, CompressionCodec codec)
         throws IOException, InterruptedException {
-      Path testDir = new Path(System.getProperty("test.data.dir", ".")
-          + "/mapred/testsmallfirstsplit");
-      Path testFile = new Path(testDir, "test_rcfile");
-      fs.delete(testFile, true);
-      Configuration cloneConf = new Configuration(conf);
-      RCFileOutputFormat.setColumnNumber(cloneConf, bytesArray.length);
-      cloneConf.setInt(RCFile.RECORD_INTERVAL_CONF_STR, intervalRecordCount);
-
-      RCFile.Writer writer = new RCFile.Writer(fs, cloneConf, testFile, null, codec);
-
-      BytesRefArrayWritable bytes = new BytesRefArrayWritable(bytesArray.length);
-      for (int i = 0; i < bytesArray.length; i++) {
-        BytesRefWritable cu = null;
-        cu = new BytesRefWritable(bytesArray[i], 0, bytesArray[i].length);
-        bytes.set(i, cu);
-      }
-      for (int i = 0; i < writeCount; i++) {
-        writer.append(bytes);
-      }
-      writer.close();
-
-      RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable> inputFormat = new RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable>();
-      Configuration jonconf = new Configuration(cloneConf);
-      jonconf.set("mapred.input.dir", testDir.toString());
-      JobContext context = new Job(jonconf);
-      context.getConfiguration().setLong("mapred.max.split.size",maxSplitSize);
-      List<InputSplit> splits = inputFormat.getSplits(context);
-      assertEquals("splits length should be " + splitNumber, splits.size(), splitNumber);
-      int readCount = 0;
-      for (int i = 0; i < splits.size(); i++) {
-        TaskAttemptContext tac = HCatHadoopShims.Instance.get().createTaskAttemptContext(jonconf, new TaskAttemptID());
-        RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
-        rr.initialize(splits.get(i), tac);
-        while (rr.nextKeyValue()) {
-          readCount++;
+        Path testDir = new Path(System.getProperty("test.data.dir", ".")
+            + "/mapred/testsmallfirstsplit");
+        Path testFile = new Path(testDir, "test_rcfile");
+        fs.delete(testFile, true);
+        Configuration cloneConf = new Configuration(conf);
+        RCFileOutputFormat.setColumnNumber(cloneConf, bytesArray.length);
+        cloneConf.setInt(RCFile.RECORD_INTERVAL_CONF_STR, intervalRecordCount);
+
+        RCFile.Writer writer = new RCFile.Writer(fs, cloneConf, testFile, null, codec);
+
+        BytesRefArrayWritable bytes = new BytesRefArrayWritable(bytesArray.length);
+        for (int i = 0; i < bytesArray.length; i++) {
+            BytesRefWritable cu = null;
+            cu = new BytesRefWritable(bytesArray[i], 0, bytesArray[i].length);
+            bytes.set(i, cu);
+        }
+        for (int i = 0; i < writeCount; i++) {
+            writer.append(bytes);
+        }
+        writer.close();
+
+        RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable> inputFormat = new RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable>();
+        Configuration jonconf = new Configuration(cloneConf);
+        jonconf.set("mapred.input.dir", testDir.toString());
+        JobContext context = new Job(jonconf);
+        context.getConfiguration().setLong("mapred.max.split.size", maxSplitSize);
+        List<InputSplit> splits = inputFormat.getSplits(context);
+        assertEquals("splits length should be " + splitNumber, splits.size(), splitNumber);
+        int readCount = 0;
+        for (int i = 0; i < splits.size(); i++) {
+            TaskAttemptContext tac = HCatHadoopShims.Instance.get().createTaskAttemptContext(jonconf, new TaskAttemptID());
+            RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
+            rr.initialize(splits.get(i), tac);
+            while (rr.nextKeyValue()) {
+                readCount++;
+            }
         }
-      }
-      assertEquals("readCount should be equal to writeCount", readCount, writeCount);
+        assertEquals("readCount should be equal to writeCount", readCount, writeCount);
     }
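
The helper above writes writeCount copies of the fixture row with RCFile.Writer, then reads them back through RCFileMapReduceInputFormat; intervalRecordCount sets RCFile's record interval and maxSplitSize is chosen around the resulting sync positions, which is what the split*Sync test names refer to. For comparison, a minimal sketch of reading the same file back sequentially with the plain RCFile.Reader, assuming the fs, cloneConf and testFile objects created in the helper and a caller that declares throws IOException; this sanity check is not part of this patch:

    // imports assumed: org.apache.hadoop.io.LongWritable,
    //                  org.apache.hadoop.hive.ql.io.RCFile,
    //                  org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable
    RCFile.Reader reader = new RCFile.Reader(fs, testFile, cloneConf);
    LongWritable rowId = new LongWritable();
    BytesRefArrayWritable row = new BytesRefArrayWritable();
    int rows = 0;
    while (reader.next(rowId)) {
        reader.getCurrentRow(row);   // fetch the current row's column values
        rows++;
    }
    reader.close();
    // rows should equal writeCount, matching the readCount assertion above.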
 
-  }
+}
 
 


