incubator-hcatalog-commits mailing list archives

From: tra...@apache.org
Subject: svn commit: r1383152 [18/27] - in /incubator/hcatalog/trunk: ./ hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/ hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/drivers/ hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/ ...
Date: Mon, 10 Sep 2012 23:29:03 GMT
Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/listener/TestNotificationListener.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/listener/TestNotificationListener.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/listener/TestNotificationListener.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/listener/TestNotificationListener.java Mon Sep 10 23:28:55 2012
@@ -59,136 +59,136 @@ import org.apache.thrift.TException;
 import junit.framework.TestCase;
 
 public class TestNotificationListener extends TestCase implements
-    MessageListener {
+        MessageListener {
 
-  private HiveConf hiveConf;
-  private Driver driver;
-  private AtomicInteger cntInvocation = new AtomicInteger(0);
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-    System.setProperty("java.naming.factory.initial",
-        "org.apache.activemq.jndi.ActiveMQInitialContextFactory");
-    System.setProperty("java.naming.provider.url",
-        "vm://localhost?broker.persistent=false");
-    ConnectionFactory connFac = new ActiveMQConnectionFactory(
-        "vm://localhost?broker.persistent=false");
-    Connection conn = connFac.createConnection();
-    conn.start();
-    // We want message to be sent when session commits, thus we run in
-    // transacted mode.
-    Session session = conn.createSession(true, Session.SESSION_TRANSACTED);
-    Destination hcatTopic = session
-        .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX);
-    MessageConsumer consumer1 = session.createConsumer(hcatTopic);
-    consumer1.setMessageListener(this);
-    Destination tblTopic = session
-        .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".mydb.mytbl");
-    MessageConsumer consumer2 = session.createConsumer(tblTopic);
-    consumer2.setMessageListener(this);
-    Destination dbTopic = session
-        .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".mydb");
-    MessageConsumer consumer3 = session.createConsumer(dbTopic);
-    consumer3.setMessageListener(this);
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.set(ConfVars.METASTORE_EVENT_LISTENERS.varname,
-        NotificationListener.class.getName());
-    hiveConf.set("hive.metastore.local", "true");
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    driver = new Driver(hiveConf);
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    assertEquals(7, cntInvocation.get());
-    super.tearDown();
-  }
-
-  public void testAMQListener() throws MetaException, TException,
-      UnknownTableException, NoSuchObjectException, CommandNeedRetryException,
-      UnknownDBException, InvalidPartitionException, UnknownPartitionException {
-    driver.run("create database mydb");
-    driver.run("use mydb");
-    driver.run("create table mytbl (a string) partitioned by (b string)");
-    driver.run("alter table mytbl add partition(b='2011')");
-    HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
-    Map<String, String> kvs = new HashMap<String, String>(1);
-    kvs.put("b", "2011");
-    msc.markPartitionForEvent("mydb", "mytbl", kvs,
-        PartitionEventType.LOAD_DONE);
-    driver.run("alter table mytbl drop partition(b='2011')");
-    driver.run("drop table mytbl");
-    driver.run("drop database mydb");
-  }
-
-  @Override
-  public void onMessage(Message msg) {
-    cntInvocation.incrementAndGet();
-
-    String event;
-    try {
-      event = msg.getStringProperty(HCatConstants.HCAT_EVENT);
-      if (event.equals(HCatConstants.HCAT_ADD_DATABASE_EVENT)) {
-
-        assertEquals("topic://" + HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX, msg
-            .getJMSDestination().toString());
-        assertEquals("mydb",
-            ((Database) ((ObjectMessage) msg).getObject()).getName());
-      } else if (event.equals(HCatConstants.HCAT_ADD_TABLE_EVENT)) {
-
-        assertEquals("topic://hcat.mydb", msg.getJMSDestination().toString());
-        Table tbl = (Table) (((ObjectMessage) msg).getObject());
-        assertEquals("mytbl", tbl.getTableName());
-        assertEquals("mydb", tbl.getDbName());
-        assertEquals(1, tbl.getPartitionKeysSize());
-      } else if (event.equals(HCatConstants.HCAT_ADD_PARTITION_EVENT)) {
-
-        assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
-            .toString());
-        Partition part = (Partition) (((ObjectMessage) msg).getObject());
-        assertEquals("mytbl", part.getTableName());
-        assertEquals("mydb", part.getDbName());
-        List<String> vals = new ArrayList<String>(1);
-        vals.add("2011");
-        assertEquals(vals, part.getValues());
-      } else if (event.equals(HCatConstants.HCAT_DROP_PARTITION_EVENT)) {
-
-        assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
-            .toString());
-        Partition part = (Partition) (((ObjectMessage) msg).getObject());
-        assertEquals("mytbl", part.getTableName());
-        assertEquals("mydb", part.getDbName());
-        List<String> vals = new ArrayList<String>(1);
-        vals.add("2011");
-        assertEquals(vals, part.getValues());
-      } else if (event.equals(HCatConstants.HCAT_DROP_TABLE_EVENT)) {
-
-        assertEquals("topic://hcat.mydb", msg.getJMSDestination().toString());
-        Table tbl = (Table) (((ObjectMessage) msg).getObject());
-        assertEquals("mytbl", tbl.getTableName());
-        assertEquals("mydb", tbl.getDbName());
-        assertEquals(1, tbl.getPartitionKeysSize());
-      } else if (event.equals(HCatConstants.HCAT_DROP_DATABASE_EVENT)) {
-
-        assertEquals("topic://" + HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX, msg
-            .getJMSDestination().toString());
-        assertEquals("mydb",
-            ((Database) ((ObjectMessage) msg).getObject()).getName());
-      } else if (event.equals(HCatConstants.HCAT_PARTITION_DONE_EVENT)) {
-        assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
-            .toString());
-        MapMessage mapMsg = (MapMessage) msg;
-        assert mapMsg.getString("b").equals("2011");
-      } else
-        assert false;
-    } catch (JMSException e) {
-      e.printStackTrace(System.err);
-      assert false;
+    private HiveConf hiveConf;
+    private Driver driver;
+    private AtomicInteger cntInvocation = new AtomicInteger(0);
+
+    @Override
+    protected void setUp() throws Exception {
+
+        super.setUp();
+        System.setProperty("java.naming.factory.initial",
+                "org.apache.activemq.jndi.ActiveMQInitialContextFactory");
+        System.setProperty("java.naming.provider.url",
+                "vm://localhost?broker.persistent=false");
+        ConnectionFactory connFac = new ActiveMQConnectionFactory(
+                "vm://localhost?broker.persistent=false");
+        Connection conn = connFac.createConnection();
+        conn.start();
+        // We want message to be sent when session commits, thus we run in
+        // transacted mode.
+        Session session = conn.createSession(true, Session.SESSION_TRANSACTED);
+        Destination hcatTopic = session
+                .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX);
+        MessageConsumer consumer1 = session.createConsumer(hcatTopic);
+        consumer1.setMessageListener(this);
+        Destination tblTopic = session
+                .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".mydb.mytbl");
+        MessageConsumer consumer2 = session.createConsumer(tblTopic);
+        consumer2.setMessageListener(this);
+        Destination dbTopic = session
+                .createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".mydb");
+        MessageConsumer consumer3 = session.createConsumer(dbTopic);
+        consumer3.setMessageListener(this);
+        hiveConf = new HiveConf(this.getClass());
+        hiveConf.set(ConfVars.METASTORE_EVENT_LISTENERS.varname,
+                NotificationListener.class.getName());
+        hiveConf.set("hive.metastore.local", "true");
+        hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+        SessionState.start(new CliSessionState(hiveConf));
+        driver = new Driver(hiveConf);
+    }
+
+    @Override
+    protected void tearDown() throws Exception {
+        assertEquals(7, cntInvocation.get());
+        super.tearDown();
+    }
+
+    public void testAMQListener() throws MetaException, TException,
+            UnknownTableException, NoSuchObjectException, CommandNeedRetryException,
+            UnknownDBException, InvalidPartitionException, UnknownPartitionException {
+        driver.run("create database mydb");
+        driver.run("use mydb");
+        driver.run("create table mytbl (a string) partitioned by (b string)");
+        driver.run("alter table mytbl add partition(b='2011')");
+        HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
+        Map<String, String> kvs = new HashMap<String, String>(1);
+        kvs.put("b", "2011");
+        msc.markPartitionForEvent("mydb", "mytbl", kvs,
+                PartitionEventType.LOAD_DONE);
+        driver.run("alter table mytbl drop partition(b='2011')");
+        driver.run("drop table mytbl");
+        driver.run("drop database mydb");
+    }
+
+    @Override
+    public void onMessage(Message msg) {
+        cntInvocation.incrementAndGet();
+
+        String event;
+        try {
+            event = msg.getStringProperty(HCatConstants.HCAT_EVENT);
+            if (event.equals(HCatConstants.HCAT_ADD_DATABASE_EVENT)) {
+
+                assertEquals("topic://" + HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX, msg
+                        .getJMSDestination().toString());
+                assertEquals("mydb",
+                        ((Database) ((ObjectMessage) msg).getObject()).getName());
+            } else if (event.equals(HCatConstants.HCAT_ADD_TABLE_EVENT)) {
+
+                assertEquals("topic://hcat.mydb", msg.getJMSDestination().toString());
+                Table tbl = (Table) (((ObjectMessage) msg).getObject());
+                assertEquals("mytbl", tbl.getTableName());
+                assertEquals("mydb", tbl.getDbName());
+                assertEquals(1, tbl.getPartitionKeysSize());
+            } else if (event.equals(HCatConstants.HCAT_ADD_PARTITION_EVENT)) {
+
+                assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
+                        .toString());
+                Partition part = (Partition) (((ObjectMessage) msg).getObject());
+                assertEquals("mytbl", part.getTableName());
+                assertEquals("mydb", part.getDbName());
+                List<String> vals = new ArrayList<String>(1);
+                vals.add("2011");
+                assertEquals(vals, part.getValues());
+            } else if (event.equals(HCatConstants.HCAT_DROP_PARTITION_EVENT)) {
+
+                assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
+                        .toString());
+                Partition part = (Partition) (((ObjectMessage) msg).getObject());
+                assertEquals("mytbl", part.getTableName());
+                assertEquals("mydb", part.getDbName());
+                List<String> vals = new ArrayList<String>(1);
+                vals.add("2011");
+                assertEquals(vals, part.getValues());
+            } else if (event.equals(HCatConstants.HCAT_DROP_TABLE_EVENT)) {
+
+                assertEquals("topic://hcat.mydb", msg.getJMSDestination().toString());
+                Table tbl = (Table) (((ObjectMessage) msg).getObject());
+                assertEquals("mytbl", tbl.getTableName());
+                assertEquals("mydb", tbl.getDbName());
+                assertEquals(1, tbl.getPartitionKeysSize());
+            } else if (event.equals(HCatConstants.HCAT_DROP_DATABASE_EVENT)) {
+
+                assertEquals("topic://" + HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX, msg
+                        .getJMSDestination().toString());
+                assertEquals("mydb",
+                        ((Database) ((ObjectMessage) msg).getObject()).getName());
+            } else if (event.equals(HCatConstants.HCAT_PARTITION_DONE_EVENT)) {
+                assertEquals("topic://hcat.mydb.mytbl", msg.getJMSDestination()
+                        .toString());
+                MapMessage mapMsg = (MapMessage) msg;
+                assert mapMsg.getString("b").equals("2011");
+            } else
+                assert false;
+        } catch (JMSException e) {
+            e.printStackTrace(System.err);
+            assert false;
+        }
     }
-  }
 }

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/HCatBaseTest.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/HCatBaseTest.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/HCatBaseTest.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/HCatBaseTest.java Mon Sep 10 23:28:55 2012
@@ -38,41 +38,41 @@ import java.io.IOException;
  * Simplify writing HCatalog tests that require a HiveMetaStore.
  */
 public class HCatBaseTest {
-  protected static final Logger LOG = LoggerFactory.getLogger(HCatBaseTest.class);
-  protected static final String TEST_DATA_DIR = System.getProperty("user.dir") +
-      "/build/test/data/" + HCatBaseTest.class.getCanonicalName();
-  protected static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+    protected static final Logger LOG = LoggerFactory.getLogger(HCatBaseTest.class);
+    protected static final String TEST_DATA_DIR = System.getProperty("user.dir") +
+            "/build/test/data/" + HCatBaseTest.class.getCanonicalName();
+    protected static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
 
-  protected HiveConf hiveConf = null;
-  protected Driver driver = null;
-  protected HiveMetaStoreClient client = null;
+    protected HiveConf hiveConf = null;
+    protected Driver driver = null;
+    protected HiveMetaStoreClient client = null;
 
-  @BeforeClass
-  public static void setUpTestDataDir() throws Exception {
-    LOG.info("Using warehouse directory " + TEST_WAREHOUSE_DIR);
-    File f = new File(TEST_WAREHOUSE_DIR);
-    if (f.exists()) {
-      FileUtil.fullyDelete(f);
+    @BeforeClass
+    public static void setUpTestDataDir() throws Exception {
+        LOG.info("Using warehouse directory " + TEST_WAREHOUSE_DIR);
+        File f = new File(TEST_WAREHOUSE_DIR);
+        if (f.exists()) {
+            FileUtil.fullyDelete(f);
+        }
+        Assert.assertTrue(new File(TEST_WAREHOUSE_DIR).mkdirs());
     }
-    Assert.assertTrue(new File(TEST_WAREHOUSE_DIR).mkdirs());
-  }
 
-  @Before
-  public void setUp() throws Exception {
-    if (driver == null) {
-      hiveConf = new HiveConf(this.getClass());
-      hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-      hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-      hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-      hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
-      driver = new Driver(hiveConf);
-      client = new HiveMetaStoreClient(hiveConf);
-      SessionState.start(new CliSessionState(hiveConf));
+    @Before
+    public void setUp() throws Exception {
+        if (driver == null) {
+            hiveConf = new HiveConf(this.getClass());
+            hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+            hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+            hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+            hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
+            driver = new Driver(hiveConf);
+            client = new HiveMetaStoreClient(hiveConf);
+            SessionState.start(new CliSessionState(hiveConf));
+        }
     }
-  }
 
-  protected void logAndRegister(PigServer server, String query) throws IOException {
-    LOG.info("Registering pig query: " + query);
-    server.registerQuery(query);
-  }
+    protected void logAndRegister(PigServer server, String query) throws IOException {
+        LOG.info("Registering pig query: " + query);
+        server.registerQuery(query);
+    }
 }

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java Mon Sep 10 23:28:55 2012
@@ -68,284 +68,283 @@ import org.slf4j.LoggerFactory;
  */
 public abstract class HCatMapReduceTest extends TestCase {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HCatMapReduceTest.class);
-  protected String dbName = "default";
-  protected String tableName = "testHCatMapReduceTable";
+    private static final Logger LOG = LoggerFactory.getLogger(HCatMapReduceTest.class);
+    protected String dbName = "default";
+    protected String tableName = "testHCatMapReduceTable";
 
-  protected String inputFormat = RCFileInputFormat.class.getName();
-  protected String outputFormat = RCFileOutputFormat.class.getName();
-  protected String serdeClass = ColumnarSerDe.class.getName();
+    protected String inputFormat = RCFileInputFormat.class.getName();
+    protected String outputFormat = RCFileOutputFormat.class.getName();
+    protected String serdeClass = ColumnarSerDe.class.getName();
 
-  private static List<HCatRecord> writeRecords = new ArrayList<HCatRecord>();
-  private static List<HCatRecord> readRecords = new ArrayList<HCatRecord>();
+    private static List<HCatRecord> writeRecords = new ArrayList<HCatRecord>();
+    private static List<HCatRecord> readRecords = new ArrayList<HCatRecord>();
 
-  protected abstract void initialize() throws Exception;
+    protected abstract void initialize() throws Exception;
 
-  protected abstract List<FieldSchema> getPartitionKeys();
+    protected abstract List<FieldSchema> getPartitionKeys();
 
-  protected abstract List<FieldSchema> getTableColumns();
+    protected abstract List<FieldSchema> getTableColumns();
 
-  private HiveMetaStoreClient client;
-  protected HiveConf hiveConf;
+    private HiveMetaStoreClient client;
+    protected HiveConf hiveConf;
 
-  private FileSystem fs;
-  private String thriftUri = null;
+    private FileSystem fs;
+    private String thriftUri = null;
 
-  protected Driver driver;
+    protected Driver driver;
 
-  @Override
-  protected void setUp() throws Exception {
-    hiveConf = new HiveConf(this.getClass());
+    @Override
+    protected void setUp() throws Exception {
+        hiveConf = new HiveConf(this.getClass());
 
-    //The default org.apache.hadoop.hive.ql.hooks.PreExecutePrinter hook
-    //is present only in the ql/test directory
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    driver = new Driver(hiveConf);
-    SessionState.start(new CliSessionState(hiveConf));
+        //The default org.apache.hadoop.hive.ql.hooks.PreExecutePrinter hook
+        //is present only in the ql/test directory
+        hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+        hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+        driver = new Driver(hiveConf);
+        SessionState.start(new CliSessionState(hiveConf));
 
-    thriftUri = System.getenv("HCAT_METASTORE_URI");
+        thriftUri = System.getenv("HCAT_METASTORE_URI");
 
-    if( thriftUri != null ) {
-      LOG.info("Using URI {}", thriftUri);
+        if (thriftUri != null) {
+            LOG.info("Using URI {}", thriftUri);
 
-      hiveConf.set("hive.metastore.local", "false");
-      hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUri);
-    }
+            hiveConf.set("hive.metastore.local", "false");
+            hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUri);
+        }
 
-    fs = new LocalFileSystem();
-    fs.initialize(fs.getWorkingDirectory().toUri(), new Configuration());
+        fs = new LocalFileSystem();
+        fs.initialize(fs.getWorkingDirectory().toUri(), new Configuration());
 
-    initialize();
+        initialize();
 
-    client = new HiveMetaStoreClient(hiveConf, null);
-    initTable();
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    try {
-      String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
-
-      client.dropTable(databaseName, tableName);
-    } catch(Exception e) {
-      e.printStackTrace();
-      throw e;
+        client = new HiveMetaStoreClient(hiveConf, null);
+        initTable();
     }
 
-    client.close();
-  }
-
-
-
-  private void initTable() throws Exception {
-
-    String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
-
-    try {
-      client.dropTable(databaseName, tableName);
-    } catch(Exception e) {
-    } //can fail with NoSuchObjectException
-
+    @Override
+    protected void tearDown() throws Exception {
+        try {
+            String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
 
-    Table tbl = new Table();
-    tbl.setDbName(databaseName);
-    tbl.setTableName(tableName);
-    tbl.setTableType("MANAGED_TABLE");
-    StorageDescriptor sd = new StorageDescriptor();
+            client.dropTable(databaseName, tableName);
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        }
 
-    sd.setCols(getTableColumns());
-    tbl.setPartitionKeys(getPartitionKeys());
+        client.close();
+    }
 
-    tbl.setSd(sd);
 
-    sd.setBucketCols(new ArrayList<String>(2));
-    sd.setSerdeInfo(new SerDeInfo());
-    sd.getSerdeInfo().setName(tbl.getTableName());
-    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-    sd.getSerdeInfo().getParameters().put(
-        org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
-    sd.getSerdeInfo().setSerializationLib(serdeClass);
-    sd.setInputFormat(inputFormat);
-    sd.setOutputFormat(outputFormat);
+    private void initTable() throws Exception {
 
-    Map<String, String> tableParams = new HashMap<String, String>();
-    tbl.setParameters(tableParams);
+        String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
 
-    client.createTable(tbl);
-  }
+        try {
+            client.dropTable(databaseName, tableName);
+        } catch (Exception e) {
+        } //can fail with NoSuchObjectException
+
+
+        Table tbl = new Table();
+        tbl.setDbName(databaseName);
+        tbl.setTableName(tableName);
+        tbl.setTableType("MANAGED_TABLE");
+        StorageDescriptor sd = new StorageDescriptor();
+
+        sd.setCols(getTableColumns());
+        tbl.setPartitionKeys(getPartitionKeys());
+
+        tbl.setSd(sd);
+
+        sd.setBucketCols(new ArrayList<String>(2));
+        sd.setSerdeInfo(new SerDeInfo());
+        sd.getSerdeInfo().setName(tbl.getTableName());
+        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+        sd.getSerdeInfo().getParameters().put(
+                org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+        sd.getSerdeInfo().setSerializationLib(serdeClass);
+        sd.setInputFormat(inputFormat);
+        sd.setOutputFormat(outputFormat);
 
-  //Create test input file with specified number of rows
-  private void createInputFile(Path path, int rowCount) throws IOException {
+        Map<String, String> tableParams = new HashMap<String, String>();
+        tbl.setParameters(tableParams);
 
-    if( fs.exists(path) ) {
-      fs.delete(path, true);
+        client.createTable(tbl);
     }
 
-    FSDataOutputStream os = fs.create(path);
+    //Create test input file with specified number of rows
+    private void createInputFile(Path path, int rowCount) throws IOException {
 
-    for(int i = 0;i < rowCount;i++) {
-      os.writeChars(i + "\n");
-    }
+        if (fs.exists(path)) {
+            fs.delete(path, true);
+        }
 
-    os.close();
-  }
+        FSDataOutputStream os = fs.create(path);
 
-  public static class MapCreate extends
-  Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
+        for (int i = 0; i < rowCount; i++) {
+            os.writeChars(i + "\n");
+        }
 
-    static int writeCount = 0; //test will be in local mode
+        os.close();
+    }
 
-    @Override
-    public void map(LongWritable key, Text value, Context context
-    ) throws IOException, InterruptedException {
-      {
-        try {
-          HCatRecord rec = writeRecords.get(writeCount);
-          context.write(null, rec);
-          writeCount++;
+    public static class MapCreate extends
+            Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
 
-        }catch(Exception e) {
+        static int writeCount = 0; //test will be in local mode
 
-          e.printStackTrace(System.err); //print since otherwise exception is lost
-          throw new IOException(e);
+        @Override
+        public void map(LongWritable key, Text value, Context context
+        ) throws IOException, InterruptedException {
+            {
+                try {
+                    HCatRecord rec = writeRecords.get(writeCount);
+                    context.write(null, rec);
+                    writeCount++;
+
+                } catch (Exception e) {
+
+                    e.printStackTrace(System.err); //print since otherwise exception is lost
+                    throw new IOException(e);
+                }
+            }
         }
-      }
     }
-  }
 
-  public static class MapRead extends
-  Mapper<WritableComparable, HCatRecord, BytesWritable, Text> {
+    public static class MapRead extends
+            Mapper<WritableComparable, HCatRecord, BytesWritable, Text> {
 
-    static int readCount = 0; //test will be in local mode
+        static int readCount = 0; //test will be in local mode
 
-    @Override
-    public void map(WritableComparable key, HCatRecord value, Context context
-    ) throws IOException, InterruptedException {
-      {
-        try {
-          readRecords.add(value);
-          readCount++;
-        } catch(Exception e) {
-          e.printStackTrace(); //print since otherwise exception is lost
-          throw new IOException(e);
+        @Override
+        public void map(WritableComparable key, HCatRecord value, Context context
+        ) throws IOException, InterruptedException {
+            {
+                try {
+                    readRecords.add(value);
+                    readCount++;
+                } catch (Exception e) {
+                    e.printStackTrace(); //print since otherwise exception is lost
+                    throw new IOException(e);
+                }
+            }
         }
-      }
     }
-  }
 
-  Job runMRCreate(Map<String, String> partitionValues,
-        List<HCatFieldSchema> partitionColumns, List<HCatRecord> records,
-        int writeCount, boolean assertWrite) throws Exception {
+    Job runMRCreate(Map<String, String> partitionValues,
+                    List<HCatFieldSchema> partitionColumns, List<HCatRecord> records,
+                    int writeCount, boolean assertWrite) throws Exception {
 
-    writeRecords = records;
-    MapCreate.writeCount = 0;
+        writeRecords = records;
+        MapCreate.writeCount = 0;
 
-    Configuration conf = new Configuration();
-    Job job = new Job(conf, "hcat mapreduce write test");
-    job.setJarByClass(this.getClass());
-    job.setMapperClass(HCatMapReduceTest.MapCreate.class);
+        Configuration conf = new Configuration();
+        Job job = new Job(conf, "hcat mapreduce write test");
+        job.setJarByClass(this.getClass());
+        job.setMapperClass(HCatMapReduceTest.MapCreate.class);
 
-    // input/output settings
-    job.setInputFormatClass(TextInputFormat.class);
+        // input/output settings
+        job.setInputFormatClass(TextInputFormat.class);
 
-    Path path = new Path(fs.getWorkingDirectory(), "mapred/testHCatMapReduceInput");
-    createInputFile(path, writeCount);
+        Path path = new Path(fs.getWorkingDirectory(), "mapred/testHCatMapReduceInput");
+        createInputFile(path, writeCount);
 
-    TextInputFormat.setInputPaths(job, path);
+        TextInputFormat.setInputPaths(job, path);
 
-    job.setOutputFormatClass(HCatOutputFormat.class);
+        job.setOutputFormatClass(HCatOutputFormat.class);
 
-    OutputJobInfo outputJobInfo = OutputJobInfo.create(dbName, tableName, partitionValues);
-    HCatOutputFormat.setOutput(job, outputJobInfo);
+        OutputJobInfo outputJobInfo = OutputJobInfo.create(dbName, tableName, partitionValues);
+        HCatOutputFormat.setOutput(job, outputJobInfo);
 
-    job.setMapOutputKeyClass(BytesWritable.class);
-    job.setMapOutputValueClass(DefaultHCatRecord.class);
+        job.setMapOutputKeyClass(BytesWritable.class);
+        job.setMapOutputValueClass(DefaultHCatRecord.class);
 
-    job.setNumReduceTasks(0);
+        job.setNumReduceTasks(0);
 
-    HCatOutputFormat.setSchema(job, new HCatSchema(partitionColumns));
+        HCatOutputFormat.setSchema(job, new HCatSchema(partitionColumns));
 
-    boolean success = job.waitForCompletion(true);
+        boolean success = job.waitForCompletion(true);
 
-    // Ensure counters are set when data has actually been read.
-    if (partitionValues != null) {
-      assertTrue(job.getCounters().getGroup("FileSystemCounters")
-          .findCounter("FILE_BYTES_READ").getValue() > 0);
-    }
+        // Ensure counters are set when data has actually been read.
+        if (partitionValues != null) {
+            assertTrue(job.getCounters().getGroup("FileSystemCounters")
+                    .findCounter("FILE_BYTES_READ").getValue() > 0);
+        }
 
-    if (!HcatTestUtils.isHadoop23()) {
-        // Local mode outputcommitter hook is not invoked in Hadoop 1.x
-        if (success) {
-            new FileOutputCommitterContainer(job,null).commitJob(job);
-        } else {
-            new FileOutputCommitterContainer(job,null).abortJob(job, JobStatus.State.FAILED);
+        if (!HcatTestUtils.isHadoop23()) {
+            // Local mode outputcommitter hook is not invoked in Hadoop 1.x
+            if (success) {
+                new FileOutputCommitterContainer(job, null).commitJob(job);
+            } else {
+                new FileOutputCommitterContainer(job, null).abortJob(job, JobStatus.State.FAILED);
+            }
+        }
+        if (assertWrite) {
+            // we assert only if we expected to assert with this call.
+            Assert.assertEquals(writeCount, MapCreate.writeCount);
         }
-    }
-    if (assertWrite){
-      // we assert only if we expected to assert with this call.
-      Assert.assertEquals(writeCount, MapCreate.writeCount);
-    }
 
-    return job;
-  }
+        return job;
+    }
 
-  List<HCatRecord> runMRRead(int readCount) throws Exception {
-    return runMRRead(readCount, null);
-  }
+    List<HCatRecord> runMRRead(int readCount) throws Exception {
+        return runMRRead(readCount, null);
+    }
 
-  List<HCatRecord> runMRRead(int readCount, String filter) throws Exception {
+    List<HCatRecord> runMRRead(int readCount, String filter) throws Exception {
 
-    MapRead.readCount = 0;
-    readRecords.clear();
+        MapRead.readCount = 0;
+        readRecords.clear();
 
-    Configuration conf = new Configuration();
-    Job job = new Job(conf, "hcat mapreduce read test");
-    job.setJarByClass(this.getClass());
-    job.setMapperClass(HCatMapReduceTest.MapRead.class);
+        Configuration conf = new Configuration();
+        Job job = new Job(conf, "hcat mapreduce read test");
+        job.setJarByClass(this.getClass());
+        job.setMapperClass(HCatMapReduceTest.MapRead.class);
 
-    // input/output settings
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
+        // input/output settings
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
 
-    InputJobInfo inputJobInfo = InputJobInfo.create(dbName,tableName,filter);
-    HCatInputFormat.setInput(job, inputJobInfo);
+        InputJobInfo inputJobInfo = InputJobInfo.create(dbName, tableName, filter);
+        HCatInputFormat.setInput(job, inputJobInfo);
 
-    job.setMapOutputKeyClass(BytesWritable.class);
-    job.setMapOutputValueClass(Text.class);
+        job.setMapOutputKeyClass(BytesWritable.class);
+        job.setMapOutputValueClass(Text.class);
 
-    job.setNumReduceTasks(0);
+        job.setNumReduceTasks(0);
 
-    Path path = new Path(fs.getWorkingDirectory(), "mapred/testHCatMapReduceOutput");
-    if( fs.exists(path) ) {
-      fs.delete(path, true);
-    }
+        Path path = new Path(fs.getWorkingDirectory(), "mapred/testHCatMapReduceOutput");
+        if (fs.exists(path)) {
+            fs.delete(path, true);
+        }
 
-    TextOutputFormat.setOutputPath(job, path);
+        TextOutputFormat.setOutputPath(job, path);
 
-    job.waitForCompletion(true);
-    Assert.assertEquals(readCount, MapRead.readCount);
+        job.waitForCompletion(true);
+        Assert.assertEquals(readCount, MapRead.readCount);
 
-    return readRecords;
-  }
+        return readRecords;
+    }
 
 
-  protected HCatSchema getTableSchema() throws Exception {
+    protected HCatSchema getTableSchema() throws Exception {
 
-    Configuration conf = new Configuration();
-    Job job = new Job(conf, "hcat mapreduce read schema test");
-    job.setJarByClass(this.getClass());
+        Configuration conf = new Configuration();
+        Job job = new Job(conf, "hcat mapreduce read schema test");
+        job.setJarByClass(this.getClass());
 
-    // input/output settings
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
+        // input/output settings
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
 
-    InputJobInfo inputJobInfo = InputJobInfo.create(dbName,tableName,null);
-    HCatInputFormat.setInput(job, inputJobInfo);
+        InputJobInfo inputJobInfo = InputJobInfo.create(dbName, tableName, null);
+        HCatInputFormat.setInput(job, inputJobInfo);
 
-    return HCatInputFormat.getTableSchema(job);
-  }
+        return HCatInputFormat.getTableSchema(job);
+    }
 
 }
 

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatDynamicPartitioned.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatDynamicPartitioned.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatDynamicPartitioned.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatDynamicPartitioned.java Mon Sep 10 23:28:55 2012
@@ -39,124 +39,124 @@ import org.slf4j.LoggerFactory;
 
 public class TestHCatDynamicPartitioned extends HCatMapReduceTest {
 
-  private List<HCatRecord> writeRecords;
-  private List<HCatFieldSchema> dataColumns;
-  private static final Logger LOG = LoggerFactory.getLogger(TestHCatDynamicPartitioned.class);
-
-  @Override
-  protected void initialize() throws Exception {
-
-    tableName = "testHCatDynamicPartitionedTable";
-    generateWriteRecords(20,5,0);
-    generateDataColumns();
-  }
-
-  private void generateDataColumns() throws HCatException {
-    dataColumns = new ArrayList<HCatFieldSchema>();
-    dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
-    dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("p1", Constants.STRING_TYPE_NAME, "")));
-  }
-
-  private void generateWriteRecords(int max, int mod,int offset) {
-    writeRecords = new ArrayList<HCatRecord>();
-
-    for(int i = 0;i < max;i++) {
-      List<Object> objList = new ArrayList<Object>();
-
-      objList.add(i);
-      objList.add("strvalue" + i);
-      objList.add(String.valueOf((i % mod)+offset));
-      writeRecords.add(new DefaultHCatRecord(objList));
-    }
-  }
-
-  @Override
-  protected List<FieldSchema> getPartitionKeys() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    fields.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
-    return fields;
-  }
-
-  @Override
-  protected List<FieldSchema> getTableColumns() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
-    fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
-    return fields;
-  }
-
-
-  public void testHCatDynamicPartitionedTable() throws Exception {
-
-    generateWriteRecords(20,5,0);
-    runMRCreate(null, dataColumns, writeRecords, 20,true);
-
-    runMRRead(20);
-
-    //Read with partition filter
-    runMRRead(4, "p1 = \"0\"");
-    runMRRead(8, "p1 = \"1\" or p1 = \"3\"");
-    runMRRead(4, "p1 = \"4\"");
-
-    // read from hive to test
-
-    String query = "select * from " + tableName;
-    int retCode = driver.run(query).getResponseCode();
-
-    if( retCode != 0 ) {
-      throw new Exception("Error " + retCode + " running query " + query);
-    }
-
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-    assertEquals(20, res.size());
-
-
-    //Test for duplicate publish
-    IOException exc = null;
-    try {
-      generateWriteRecords(20,5,0);
-      Job job = runMRCreate(null, dataColumns, writeRecords, 20,false);
-      if (HcatTestUtils.isHadoop23()) {
-          new FileOutputCommitterContainer(job,null).cleanupJob(job);
-      }
-    } catch(IOException e) {
-      exc = e;
-    }
-
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertTrue( "Got exception of type ["+((HCatException) exc).getErrorType().toString()
-        + "] Expected ERROR_PUBLISHING_PARTITION or ERROR_MOVE_FAILED",
-        (ErrorType.ERROR_PUBLISHING_PARTITION == ((HCatException) exc).getErrorType())
-        || (ErrorType.ERROR_MOVE_FAILED == ((HCatException) exc).getErrorType())
+    private List<HCatRecord> writeRecords;
+    private List<HCatFieldSchema> dataColumns;
+    private static final Logger LOG = LoggerFactory.getLogger(TestHCatDynamicPartitioned.class);
+
+    @Override
+    protected void initialize() throws Exception {
+
+        tableName = "testHCatDynamicPartitionedTable";
+        generateWriteRecords(20, 5, 0);
+        generateDataColumns();
+    }
+
+    private void generateDataColumns() throws HCatException {
+        dataColumns = new ArrayList<HCatFieldSchema>();
+        dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
+        dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("p1", Constants.STRING_TYPE_NAME, "")));
+    }
+
+    private void generateWriteRecords(int max, int mod, int offset) {
+        writeRecords = new ArrayList<HCatRecord>();
+
+        for (int i = 0; i < max; i++) {
+            List<Object> objList = new ArrayList<Object>();
+
+            objList.add(i);
+            objList.add("strvalue" + i);
+            objList.add(String.valueOf((i % mod) + offset));
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
+    }
+
+    @Override
+    protected List<FieldSchema> getPartitionKeys() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        fields.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
+        return fields;
+    }
+
+    @Override
+    protected List<FieldSchema> getTableColumns() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
+        fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
+        return fields;
+    }
+
+
+    public void testHCatDynamicPartitionedTable() throws Exception {
+
+        generateWriteRecords(20, 5, 0);
+        runMRCreate(null, dataColumns, writeRecords, 20, true);
+
+        runMRRead(20);
+
+        //Read with partition filter
+        runMRRead(4, "p1 = \"0\"");
+        runMRRead(8, "p1 = \"1\" or p1 = \"3\"");
+        runMRRead(4, "p1 = \"4\"");
+
+        // read from hive to test
+
+        String query = "select * from " + tableName;
+        int retCode = driver.run(query).getResponseCode();
+
+        if (retCode != 0) {
+            throw new Exception("Error " + retCode + " running query " + query);
+        }
+
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        assertEquals(20, res.size());
+
+
+        //Test for duplicate publish
+        IOException exc = null;
+        try {
+            generateWriteRecords(20, 5, 0);
+            Job job = runMRCreate(null, dataColumns, writeRecords, 20, false);
+            if (HcatTestUtils.isHadoop23()) {
+                new FileOutputCommitterContainer(job, null).cleanupJob(job);
+            }
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertTrue("Got exception of type [" + ((HCatException) exc).getErrorType().toString()
+                + "] Expected ERROR_PUBLISHING_PARTITION or ERROR_MOVE_FAILED",
+                (ErrorType.ERROR_PUBLISHING_PARTITION == ((HCatException) exc).getErrorType())
+                        || (ErrorType.ERROR_MOVE_FAILED == ((HCatException) exc).getErrorType())
         );
-  }
+    }
 
-//TODO 1.0 miniCluster is slow this test times out, make it work
+    //TODO 1.0 miniCluster is slow this test times out, make it work
 // renaming test to make test framework skip it
-  public void _testHCatDynamicPartitionMaxPartitions() throws Exception {
-    HiveConf hc = new HiveConf(this.getClass());
+    public void _testHCatDynamicPartitionMaxPartitions() throws Exception {
+        HiveConf hc = new HiveConf(this.getClass());
 
-    int maxParts = hiveConf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS);
-    LOG.info("Max partitions allowed = {}", maxParts);
+        int maxParts = hiveConf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS);
+        LOG.info("Max partitions allowed = {}", maxParts);
 
-    IOException exc = null;
-    try {
-      generateWriteRecords(maxParts+5,maxParts+2,10);
-      runMRCreate(null,dataColumns,writeRecords,maxParts+5,false);
-    } catch(IOException e) {
-      exc = e;
-    }
-
-    if (HCatConstants.HCAT_IS_DYNAMIC_MAX_PTN_CHECK_ENABLED){
-      assertTrue(exc != null);
-      assertTrue(exc instanceof HCatException);
-      assertEquals(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS, ((HCatException) exc).getErrorType());
-    }else{
-      assertTrue(exc == null);
-      runMRRead(maxParts+5);
+        IOException exc = null;
+        try {
+            generateWriteRecords(maxParts + 5, maxParts + 2, 10);
+            runMRCreate(null, dataColumns, writeRecords, maxParts + 5, false);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        if (HCatConstants.HCAT_IS_DYNAMIC_MAX_PTN_CHECK_ENABLED) {
+            assertTrue(exc != null);
+            assertTrue(exc instanceof HCatException);
+            assertEquals(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS, ((HCatException) exc).getErrorType());
+        } else {
+            assertTrue(exc == null);
+            runMRRead(maxParts + 5);
+        }
     }
-  }
 }

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java Mon Sep 10 23:28:55 2012
@@ -35,95 +35,95 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestHCatHiveCompatibility extends HCatBaseTest {
-  private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
+    private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data";
 
-  @BeforeClass
-  public static void createInputData() throws Exception {
-    int LOOP_SIZE = 11;
-    File file = new File(INPUT_FILE_NAME);
-    file.deleteOnExit();
-    FileWriter writer = new FileWriter(file);
-    for (int i = 0; i < LOOP_SIZE; i++) {
-      writer.write(i + "\t1\n");
+    @BeforeClass
+    public static void createInputData() throws Exception {
+        int LOOP_SIZE = 11;
+        File file = new File(INPUT_FILE_NAME);
+        file.deleteOnExit();
+        FileWriter writer = new FileWriter(file);
+        for (int i = 0; i < LOOP_SIZE; i++) {
+            writer.write(i + "\t1\n");
+        }
+        writer.close();
     }
-    writer.close();
-  }
 
-  @Test
-  public void testUnpartedReadWrite() throws Exception{
+    @Test
+    public void testUnpartedReadWrite() throws Exception {
 
-    driver.run("drop table if exists junit_unparted_noisd");
-    String createTable = "create table junit_unparted_noisd(a int) stored as RCFILE";
-    Assert.assertEquals(0, driver.run(createTable).getResponseCode());
-
-    // assert that the table created has no hcat instrumentation, and that we're still able to read it.
-    Table table = client.getTable("default", "junit_unparted_noisd");
-    Assert.assertTrue(table.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
-
-    PigServer server = new PigServer(ExecType.LOCAL);
-    logAndRegister(server, "A = load '" + INPUT_FILE_NAME + "' as (a:int);");
-    logAndRegister(server, "store A into 'default.junit_unparted_noisd' using org.apache.hcatalog.pig.HCatStorer();");
-    logAndRegister(server, "B = load 'default.junit_unparted_noisd' using " + HCatLoader.class.getName() + "();");
-    Iterator<Tuple> itr= server.openIterator("B");
-
-    int i = 0;
-
-    while(itr.hasNext()){
-      Tuple t = itr.next();
-      Assert.assertEquals(1, t.size());
-      Assert.assertEquals(t.get(0), i);
-      i++;
-    }
-
-    Assert.assertFalse(itr.hasNext());
-    Assert.assertEquals(11, i);
+        driver.run("drop table if exists junit_unparted_noisd");
+        String createTable = "create table junit_unparted_noisd(a int) stored as RCFILE";
+        Assert.assertEquals(0, driver.run(createTable).getResponseCode());
+
+        // assert that the table created has no hcat instrumentation, and that we're still able to read it.
+        Table table = client.getTable("default", "junit_unparted_noisd");
+        Assert.assertTrue(table.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
+
+        PigServer server = new PigServer(ExecType.LOCAL);
+        logAndRegister(server, "A = load '" + INPUT_FILE_NAME + "' as (a:int);");
+        logAndRegister(server, "store A into 'default.junit_unparted_noisd' using org.apache.hcatalog.pig.HCatStorer();");
+        logAndRegister(server, "B = load 'default.junit_unparted_noisd' using " + HCatLoader.class.getName() + "();");
+        Iterator<Tuple> itr = server.openIterator("B");
+
+        int i = 0;
+
+        while (itr.hasNext()) {
+            Tuple t = itr.next();
+            Assert.assertEquals(1, t.size());
+            Assert.assertEquals(t.get(0), i);
+            i++;
+        }
+
+        Assert.assertFalse(itr.hasNext());
+        Assert.assertEquals(11, i);
+
+        // assert that the table created still has no hcat instrumentation
+        Table table2 = client.getTable("default", "junit_unparted_noisd");
+        Assert.assertTrue(table2.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
 
-    // assert that the table created still has no hcat instrumentation
-    Table table2 = client.getTable("default", "junit_unparted_noisd");
-    Assert.assertTrue(table2.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
-
-    driver.run("drop table junit_unparted_noisd");
-  }
-
-  @Test
-  public void testPartedRead() throws Exception{
-
-    driver.run("drop table if exists junit_parted_noisd");
-    String createTable = "create table junit_parted_noisd(a int) partitioned by (b string) stored as RCFILE";
-    Assert.assertEquals(0, driver.run(createTable).getResponseCode());
-
-    // assert that the table created has no hcat instrumentation, and that we're still able to read it.
-    Table table = client.getTable("default", "junit_parted_noisd");
-    Assert.assertTrue(table.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
-
-    PigServer server = new PigServer(ExecType.LOCAL);
-    logAndRegister(server, "A = load '" + INPUT_FILE_NAME + "' as (a:int);");
-    logAndRegister(server, "store A into 'default.junit_parted_noisd' using org.apache.hcatalog.pig.HCatStorer('b=42');");
-    logAndRegister(server, "B = load 'default.junit_parted_noisd' using " + HCatLoader.class.getName() + "();");
-    Iterator<Tuple> itr= server.openIterator("B");
-
-    int i = 0;
-
-    while(itr.hasNext()){
-      Tuple t = itr.next();
-      Assert.assertEquals(2, t.size()); // Contains explicit field "a" and partition "b".
-      Assert.assertEquals(t.get(0), i);
-      Assert.assertEquals(t.get(1), "42");
-      i++;
+        driver.run("drop table junit_unparted_noisd");
     }
 
-    Assert.assertFalse(itr.hasNext());
-    Assert.assertEquals(11, i);
+    @Test
+    public void testPartedRead() throws Exception {
 
-    // assert that the table created still has no hcat instrumentation
-    Table table2 = client.getTable("default", "junit_parted_noisd");
-    Assert.assertTrue(table2.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
-
-    // assert that there is one partition present, and it had hcat instrumentation inserted when it was created.
-    Partition ptn = client.getPartition("default", "junit_parted_noisd", Arrays.asList("42"));
-
-    Assert.assertNotNull(ptn);
-    Assert.assertTrue(ptn.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
-    driver.run("drop table junit_unparted_noisd");
-  }
+        driver.run("drop table if exists junit_parted_noisd");
+        String createTable = "create table junit_parted_noisd(a int) partitioned by (b string) stored as RCFILE";
+        Assert.assertEquals(0, driver.run(createTable).getResponseCode());
+
+        // assert that the table created has no hcat instrumentation, and that we're still able to read it.
+        Table table = client.getTable("default", "junit_parted_noisd");
+        Assert.assertTrue(table.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
+
+        PigServer server = new PigServer(ExecType.LOCAL);
+        logAndRegister(server, "A = load '" + INPUT_FILE_NAME + "' as (a:int);");
+        logAndRegister(server, "store A into 'default.junit_parted_noisd' using org.apache.hcatalog.pig.HCatStorer('b=42');");
+        logAndRegister(server, "B = load 'default.junit_parted_noisd' using " + HCatLoader.class.getName() + "();");
+        Iterator<Tuple> itr = server.openIterator("B");
+
+        int i = 0;
+
+        while (itr.hasNext()) {
+            Tuple t = itr.next();
+            Assert.assertEquals(2, t.size()); // Contains explicit field "a" and partition "b".
+            Assert.assertEquals(t.get(0), i);
+            Assert.assertEquals(t.get(1), "42");
+            i++;
+        }
+
+        Assert.assertFalse(itr.hasNext());
+        Assert.assertEquals(11, i);
+
+        // assert that the table created still has no hcat instrumentation
+        Table table2 = client.getTable("default", "junit_parted_noisd");
+        Assert.assertTrue(table2.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
+
+        // assert that there is one partition present, and it had hcat instrumentation inserted when it was created.
+        Partition ptn = client.getPartition("default", "junit_parted_noisd", Arrays.asList("42"));
+
+        Assert.assertNotNull(ptn);
+        Assert.assertTrue(ptn.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS));
+        driver.run("drop table junit_unparted_noisd");
+    }
 }

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java Mon Sep 10 23:28:55 2012
@@ -39,77 +39,77 @@ import java.util.Iterator;
 
 public class TestHCatHiveThriftCompatibility extends HCatBaseTest {
 
-  private boolean setUpComplete = false;
-  private Path intStringSeq;
+    private boolean setUpComplete = false;
+    private Path intStringSeq;
 
-  @Before
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    if (setUpComplete) {
-      return;
+    @Before
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        if (setUpComplete) {
+            return;
+        }
+
+        ByteArrayOutputStream out = new ByteArrayOutputStream();
+        TIOStreamTransport transport = new TIOStreamTransport(out);
+        TBinaryProtocol protocol = new TBinaryProtocol(transport);
+
+        IntString intString = new IntString(1, "one", 1);
+        intString.write(protocol);
+        BytesWritable bytesWritable = new BytesWritable(out.toByteArray());
+
+        intStringSeq = new Path(TEST_DATA_DIR + "/data/intString.seq");
+        LOG.info("Creating data file: " + intStringSeq);
+
+        SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(
+                intStringSeq.getFileSystem(hiveConf), hiveConf, intStringSeq,
+                NullWritable.class, BytesWritable.class);
+        seqFileWriter.append(NullWritable.get(), bytesWritable);
+        seqFileWriter.close();
+
+        setUpComplete = true;
     }
 
-    ByteArrayOutputStream out = new ByteArrayOutputStream();
-    TIOStreamTransport transport = new TIOStreamTransport(out);
-    TBinaryProtocol protocol = new TBinaryProtocol(transport);
-
-    IntString intString = new IntString(1, "one", 1);
-    intString.write(protocol);
-    BytesWritable bytesWritable = new BytesWritable(out.toByteArray());
-
-    intStringSeq = new Path(TEST_DATA_DIR + "/data/intString.seq");
-    LOG.info("Creating data file: " + intStringSeq);
-
-    SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(
-        intStringSeq.getFileSystem(hiveConf), hiveConf, intStringSeq,
-        NullWritable.class, BytesWritable.class);
-    seqFileWriter.append(NullWritable.get(), bytesWritable);
-    seqFileWriter.close();
-
-    setUpComplete = true;
-  }
-
-  /**
-   *  Create a table with no explicit schema and ensure its correctly
-   *  discovered from the thrift struct.
-   */
-  @Test
-  public void testDynamicCols() throws Exception {
-    Assert.assertEquals(0, driver.run("drop table if exists test_thrift").getResponseCode());
-    Assert.assertEquals(0, driver.run(
-        "create external table test_thrift " +
-            "partitioned by (year string) " +
-            "row format serde 'org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer' " +
-            "with serdeproperties ( " +
-            "  'serialization.class'='org.apache.hadoop.hive.serde2.thrift.test.IntString', " +
-            "  'serialization.format'='org.apache.thrift.protocol.TBinaryProtocol') " +
-            "stored as" +
-            "  inputformat 'org.apache.hadoop.mapred.SequenceFileInputFormat'" +
-            "  outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'")
-        .getResponseCode());
-    Assert.assertEquals(0,
-        driver.run("alter table test_thrift add partition (year = '2012') location '" +
-            intStringSeq.getParent() + "'").getResponseCode());
-
-    PigServer pigServer = new PigServer(ExecType.LOCAL);
-    pigServer.registerQuery("A = load 'test_thrift' using org.apache.hcatalog.pig.HCatLoader();");
-
-    Schema expectedSchema = new Schema();
-    expectedSchema.add(new Schema.FieldSchema("myint", DataType.INTEGER));
-    expectedSchema.add(new Schema.FieldSchema("mystring", DataType.CHARARRAY));
-    expectedSchema.add(new Schema.FieldSchema("underscore_int", DataType.INTEGER));
-    expectedSchema.add(new Schema.FieldSchema("year", DataType.CHARARRAY));
-
-    Assert.assertEquals(expectedSchema, pigServer.dumpSchema("A"));
-
-    Iterator<Tuple> iterator = pigServer.openIterator("A");
-    Tuple t = iterator.next();
-    Assert.assertEquals(1, t.get(0));
-    Assert.assertEquals("one", t.get(1));
-    Assert.assertEquals(1, t.get(2));
-    Assert.assertEquals("2012", t.get(3));
+    /**
+     *  Create a table with no explicit schema and ensure it's correctly
+     *  discovered from the Thrift struct.
+     */
+    @Test
+    public void testDynamicCols() throws Exception {
+        Assert.assertEquals(0, driver.run("drop table if exists test_thrift").getResponseCode());
+        Assert.assertEquals(0, driver.run(
+                "create external table test_thrift " +
+                        "partitioned by (year string) " +
+                        "row format serde 'org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer' " +
+                        "with serdeproperties ( " +
+                        "  'serialization.class'='org.apache.hadoop.hive.serde2.thrift.test.IntString', " +
+                        "  'serialization.format'='org.apache.thrift.protocol.TBinaryProtocol') " +
+                        "stored as" +
+                        "  inputformat 'org.apache.hadoop.mapred.SequenceFileInputFormat'" +
+                        "  outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'")
+                .getResponseCode());
+        Assert.assertEquals(0,
+                driver.run("alter table test_thrift add partition (year = '2012') location '" +
+                        intStringSeq.getParent() + "'").getResponseCode());
+
+        PigServer pigServer = new PigServer(ExecType.LOCAL);
+        pigServer.registerQuery("A = load 'test_thrift' using org.apache.hcatalog.pig.HCatLoader();");
+
+        Schema expectedSchema = new Schema();
+        expectedSchema.add(new Schema.FieldSchema("myint", DataType.INTEGER));
+        expectedSchema.add(new Schema.FieldSchema("mystring", DataType.CHARARRAY));
+        expectedSchema.add(new Schema.FieldSchema("underscore_int", DataType.INTEGER));
+        expectedSchema.add(new Schema.FieldSchema("year", DataType.CHARARRAY));
+
+        Assert.assertEquals(expectedSchema, pigServer.dumpSchema("A"));
+
+        Iterator<Tuple> iterator = pigServer.openIterator("A");
+        Tuple t = iterator.next();
+        Assert.assertEquals(1, t.get(0));
+        Assert.assertEquals("one", t.get(1));
+        Assert.assertEquals(1, t.get(2));
+        Assert.assertEquals("2012", t.get(3));
 
-    Assert.assertFalse(iterator.hasNext());
-  }
+        Assert.assertFalse(iterator.hasNext());
+    }
 }
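
The schema-discovery behaviour verified above can be checked interactively with the same two calls the test relies on, registerQuery and dumpSchema. A minimal sketch follows, assuming the external table test_thrift created above exists and the Thrift test classes are on the classpath; the class name is illustrative.

    import org.apache.pig.ExecType;
    import org.apache.pig.PigServer;
    import org.apache.pig.impl.logicalLayer.schema.Schema;

    public class ThriftSchemaDiscoverySketch {
        public static void main(String[] args) throws Exception {
            PigServer pigServer = new PigServer(ExecType.LOCAL);

            // The table declares no columns; HCatLoader derives them from the
            // IntString Thrift struct named in the serde properties.
            pigServer.registerQuery(
                "A = load 'test_thrift' using org.apache.hcatalog.pig.HCatLoader();");

            // Expect myint:int, mystring:chararray, underscore_int:int,
            // plus the partition column year:chararray.
            Schema discovered = pigServer.dumpSchema("A");
            System.out.println(discovered);
        }
    }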

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatInputFormat.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatInputFormat.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatInputFormat.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatInputFormat.java Mon Sep 10 23:28:55 2012
@@ -41,107 +41,107 @@ import java.io.IOException;
 
 public class TestHCatInputFormat extends HCatBaseTest {
 
-  private boolean setUpComplete = false;
+    private boolean setUpComplete = false;
 
-  /**
-   * Create an input sequence file with 100 records; every 10th record is bad.
-   * Load this table into Hive.
-   */
-  @Before
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    if (setUpComplete) {
-      return;
-    }
+    /**
+     * Create an input sequence file with 100 records; every 10th record is bad.
+     * Load this file into a Hive table.
+     */
+    @Before
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        if (setUpComplete) {
+            return;
+        }
+
+        Path intStringSeq = new Path(TEST_DATA_DIR + "/data/intString.seq");
+        LOG.info("Creating data file: " + intStringSeq);
+        SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(
+            intStringSeq.getFileSystem(hiveConf), hiveConf, intStringSeq,
+            NullWritable.class, BytesWritable.class);
+
+        ByteArrayOutputStream out = new ByteArrayOutputStream();
+        TIOStreamTransport transport = new TIOStreamTransport(out);
+        TBinaryProtocol protocol = new TBinaryProtocol(transport);
+
+        for (int i = 1; i <= 100; i++) {
+            if (i % 10 == 0) {
+                seqFileWriter.append(NullWritable.get(), new BytesWritable("bad record".getBytes()));
+            } else {
+                out.reset();
+                IntString intString = new IntString(i, Integer.toString(i), i);
+                intString.write(protocol);
+                BytesWritable bytesWritable = new BytesWritable(out.toByteArray());
+                seqFileWriter.append(NullWritable.get(), bytesWritable);
+            }
+        }
+
+        seqFileWriter.close();
+
+        // Now let's load this file into a new Hive table.
+        Assert.assertEquals(0, driver.run("drop table if exists test_bad_records").getResponseCode());
+        Assert.assertEquals(0, driver.run(
+            "create table test_bad_records " +
+                "row format serde 'org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer' " +
+                "with serdeproperties ( " +
+                "  'serialization.class'='org.apache.hadoop.hive.serde2.thrift.test.IntString', " +
+                "  'serialization.format'='org.apache.thrift.protocol.TBinaryProtocol') " +
+                "stored as" +
+                "  inputformat 'org.apache.hadoop.mapred.SequenceFileInputFormat'" +
+                "  outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'")
+            .getResponseCode());
+        Assert.assertEquals(0, driver.run("load data local inpath '" + intStringSeq.getParent() +
+            "' into table test_bad_records").getResponseCode());
 
-    Path intStringSeq = new Path(TEST_DATA_DIR + "/data/intString.seq");
-    LOG.info("Creating data file: " + intStringSeq);
-    SequenceFile.Writer seqFileWriter = SequenceFile.createWriter(
-        intStringSeq.getFileSystem(hiveConf), hiveConf, intStringSeq,
-        NullWritable.class, BytesWritable.class);
-
-    ByteArrayOutputStream out = new ByteArrayOutputStream();
-    TIOStreamTransport transport = new TIOStreamTransport(out);
-    TBinaryProtocol protocol = new TBinaryProtocol(transport);
-
-    for (int i = 1; i <= 100; i++) {
-      if (i % 10 == 0) {
-        seqFileWriter.append(NullWritable.get(), new BytesWritable("bad record".getBytes()));
-      } else {
-        out.reset();
-        IntString intString = new IntString(i, Integer.toString(i), i);
-        intString.write(protocol);
-        BytesWritable bytesWritable = new BytesWritable(out.toByteArray());
-        seqFileWriter.append(NullWritable.get(), bytesWritable);
-      }
+        setUpComplete = true;
     }
 
-    seqFileWriter.close();
+    @Test
+    public void testBadRecordHandlingPasses() throws Exception {
+        Assert.assertTrue(runJob(0.1f));
+    }
 
-    // Now let's load this file into a new Hive table.
-    Assert.assertEquals(0, driver.run("drop table if exists test_bad_records").getResponseCode());
-    Assert.assertEquals(0, driver.run(
-        "create table test_bad_records " +
-            "row format serde 'org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer' " +
-            "with serdeproperties ( " +
-            "  'serialization.class'='org.apache.hadoop.hive.serde2.thrift.test.IntString', " +
-            "  'serialization.format'='org.apache.thrift.protocol.TBinaryProtocol') " +
-            "stored as" +
-            "  inputformat 'org.apache.hadoop.mapred.SequenceFileInputFormat'" +
-            "  outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'")
-        .getResponseCode());
-    Assert.assertEquals(0, driver.run("load data local inpath '" + intStringSeq.getParent() +
-        "' into table test_bad_records").getResponseCode());
-
-    setUpComplete = true;
-  }
-
-  @Test
-  public void testBadRecordHandlingPasses() throws Exception {
-    Assert.assertTrue(runJob(0.1f));
-  }
-
-  @Test
-  public void testBadRecordHandlingFails() throws Exception {
-    Assert.assertFalse(runJob(0.01f));
-  }
-
-  private boolean runJob(float badRecordThreshold) throws Exception {
-    Configuration conf = new Configuration();
-
-    conf.setFloat(HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY, badRecordThreshold);
-
-    Job job = new Job(conf);
-    job.setJarByClass(this.getClass());
-    job.setMapperClass(MyMapper.class);
-
-    job.setInputFormatClass(HCatInputFormat.class);
-    job.setOutputFormatClass(TextOutputFormat.class);
-
-    HCatInputFormat.setInput(job, InputJobInfo.create("default", "test_bad_records", null));
-
-    job.setMapOutputKeyClass(HCatRecord.class);
-    job.setMapOutputValueClass(HCatRecord.class);
-
-    job.setNumReduceTasks(0);
-
-    Path path = new Path(TEST_DATA_DIR, "test_bad_record_handling_output");
-    if (path.getFileSystem(conf).exists(path)) {
-      path.getFileSystem(conf).delete(path, true);
+    @Test
+    public void testBadRecordHandlingFails() throws Exception {
+        Assert.assertFalse(runJob(0.01f));
     }
 
-    TextOutputFormat.setOutputPath(job, path);
+    private boolean runJob(float badRecordThreshold) throws Exception {
+        Configuration conf = new Configuration();
 
-    return job.waitForCompletion(true);
-  }
+        conf.setFloat(HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY, badRecordThreshold);
 
-  public static class MyMapper extends Mapper<NullWritable, HCatRecord, NullWritable, Text> {
-    @Override
-    public void map(NullWritable key, HCatRecord value, Context context)
-        throws IOException, InterruptedException {
-      LOG.info("HCatRecord: " + value);
-      context.write(NullWritable.get(), new Text(value.toString()));
+        Job job = new Job(conf);
+        job.setJarByClass(this.getClass());
+        job.setMapperClass(MyMapper.class);
+
+        job.setInputFormatClass(HCatInputFormat.class);
+        job.setOutputFormatClass(TextOutputFormat.class);
+
+        HCatInputFormat.setInput(job, InputJobInfo.create("default", "test_bad_records", null));
+
+        job.setMapOutputKeyClass(HCatRecord.class);
+        job.setMapOutputValueClass(HCatRecord.class);
+
+        job.setNumReduceTasks(0);
+
+        Path path = new Path(TEST_DATA_DIR, "test_bad_record_handling_output");
+        if (path.getFileSystem(conf).exists(path)) {
+            path.getFileSystem(conf).delete(path, true);
+        }
+
+        TextOutputFormat.setOutputPath(job, path);
+
+        return job.waitForCompletion(true);
+    }
+
+    public static class MyMapper extends Mapper<NullWritable, HCatRecord, NullWritable, Text> {
+        @Override
+        public void map(NullWritable key, HCatRecord value, Context context)
+            throws IOException, InterruptedException {
+            LOG.info("HCatRecord: " + value);
+            context.write(NullWritable.get(), new Text(value.toString()));
+        }
     }
-  }
 }
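
The knob this test exercises is HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY, which roughly caps the fraction of records HCatInputFormat may fail to deserialize before the job is failed: 0.1 tolerates the 10% of bad records written in setUp(), while 0.01 does not. A condensed standalone driver in the same shape as runJob() is sketched below; the class name and output path are illustrative, and it assumes the test_bad_records table from setUp() exists.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
    import org.apache.hcatalog.common.HCatConstants;
    import org.apache.hcatalog.data.HCatRecord;
    import org.apache.hcatalog.mapreduce.HCatInputFormat;
    import org.apache.hcatalog.mapreduce.InputJobInfo;

    public class BadRecordToleranceSketch {

        // Map-only pass: echo every record that HCatInputFormat managed to deserialize.
        public static class EchoMapper extends Mapper<NullWritable, HCatRecord, NullWritable, Text> {
            @Override
            protected void map(NullWritable key, HCatRecord value, Context context)
                    throws IOException, InterruptedException {
                context.write(NullWritable.get(), new Text(value.toString()));
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Allow roughly 10% of the input records to fail deserialization before the job aborts.
            conf.setFloat(HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY, 0.1f);

            Job job = new Job(conf);
            job.setJarByClass(BadRecordToleranceSketch.class);
            job.setMapperClass(EchoMapper.class);
            job.setInputFormatClass(HCatInputFormat.class);
            job.setOutputFormatClass(TextOutputFormat.class);

            // Read the table populated in setUp(); database "default", no partition filter.
            HCatInputFormat.setInput(job, InputJobInfo.create("default", "test_bad_records", null));

            job.setMapOutputKeyClass(NullWritable.class);
            job.setMapOutputValueClass(Text.class);
            job.setNumReduceTasks(0);

            // Output path is illustrative; pick any writable location.
            TextOutputFormat.setOutputPath(job, new Path("/tmp/hcat_bad_record_sketch"));

            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }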

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatMultiOutputFormat.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatMultiOutputFormat.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatMultiOutputFormat.java Mon Sep 10 23:28:55 2012
@@ -100,8 +100,8 @@ public class TestHCatMultiOutputFormat {
         public void run() {
             try {
                 String warehouseConf = HiveConf.ConfVars.METASTOREWAREHOUSE.varname + "="
-                        + warehousedir.toString();
-                HiveMetaStore.main(new String[] {"-v", "-p", msPort, "--hiveconf", warehouseConf});
+                    + warehousedir.toString();
+                HiveMetaStore.main(new String[]{"-v", "-p", msPort, "--hiveconf", warehouseConf});
             } catch (Throwable t) {
                 System.err.println("Exiting. Got exception from metastore: " + t.getMessage());
             }
@@ -177,7 +177,7 @@ public class TestHCatMultiOutputFormat {
         FileSystem fs = FileSystem.get(conf);
         System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
         mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
-                new JobConf(conf));
+            new JobConf(conf));
         mrConf = mrCluster.createJobConf();
         fs.mkdirs(warehousedir);
 
@@ -192,7 +192,7 @@ public class TestHCatMultiOutputFormat {
         hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTRETRIES, 3);
 
         hiveConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-                HCatSemanticAnalyzer.class.getName());
+            HCatSemanticAnalyzer.class.getName());
         hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
         hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
         hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
@@ -239,9 +239,9 @@ public class TestHCatMultiOutputFormat {
         sd.setInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat.class.getName());
         sd.setOutputFormat(org.apache.hadoop.hive.ql.io.RCFileOutputFormat.class.getName());
         sd.getSerdeInfo().getParameters().put(
-                org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+            org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
         sd.getSerdeInfo().setSerializationLib(
-                org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName());
+            org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName());
         tbl.setPartitionKeys(ColumnHolder.partitionCols);
 
         hmsc.createTable(tbl);
@@ -291,10 +291,10 @@ public class TestHCatMultiOutputFormat {
 
         for (int i = 0; i < tableNames.length; i++) {
             configurer.addOutputFormat(tableNames[i], HCatOutputFormat.class, BytesWritable.class,
-                    HCatRecord.class);
+                HCatRecord.class);
             HCatOutputFormat.setOutput(configurer.getJob(tableNames[i]), infoList.get(i));
             HCatOutputFormat.setSchema(configurer.getJob(tableNames[i]),
-                    schemaMap.get(tableNames[i]));
+                schemaMap.get(tableNames[i]));
         }
         configurer.configure();
 
@@ -307,26 +307,26 @@ public class TestHCatMultiOutputFormat {
             outputs.add(getTableData(tbl, "default").get(0));
         }
         Assert.assertEquals("Comparing output of table " +
-                tableNames[0] + " is not correct", outputs.get(0), "a,a,1,ag");
+            tableNames[0] + " is not correct", outputs.get(0), "a,a,1,ag");
         Assert.assertEquals("Comparing output of table " +
-                tableNames[1] + " is not correct", outputs.get(1), "a,1,ag");
+            tableNames[1] + " is not correct", outputs.get(1), "a,1,ag");
         Assert.assertEquals("Comparing output of table " +
-                tableNames[2] + " is not correct", outputs.get(2), "a,a,extra,1,ag");
+            tableNames[2] + " is not correct", outputs.get(2), "a,a,extra,1,ag");
 
         // Check permissions on partition dirs and files created
         for (int i = 0; i < tableNames.length; i++) {
             Path partitionFile = new Path(warehousedir + "/" + tableNames[i]
-                    + "/ds=1/cluster=ag/part-m-00000");
+                + "/ds=1/cluster=ag/part-m-00000");
             FileSystem fs = partitionFile.getFileSystem(mrConf);
             Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
-                    fs.getFileStatus(partitionFile).getPermission(),
-                    new FsPermission(tablePerms[i]));
+                fs.getFileStatus(partitionFile).getPermission(),
+                new FsPermission(tablePerms[i]));
             Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
-                    fs.getFileStatus(partitionFile.getParent()).getPermission(),
-                    new FsPermission(tablePerms[i]));
+                fs.getFileStatus(partitionFile.getParent()).getPermission(),
+                new FsPermission(tablePerms[i]));
             Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
-                    fs.getFileStatus(partitionFile.getParent().getParent()).getPermission(),
-                    new FsPermission(tablePerms[i]));
+                fs.getFileStatus(partitionFile.getParent().getParent()).getPermission(),
+                new FsPermission(tablePerms[i]));
 
         }
         LOG.info("File permissions verified");
@@ -392,13 +392,13 @@ public class TestHCatMultiOutputFormat {
     }
 
     private static class MyMapper extends
-            Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
+        Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
 
         private int i = 0;
 
         @Override
         protected void map(LongWritable key, Text value, Context context)
-                throws IOException, InterruptedException {
+            throws IOException, InterruptedException {
             HCatRecord record = null;
             String[] splits = value.toString().split(",");
             switch (i) {

Modified: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatNonPartitioned.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatNonPartitioned.java?rev=1383152&r1=1383151&r2=1383152&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatNonPartitioned.java (original)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/mapreduce/TestHCatNonPartitioned.java Mon Sep 10 23:28:55 2012
@@ -35,96 +35,96 @@ import org.apache.hcatalog.data.schema.H
 
 public class TestHCatNonPartitioned extends HCatMapReduceTest {
 
-  private List<HCatRecord> writeRecords;
-  List<HCatFieldSchema> partitionColumns;
+    private List<HCatRecord> writeRecords;
+    List<HCatFieldSchema> partitionColumns;
 
-  @Override
-  protected void initialize() throws HCatException {
+    @Override
+    protected void initialize() throws HCatException {
 
-    dbName = null; //test if null dbName works ("default" is used)
-    tableName = "testHCatNonPartitionedTable";
+        dbName = null; //test if null dbName works ("default" is used)
+        tableName = "testHCatNonPartitionedTable";
 
-    writeRecords = new ArrayList<HCatRecord>();
+        writeRecords = new ArrayList<HCatRecord>();
 
-    for(int i = 0;i < 20;i++) {
-      List<Object> objList = new ArrayList<Object>();
+        for (int i = 0; i < 20; i++) {
+            List<Object> objList = new ArrayList<Object>();
 
-      objList.add(i);
-      objList.add("strvalue" + i);
-      writeRecords.add(new DefaultHCatRecord(objList));
+            objList.add(i);
+            objList.add("strvalue" + i);
+            writeRecords.add(new DefaultHCatRecord(objList));
+        }
+
+        partitionColumns = new ArrayList<HCatFieldSchema>();
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
+        partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
     }
 
-    partitionColumns = new ArrayList<HCatFieldSchema>();
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", Constants.INT_TYPE_NAME, "")));
-    partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", Constants.STRING_TYPE_NAME, "")));
-  }
-
-  @Override
-  protected List<FieldSchema> getPartitionKeys() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    //empty list, non partitioned
-    return fields;
-  }
-
-  @Override
-  protected List<FieldSchema> getTableColumns() {
-    List<FieldSchema> fields = new ArrayList<FieldSchema>();
-    fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
-    fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
-    return fields;
-  }
-
-
-  public void testHCatNonPartitionedTable() throws Exception {
-
-    Map<String, String> partitionMap = new HashMap<String, String>();
-    runMRCreate(null, partitionColumns, writeRecords, 10,true);
-
-    //Test for duplicate publish
-    IOException exc = null;
-    try {
-      runMRCreate(null,  partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
+    @Override
+    protected List<FieldSchema> getPartitionKeys() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        // empty list, non-partitioned
+        return fields;
     }
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_NON_EMPTY_TABLE, ((HCatException) exc).getErrorType());
-
-    //Test for publish with invalid partition key name
-    exc = null;
-    partitionMap.clear();
-    partitionMap.put("px", "p1value2");
-
-    try {
-      runMRCreate(partitionMap, partitionColumns, writeRecords, 20,true);
-    } catch(IOException e) {
-      exc = e;
+    @Override
+    protected List<FieldSchema> getTableColumns() {
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        fields.add(new FieldSchema("c1", Constants.INT_TYPE_NAME, ""));
+        fields.add(new FieldSchema("c2", Constants.STRING_TYPE_NAME, ""));
+        return fields;
     }
 
-    assertTrue(exc != null);
-    assertTrue(exc instanceof HCatException);
-    assertEquals(ErrorType.ERROR_INVALID_PARTITION_VALUES, ((HCatException) exc).getErrorType());
 
-    //Read should get 10 rows
-    runMRRead(10);
+    public void testHCatNonPartitionedTable() throws Exception {
 
-    hiveReadTest();
-  }
+        Map<String, String> partitionMap = new HashMap<String, String>();
+        runMRCreate(null, partitionColumns, writeRecords, 10, true);
 
-  //Test that data inserted through hcatoutputformat is readable from hive
-  private void hiveReadTest() throws Exception {
+        //Test for duplicate publish
+        IOException exc = null;
+        try {
+            runMRCreate(null, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_NON_EMPTY_TABLE, ((HCatException) exc).getErrorType());
+
+        //Test for publish with invalid partition key name
+        exc = null;
+        partitionMap.clear();
+        partitionMap.put("px", "p1value2");
+
+        try {
+            runMRCreate(partitionMap, partitionColumns, writeRecords, 20, true);
+        } catch (IOException e) {
+            exc = e;
+        }
+
+        assertTrue(exc != null);
+        assertTrue(exc instanceof HCatException);
+        assertEquals(ErrorType.ERROR_INVALID_PARTITION_VALUES, ((HCatException) exc).getErrorType());
 
-    String query = "select * from " + tableName;
-    int retCode = driver.run(query).getResponseCode();
+        //Read should get 10 rows
+        runMRRead(10);
 
-    if( retCode != 0 ) {
-      throw new Exception("Error " + retCode + " running query " + query);
+        hiveReadTest();
     }
 
-    ArrayList<String> res = new ArrayList<String>();
-    driver.getResults(res);
-    assertEquals(10, res.size());
-  }
+    // Test that data inserted through HCatOutputFormat is readable from Hive
+    private void hiveReadTest() throws Exception {
+
+        String query = "select * from " + tableName;
+        int retCode = driver.run(query).getResponseCode();
+
+        if (retCode != 0) {
+            throw new Exception("Error " + retCode + " running query " + query);
+        }
+
+        ArrayList<String> res = new ArrayList<String>();
+        driver.getResults(res);
+        assertEquals(10, res.size());
+    }
 }
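
hiveReadTest() above shows the general read-back pattern for data written through HCatOutputFormat: run a query on the embedded Hive Driver, check the response code, and collect the fetched rows. A small reusable sketch of that pattern, assuming an already-initialized Driver; the class and method names are illustrative.

    import java.util.ArrayList;

    import org.apache.hadoop.hive.ql.Driver;

    public class HiveReadBack {

        /**
         * Runs "select * from tableName" on an already-initialized embedded Driver
         * and returns the fetched rows, failing fast on a non-zero response code.
         */
        public static ArrayList<String> readAll(Driver driver, String tableName) throws Exception {
            String query = "select * from " + tableName;

            int retCode = driver.run(query).getResponseCode();
            if (retCode != 0) {
                throw new Exception("Error " + retCode + " running query " + query);
            }

            ArrayList<String> rows = new ArrayList<String>();
            driver.getResults(rows);  // fetch the rows produced by the query just run
            return rows;
        }
    }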


