hadoop-hive-commits mailing list archives

From: zs...@apache.org
Subject: svn commit: r901581 [10/10] - in /hadoop/hive/trunk: ./ cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/common/io/ contrib/src/java/org/apache/hadoop/hive/contrib/fileformat...
Date: Thu, 21 Jan 2010 07:31:27 GMT
Modified: hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java?rev=901581&r1=901580&r2=901581&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (original)
+++ hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java Thu Jan 21 07:29:29 2010
@@ -27,7 +27,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.serde.Constants;
 import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -39,6 +38,7 @@
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.serde.Constants;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
 
@@ -49,13 +49,13 @@
   protected void setUp() throws Exception {
     super.setUp();
     hiveConf = new HiveConf(this.getClass());
-    
+
     // set some values to use for getting conf. vars
     hiveConf.set("hive.key1", "value1");
     hiveConf.set("hive.key2", "http://www.example.com");
     hiveConf.set("hive.key3", "");
     hiveConf.set("hive.key4", "0");
-    
+
     try {
       client = new HiveMetaStoreClient(hiveConf);
     } catch (Throwable e) {
@@ -77,129 +77,137 @@
   }
 
   /**
-   * tests create table and partition and tries to drop the table without droppping the partition
-   * @throws Exception 
+   * tests create table and partition and tries to drop the table without
+   * dropping the partition
+   * 
+   * @throws Exception
    */
   public void testPartition() throws Exception {
     try {
-    String dbName = "compdb";
-    String tblName = "comptbl";
-    String typeName = "Person";
-    List<String> vals = new ArrayList<String>(2);
-    vals.add("2008-07-01");
-    vals.add("14");
-  
-    client.dropTable(dbName, tblName);
-    client.dropDatabase(dbName);
-    boolean ret = client.createDatabase(dbName, "strange_loc");
-    assertTrue("Unable to create the databse " + dbName, ret);
-  
-    client.dropType(typeName);
-    Type typ1 = new Type();
-    typ1.setName(typeName);
-    typ1.setFields(new ArrayList<FieldSchema>(2));
-    typ1.getFields().add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
-    typ1.getFields().add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
-    ret = client.createType(typ1);
-    assertTrue("Unable to create type " + typeName, ret);
-  
-    Table tbl = new Table();
-    tbl.setDbName(dbName);
-    tbl.setTableName(tblName);
-    StorageDescriptor sd = new StorageDescriptor(); 
-    tbl.setSd(sd);
-    sd.setCols(typ1.getFields());
-    sd.setCompressed(false);
-    sd.setNumBuckets(1);
-    sd.setParameters(new HashMap<String, String>());
-    sd.getParameters().put("test_param_1", "Use this for comments etc");
-    sd.setBucketCols(new ArrayList<String>(2));
-    sd.getBucketCols().add("name");
-    sd.setSerdeInfo(new SerDeInfo());
-    sd.getSerdeInfo().setName(tbl.getTableName());
-    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-    sd.getSerdeInfo().getParameters().put(Constants.SERIALIZATION_FORMAT, "1");
-    sd.setSortCols(new ArrayList<Order>());
-  
-    tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
-    tbl.getPartitionKeys().add(new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
-    tbl.getPartitionKeys().add(new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
-  
-    client.createTable(tbl);
-  
-    Partition part = new Partition();
-    part.setDbName(dbName);
-    part.setTableName(tblName);
-    part.setValues(vals);
-    part.setParameters(new HashMap<String, String>());
-    part.setSd(tbl.getSd());
-    part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
-    part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
-  
-    Partition retp = client.add_partition(part);
-    assertNotNull("Unable to create partition " + part, retp);
-  
-    Partition part2 = client.getPartition(dbName, tblName, part.getValues());
-    assertTrue("Partitions are not same",part.equals(part2));
-  
-    FileSystem fs = FileSystem.get(this.hiveConf);
-    Path partPath = new Path(part2.getSd().getLocation());
-    
-    assertTrue(fs.exists(partPath));
-    ret = client.dropPartition(dbName, tblName, part.getValues(), true);
-    assertTrue(ret);
-    assertFalse(fs.exists(partPath));
-  
-    // add the partition again so that drop table with a partition can be tested
-    retp = client.add_partition(part);
-    assertNotNull("Unable to create partition " + part, ret);
-  
-    client.dropTable(dbName, tblName);
-  
-    ret = client.dropType(typeName);
-    assertTrue("Unable to drop type " + typeName, ret);
-
-    //recreate table as external, drop partition and it should
-    //still exist
-    tbl.setParameters(new HashMap<String, String>());
-    tbl.getParameters().put("EXTERNAL", "TRUE");
-    client.createTable(tbl);
-    retp = client.add_partition(part);
-    assertTrue(fs.exists(partPath));
-    client.dropPartition(dbName, tblName, part.getValues(), true);
-    assertTrue(fs.exists(partPath));
-    
-    ret = client.dropDatabase(dbName);
-    assertTrue("Unable to create the databse " + dbName, ret);
+      String dbName = "compdb";
+      String tblName = "comptbl";
+      String typeName = "Person";
+      List<String> vals = new ArrayList<String>(2);
+      vals.add("2008-07-01");
+      vals.add("14");
+
+      client.dropTable(dbName, tblName);
+      client.dropDatabase(dbName);
+      boolean ret = client.createDatabase(dbName, "strange_loc");
+      assertTrue("Unable to create the databse " + dbName, ret);
+
+      client.dropType(typeName);
+      Type typ1 = new Type();
+      typ1.setName(typeName);
+      typ1.setFields(new ArrayList<FieldSchema>(2));
+      typ1.getFields().add(
+          new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+      typ1.getFields().add(
+          new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
+      ret = client.createType(typ1);
+      assertTrue("Unable to create type " + typeName, ret);
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName);
+      StorageDescriptor sd = new StorageDescriptor();
+      tbl.setSd(sd);
+      sd.setCols(typ1.getFields());
+      sd.setCompressed(false);
+      sd.setNumBuckets(1);
+      sd.setParameters(new HashMap<String, String>());
+      sd.getParameters().put("test_param_1", "Use this for comments etc");
+      sd.setBucketCols(new ArrayList<String>(2));
+      sd.getBucketCols().add("name");
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters()
+          .put(Constants.SERIALIZATION_FORMAT, "1");
+      sd.setSortCols(new ArrayList<Order>());
+
+      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
+      tbl.getPartitionKeys().add(
+          new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
+      tbl.getPartitionKeys().add(
+          new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
+
+      client.createTable(tbl);
+
+      Partition part = new Partition();
+      part.setDbName(dbName);
+      part.setTableName(tblName);
+      part.setValues(vals);
+      part.setParameters(new HashMap<String, String>());
+      part.setSd(tbl.getSd());
+      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
+
+      Partition retp = client.add_partition(part);
+      assertNotNull("Unable to create partition " + part, retp);
+
+      Partition part2 = client.getPartition(dbName, tblName, part.getValues());
+      assertTrue("Partitions are not same", part.equals(part2));
+
+      FileSystem fs = FileSystem.get(hiveConf);
+      Path partPath = new Path(part2.getSd().getLocation());
+
+      assertTrue(fs.exists(partPath));
+      ret = client.dropPartition(dbName, tblName, part.getValues(), true);
+      assertTrue(ret);
+      assertFalse(fs.exists(partPath));
+
+      // add the partition again so that drop table with a partition can be
+      // tested
+      retp = client.add_partition(part);
+      assertNotNull("Unable to create partition " + part, ret);
+
+      client.dropTable(dbName, tblName);
+
+      ret = client.dropType(typeName);
+      assertTrue("Unable to drop type " + typeName, ret);
+
+      // recreate table as external, drop partition and it should
+      // still exist
+      tbl.setParameters(new HashMap<String, String>());
+      tbl.getParameters().put("EXTERNAL", "TRUE");
+      client.createTable(tbl);
+      retp = client.add_partition(part);
+      assertTrue(fs.exists(partPath));
+      client.dropPartition(dbName, tblName, part.getValues(), true);
+      assertTrue(fs.exists(partPath));
+
+      ret = client.dropDatabase(dbName);
+      assertTrue("Unable to create the databse " + dbName, ret);
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testPartition() failed.");
       throw e;
     }
   }
-  
+
   public void testAlterPartition() throws Throwable {
-    
+
     try {
       String dbName = "compdb";
       String tblName = "comptbl";
       List<String> vals = new ArrayList<String>(2);
       vals.add("2008-07-01");
       vals.add("14");
-    
+
       client.dropTable(dbName, tblName);
       client.dropDatabase(dbName);
       boolean ret = client.createDatabase(dbName, "strange_loc");
       assertTrue("Unable to create the databse " + dbName, ret);
-    
+
       ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
       cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
       cols.add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
-    
+
       Table tbl = new Table();
       tbl.setDbName(dbName);
       tbl.setTableName(tblName);
-      StorageDescriptor sd = new StorageDescriptor(); 
+      StorageDescriptor sd = new StorageDescriptor();
       tbl.setSd(sd);
       sd.setCols(cols);
       sd.setCompressed(false);
@@ -211,15 +219,18 @@
       sd.setSerdeInfo(new SerDeInfo());
       sd.getSerdeInfo().setName(tbl.getTableName());
       sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().getParameters().put(Constants.SERIALIZATION_FORMAT, "1");
+      sd.getSerdeInfo().getParameters()
+          .put(Constants.SERIALIZATION_FORMAT, "1");
       sd.setSortCols(new ArrayList<Order>());
-    
+
       tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
-      tbl.getPartitionKeys().add(new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
-      tbl.getPartitionKeys().add(new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
-    
+      tbl.getPartitionKeys().add(
+          new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
+      tbl.getPartitionKeys().add(
+          new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
+
       client.createTable(tbl);
-    
+
       Partition part = new Partition();
       part.setDbName(dbName);
       part.setTableName(tblName);
@@ -228,271 +239,294 @@
       part.setSd(tbl.getSd());
       part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
       part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
-    
+
       client.add_partition(part);
-    
+
       Partition part2 = client.getPartition(dbName, tblName, part.getValues());
-      
+
       part2.getParameters().put("retention", "10");
       part2.getSd().setNumBuckets(12);
       part2.getSd().getSerdeInfo().getParameters().put("abc", "1");
       client.alter_partition(dbName, tblName, part2);
-    
+
       Partition part3 = client.getPartition(dbName, tblName, part.getValues());
-      assertEquals("couldn't alter partition", part3.getParameters().get("retention"), "10");
-      assertEquals("couldn't alter partition", part3.getSd().getSerdeInfo().getParameters().get("abc"), "1");
-      assertEquals("couldn't alter partition", part3.getSd().getNumBuckets(), 12);
-      
+      assertEquals("couldn't alter partition", part3.getParameters().get(
+          "retention"), "10");
+      assertEquals("couldn't alter partition", part3.getSd().getSerdeInfo()
+          .getParameters().get("abc"), "1");
+      assertEquals("couldn't alter partition", part3.getSd().getNumBuckets(),
+          12);
+
       client.dropTable(dbName, tblName);
-      
+
       ret = client.dropDatabase(dbName);
       assertTrue("Unable to create the databse " + dbName, ret);
-      } catch (Exception e) {
-        System.err.println(StringUtils.stringifyException(e));
-        System.err.println("testPartition() failed.");
-        throw e;
-      }
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testPartition() failed.");
+      throw e;
+    }
   }
 
   public void testDatabase() throws Throwable {
     try {
-    // clear up any existing databases
-    client.dropDatabase("test1");
-    client.dropDatabase("test2");
-    
-    boolean ret = client.createDatabase("test1", "strange_loc");
-    assertTrue("Unable to create the databse", ret);
-
-    Database db = client.getDatabase("test1");
-    
-    assertEquals("name of returned db is different from that of inserted db", "test1", db.getName());
-    assertEquals("location of the returned db is different from that of inserted db", "strange_loc", db.getDescription());
-    
-    boolean ret2 = client.createDatabase("test2", "another_strange_loc");
-    assertTrue("Unable to create the databse", ret2);
-
-    Database db2 = client.getDatabase("test2");
-    
-    assertEquals("name of returned db is different from that of inserted db", "test2", db2.getName());
-    assertEquals("location of the returned db is different from that of inserted db", "another_strange_loc", db2.getDescription());
-    
-    List<String> dbs = client.getDatabases();
-    
-    assertTrue("first database is not test1", dbs.contains("test1"));
-    assertTrue("second database is not test2", dbs.contains("test2"));
-    
-    ret = client.dropDatabase("test1");
-    assertTrue("couldn't delete first database", ret);
-    ret = client.dropDatabase("test2");
-    assertTrue("couldn't delete second database", ret);
+      // clear up any existing databases
+      client.dropDatabase("test1");
+      client.dropDatabase("test2");
+
+      boolean ret = client.createDatabase("test1", "strange_loc");
+      assertTrue("Unable to create the databse", ret);
+
+      Database db = client.getDatabase("test1");
+
+      assertEquals("name of returned db is different from that of inserted db",
+          "test1", db.getName());
+      assertEquals(
+          "location of the returned db is different from that of inserted db",
+          "strange_loc", db.getDescription());
+
+      boolean ret2 = client.createDatabase("test2", "another_strange_loc");
+      assertTrue("Unable to create the databse", ret2);
+
+      Database db2 = client.getDatabase("test2");
+
+      assertEquals("name of returned db is different from that of inserted db",
+          "test2", db2.getName());
+      assertEquals(
+          "location of the returned db is different from that of inserted db",
+          "another_strange_loc", db2.getDescription());
+
+      List<String> dbs = client.getDatabases();
+
+      assertTrue("first database is not test1", dbs.contains("test1"));
+      assertTrue("second database is not test2", dbs.contains("test2"));
+
+      ret = client.dropDatabase("test1");
+      assertTrue("couldn't delete first database", ret);
+      ret = client.dropDatabase("test2");
+      assertTrue("couldn't delete second database", ret);
     } catch (Throwable e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testDatabase() failed.");
       throw e;
     }
   }
-  
+
   public void testSimpleTypeApi() throws Exception {
     try {
-    client.dropType(Constants.INT_TYPE_NAME);
-    
-    Type typ1 = new Type();
-    typ1.setName(Constants.INT_TYPE_NAME);
-    boolean ret = client.createType(typ1);
-    assertTrue("Unable to create type", ret);
-    
-    Type typ1_2 = client.getType(Constants.INT_TYPE_NAME);
-    assertNotNull(typ1_2);
-    assertEquals(typ1.getName(), typ1_2.getName());
-    
-    ret = client.dropType(Constants.INT_TYPE_NAME);
-    assertTrue("unable to drop type integer", ret);
-    
-    Type typ1_3 = null;
-    typ1_3 = client.getType(Constants.INT_TYPE_NAME);
-    assertNull("unable to drop type integer",typ1_3);
+      client.dropType(Constants.INT_TYPE_NAME);
+
+      Type typ1 = new Type();
+      typ1.setName(Constants.INT_TYPE_NAME);
+      boolean ret = client.createType(typ1);
+      assertTrue("Unable to create type", ret);
+
+      Type typ1_2 = client.getType(Constants.INT_TYPE_NAME);
+      assertNotNull(typ1_2);
+      assertEquals(typ1.getName(), typ1_2.getName());
+
+      ret = client.dropType(Constants.INT_TYPE_NAME);
+      assertTrue("unable to drop type integer", ret);
+
+      Type typ1_3 = null;
+      typ1_3 = client.getType(Constants.INT_TYPE_NAME);
+      assertNull("unable to drop type integer", typ1_3);
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testSimpleTypeApi() failed.");
       throw e;
     }
-}
-  
+  }
+
   // TODO:pc need to enhance this with complex fields and getType_all function
   public void testComplexTypeApi() throws Exception {
     try {
-    client.dropType("Person");
-    
-    Type typ1 = new Type();
-    typ1.setName("Person");
-    typ1.setFields(new ArrayList<FieldSchema>(2));
-    typ1.getFields().add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
-    typ1.getFields().add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
-    boolean ret = client.createType(typ1);
-    assertTrue("Unable to create type", ret);
-    
-    Type typ1_2 = client.getType("Person");
-    assertNotNull("type Person not found", typ1_2);
-    assertEquals(typ1.getName(), typ1_2.getName());
-    assertEquals(typ1.getFields().size(), typ1_2.getFields().size());
-    assertEquals(typ1.getFields().get(0), typ1_2.getFields().get(0));
-    assertEquals(typ1.getFields().get(1), typ1_2.getFields().get(1));
-    
-    client.dropType("Family");
-    
-    Type fam = new Type();
-    fam.setName("Family");
-    fam.setFields(new ArrayList<FieldSchema>(2));
-    fam.getFields().add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
-    fam.getFields().add(new FieldSchema("members", MetaStoreUtils.getListType(typ1.getName()), ""));
-    
-    ret = client.createType(fam);
-    assertTrue("Unable to create type " + fam.getName(), ret);
-    
-    Type fam2 = client.getType("Family");
-    assertNotNull("type Person not found", fam2);
-    assertEquals(fam.getName(), fam2.getName());
-    assertEquals(fam.getFields().size(), fam2.getFields().size());
-    assertEquals(fam.getFields().get(0), fam2.getFields().get(0));
-    assertEquals(fam.getFields().get(1), fam2.getFields().get(1));
-    
-    ret = client.dropType("Family");
-    assertTrue("unable to drop type Family", ret);
-    
-    ret = client.dropType("Person");
-    assertTrue("unable to drop type Person", ret);
-    
-    Type typ1_3 = null;
-    typ1_3 = client.getType("Person");
-    assertNull("unable to drop type Person",typ1_3);
+      client.dropType("Person");
+
+      Type typ1 = new Type();
+      typ1.setName("Person");
+      typ1.setFields(new ArrayList<FieldSchema>(2));
+      typ1.getFields().add(
+          new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+      typ1.getFields().add(
+          new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
+      boolean ret = client.createType(typ1);
+      assertTrue("Unable to create type", ret);
+
+      Type typ1_2 = client.getType("Person");
+      assertNotNull("type Person not found", typ1_2);
+      assertEquals(typ1.getName(), typ1_2.getName());
+      assertEquals(typ1.getFields().size(), typ1_2.getFields().size());
+      assertEquals(typ1.getFields().get(0), typ1_2.getFields().get(0));
+      assertEquals(typ1.getFields().get(1), typ1_2.getFields().get(1));
+
+      client.dropType("Family");
+
+      Type fam = new Type();
+      fam.setName("Family");
+      fam.setFields(new ArrayList<FieldSchema>(2));
+      fam.getFields().add(
+          new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+      fam.getFields().add(
+          new FieldSchema("members",
+              MetaStoreUtils.getListType(typ1.getName()), ""));
+
+      ret = client.createType(fam);
+      assertTrue("Unable to create type " + fam.getName(), ret);
+
+      Type fam2 = client.getType("Family");
+      assertNotNull("type Person not found", fam2);
+      assertEquals(fam.getName(), fam2.getName());
+      assertEquals(fam.getFields().size(), fam2.getFields().size());
+      assertEquals(fam.getFields().get(0), fam2.getFields().get(0));
+      assertEquals(fam.getFields().get(1), fam2.getFields().get(1));
+
+      ret = client.dropType("Family");
+      assertTrue("unable to drop type Family", ret);
+
+      ret = client.dropType("Person");
+      assertTrue("unable to drop type Person", ret);
+
+      Type typ1_3 = null;
+      typ1_3 = client.getType("Person");
+      assertNull("unable to drop type Person", typ1_3);
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testComplexTypeApi() failed.");
       throw e;
     }
   }
-  
+
   public void testSimpleTable() throws Exception {
     try {
-    String dbName = "simpdb";
-    String tblName = "simptbl";
-    String tblName2 = "simptbl2";
-    String typeName = "Person";
-  
-    client.dropTable(dbName, tblName);
-    client.dropDatabase(dbName);
-    boolean ret = client.createDatabase(dbName, "strange_loc");
-    assertTrue("Unable to create the databse " + dbName, ret);
-    
-    client.dropType(typeName);
-    Type typ1 = new Type();
-    typ1.setName(typeName);
-    typ1.setFields(new ArrayList<FieldSchema>(2));
-    typ1.getFields().add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
-    typ1.getFields().add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
-    ret = client.createType(typ1);
-    assertTrue("Unable to create type " + typeName, ret);
-    
-    Table tbl = new Table();
-    tbl.setDbName(dbName);
-    tbl.setTableName(tblName);
-    StorageDescriptor sd = new StorageDescriptor();
-    tbl.setSd(sd);
-    sd.setCols(typ1.getFields());
-    sd.setCompressed(false);
-    sd.setNumBuckets(1);
-    sd.setParameters(new HashMap<String, String>());
-    sd.getParameters().put("test_param_1", "Use this for comments etc");
-    sd.setBucketCols(new ArrayList<String>(2));
-    sd.getBucketCols().add("name");
-    sd.setSerdeInfo(new SerDeInfo());
-    sd.getSerdeInfo().setName(tbl.getTableName());
-    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-    sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
-    sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
-    tbl.setPartitionKeys(new ArrayList<FieldSchema>());
-    
-    client.createTable(tbl);
-    
-    Table tbl2 = client.getTable(dbName, tblName);
-    assertNotNull(tbl2);
-    assertEquals(tbl2.getDbName(), dbName);
-    assertEquals(tbl2.getTableName(), tblName);
-    assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
-    assertEquals(tbl2.getSd().isCompressed(), false);
-    assertEquals(tbl2.getSd().getNumBuckets(), 1);
-    assertEquals(tbl2.getSd().getLocation(), tbl.getSd().getLocation());
-    assertNotNull(tbl2.getSd().getSerdeInfo());
-    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-    sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
-    
-    tbl2.setTableName(tblName2);
-    tbl2.setParameters(new HashMap<String, String>());
-    tbl2.getParameters().put("EXTERNAL", "TRUE");
-    tbl2.getSd().setLocation(tbl.getSd().getLocation() +"-2");
-    
-    List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
-    assertNotNull(fieldSchemas);
-    assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
-    for (FieldSchema fs : tbl.getSd().getCols()) {
-      assertTrue(fieldSchemas.contains(fs));
-    }
-    
-    List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
-    assertNotNull(fieldSchemasFull);
-    assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()+tbl.getPartitionKeys().size());
-    for (FieldSchema fs : tbl.getSd().getCols()) {
-      assertTrue(fieldSchemasFull.contains(fs));
-    }
-    for (FieldSchema fs : tbl.getPartitionKeys()) {
-      assertTrue(fieldSchemasFull.contains(fs));
-    }
-    
-    client.createTable(tbl2);
-  
-    Table tbl3 = client.getTable(dbName, tblName2);
-    assertNotNull(tbl3);
-    assertEquals(tbl3.getDbName(), dbName);
-    assertEquals(tbl3.getTableName(), tblName2);
-    assertEquals(tbl3.getSd().getCols().size(), typ1.getFields().size());
-    assertEquals(tbl3.getSd().isCompressed(), false);
-    assertEquals(tbl3.getSd().getNumBuckets(), 1);
-    assertEquals(tbl3.getSd().getLocation(), tbl2.getSd().getLocation());
-    assertEquals(tbl3.getParameters(), tbl2.getParameters());
-    
-    fieldSchemas = client.getFields(dbName, tblName2);
-    assertNotNull(fieldSchemas);
-    assertEquals(fieldSchemas.size(), tbl2.getSd().getCols().size());
-    for (FieldSchema fs : tbl2.getSd().getCols()) {
-      assertTrue(fieldSchemas.contains(fs));
-    }
-    
-    fieldSchemasFull = client.getSchema(dbName, tblName2);
-    assertNotNull(fieldSchemasFull);
-    assertEquals(fieldSchemasFull.size(), tbl2.getSd().getCols().size()+tbl2.getPartitionKeys().size());
-    for (FieldSchema fs : tbl2.getSd().getCols()) {
-      assertTrue(fieldSchemasFull.contains(fs));
-    }
-    for (FieldSchema fs : tbl2.getPartitionKeys()) {
-      assertTrue(fieldSchemasFull.contains(fs));
-    }
-    
-  
-    assertEquals("Use this for comments etc", tbl2.getSd().getParameters().get("test_param_1"));
-    assertEquals("name", tbl2.getSd().getBucketCols().get(0));
-    assertTrue("Partition key list is not empty",  (tbl2.getPartitionKeys() == null) || (tbl2.getPartitionKeys().size() == 0));
-    
-    FileSystem fs = FileSystem.get(hiveConf);
-    client.dropTable(dbName, tblName);
-    assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));
-    
-    client.dropTable(dbName, tblName2);
-    assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));
-  
-    ret = client.dropType(typeName);
-    assertTrue("Unable to drop type " + typeName, ret);
-    ret = client.dropDatabase(dbName);
-    assertTrue("Unable to drop databse " + dbName, ret);
+      String dbName = "simpdb";
+      String tblName = "simptbl";
+      String tblName2 = "simptbl2";
+      String typeName = "Person";
+
+      client.dropTable(dbName, tblName);
+      client.dropDatabase(dbName);
+      boolean ret = client.createDatabase(dbName, "strange_loc");
+      assertTrue("Unable to create the databse " + dbName, ret);
+
+      client.dropType(typeName);
+      Type typ1 = new Type();
+      typ1.setName(typeName);
+      typ1.setFields(new ArrayList<FieldSchema>(2));
+      typ1.getFields().add(
+          new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+      typ1.getFields().add(
+          new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
+      ret = client.createType(typ1);
+      assertTrue("Unable to create type " + typeName, ret);
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName);
+      StorageDescriptor sd = new StorageDescriptor();
+      tbl.setSd(sd);
+      sd.setCols(typ1.getFields());
+      sd.setCompressed(false);
+      sd.setNumBuckets(1);
+      sd.setParameters(new HashMap<String, String>());
+      sd.getParameters().put("test_param_1", "Use this for comments etc");
+      sd.setBucketCols(new ArrayList<String>(2));
+      sd.getBucketCols().add("name");
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters().put(
+          org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+      sd.getSerdeInfo().setSerializationLib(
+          org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
+      tbl.setPartitionKeys(new ArrayList<FieldSchema>());
+
+      client.createTable(tbl);
+
+      Table tbl2 = client.getTable(dbName, tblName);
+      assertNotNull(tbl2);
+      assertEquals(tbl2.getDbName(), dbName);
+      assertEquals(tbl2.getTableName(), tblName);
+      assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
+      assertEquals(tbl2.getSd().isCompressed(), false);
+      assertEquals(tbl2.getSd().getNumBuckets(), 1);
+      assertEquals(tbl2.getSd().getLocation(), tbl.getSd().getLocation());
+      assertNotNull(tbl2.getSd().getSerdeInfo());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters().put(
+          org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+
+      tbl2.setTableName(tblName2);
+      tbl2.setParameters(new HashMap<String, String>());
+      tbl2.getParameters().put("EXTERNAL", "TRUE");
+      tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2");
+
+      List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
+      assertNotNull(fieldSchemas);
+      assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
+      for (FieldSchema fs : tbl.getSd().getCols()) {
+        assertTrue(fieldSchemas.contains(fs));
+      }
+
+      List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
+      assertNotNull(fieldSchemasFull);
+      assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
+          + tbl.getPartitionKeys().size());
+      for (FieldSchema fs : tbl.getSd().getCols()) {
+        assertTrue(fieldSchemasFull.contains(fs));
+      }
+      for (FieldSchema fs : tbl.getPartitionKeys()) {
+        assertTrue(fieldSchemasFull.contains(fs));
+      }
+
+      client.createTable(tbl2);
+
+      Table tbl3 = client.getTable(dbName, tblName2);
+      assertNotNull(tbl3);
+      assertEquals(tbl3.getDbName(), dbName);
+      assertEquals(tbl3.getTableName(), tblName2);
+      assertEquals(tbl3.getSd().getCols().size(), typ1.getFields().size());
+      assertEquals(tbl3.getSd().isCompressed(), false);
+      assertEquals(tbl3.getSd().getNumBuckets(), 1);
+      assertEquals(tbl3.getSd().getLocation(), tbl2.getSd().getLocation());
+      assertEquals(tbl3.getParameters(), tbl2.getParameters());
+
+      fieldSchemas = client.getFields(dbName, tblName2);
+      assertNotNull(fieldSchemas);
+      assertEquals(fieldSchemas.size(), tbl2.getSd().getCols().size());
+      for (FieldSchema fs : tbl2.getSd().getCols()) {
+        assertTrue(fieldSchemas.contains(fs));
+      }
+
+      fieldSchemasFull = client.getSchema(dbName, tblName2);
+      assertNotNull(fieldSchemasFull);
+      assertEquals(fieldSchemasFull.size(), tbl2.getSd().getCols().size()
+          + tbl2.getPartitionKeys().size());
+      for (FieldSchema fs : tbl2.getSd().getCols()) {
+        assertTrue(fieldSchemasFull.contains(fs));
+      }
+      for (FieldSchema fs : tbl2.getPartitionKeys()) {
+        assertTrue(fieldSchemasFull.contains(fs));
+      }
+
+      assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
+          .get("test_param_1"));
+      assertEquals("name", tbl2.getSd().getBucketCols().get(0));
+      assertTrue("Partition key list is not empty",
+          (tbl2.getPartitionKeys() == null)
+              || (tbl2.getPartitionKeys().size() == 0));
+
+      FileSystem fs = FileSystem.get(hiveConf);
+      client.dropTable(dbName, tblName);
+      assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));
+
+      client.dropTable(dbName, tblName2);
+      assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));
+
+      ret = client.dropType(typeName);
+      assertTrue("Unable to drop type " + typeName, ret);
+      ret = client.dropDatabase(dbName);
+      assertTrue("Unable to drop databse " + dbName, ret);
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testSimpleTable() failed.");
@@ -530,15 +564,17 @@
       sd.setSerdeInfo(new SerDeInfo());
       sd.getSerdeInfo().setName(tbl.getTableName());
       sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+      sd.getSerdeInfo().getParameters().put(
+          org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
       boolean failed = false;
       try {
         client.createTable(tbl);
       } catch (InvalidObjectException ex) {
         failed = true;
       }
-      if(!failed) {
-        assertTrue("Able to create table with invalid name: " + invTblName, false);
+      if (!failed) {
+        assertTrue("Able to create table with invalid name: " + invTblName,
+            false);
       }
       ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
       cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
@@ -548,7 +584,7 @@
       tbl.setTableName(tblName);
       tbl.getSd().setCols(cols);
       client.createTable(tbl);
-      
+
       // now try to invalid alter table
       Table tbl2 = client.getTable(dbName, tblName);
       failed = false;
@@ -559,11 +595,12 @@
       } catch (InvalidOperationException ex) {
         failed = true;
       }
-      if(!failed) {
-        assertTrue("Able to rename table with invalid name: " + invTblName, false);
+      if (!failed) {
+        assertTrue("Able to rename table with invalid name: " + invTblName,
+            false);
       }
       // try a valid alter table
-      tbl2.setTableName(tblName+"_renamed");
+      tbl2.setTableName(tblName + "_renamed");
       tbl2.getSd().setCols(cols);
       tbl2.getSd().setNumBuckets(32);
       client.alter_table(dbName, tblName, tbl2);
@@ -572,37 +609,42 @@
           tbl2.getSd().getNumBuckets(), tbl3.getSd().getNumBuckets());
       // check that data has moved
       FileSystem fs = FileSystem.get(hiveConf);
-      assertFalse("old table location still exists", fs.exists(new Path(tbl.getSd().getLocation())));
-      assertTrue("data did not move to new location", fs.exists(new Path(tbl3.getSd().getLocation())));
-      assertEquals("alter table didn't move data correct location", tbl3.getSd().getLocation(),
-          tbl2.getSd().getLocation());
+      assertFalse("old table location still exists", fs.exists(new Path(tbl
+          .getSd().getLocation())));
+      assertTrue("data did not move to new location", fs.exists(new Path(tbl3
+          .getSd().getLocation())));
+      assertEquals("alter table didn't move data correct location", tbl3
+          .getSd().getLocation(), tbl2.getSd().getLocation());
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testSimpleTable() failed.");
       throw e;
     }
   }
+
   public void testComplexTable() throws Exception {
-  
+
     String dbName = "compdb";
     String tblName = "comptbl";
     String typeName = "Person";
-  
+
     try {
       client.dropTable(dbName, tblName);
       client.dropDatabase(dbName);
       boolean ret = client.createDatabase(dbName, "strange_loc");
       assertTrue("Unable to create the databse " + dbName, ret);
-  
+
       client.dropType(typeName);
       Type typ1 = new Type();
       typ1.setName(typeName);
       typ1.setFields(new ArrayList<FieldSchema>(2));
-      typ1.getFields().add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
-      typ1.getFields().add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
+      typ1.getFields().add(
+          new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+      typ1.getFields().add(
+          new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
       ret = client.createType(typ1);
       assertTrue("Unable to create type " + typeName, ret);
-  
+
       Table tbl = new Table();
       tbl.setDbName(dbName);
       tbl.setTableName(tblName);
@@ -618,42 +660,52 @@
       sd.setSerdeInfo(new SerDeInfo());
       sd.getSerdeInfo().setName(tbl.getTableName());
       sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "9");
-      sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
-  
+      sd.getSerdeInfo().getParameters().put(
+          org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "9");
+      sd.getSerdeInfo().setSerializationLib(
+          org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
+
       tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
-      tbl.getPartitionKeys().add(new FieldSchema("ds", org.apache.hadoop.hive.serde.Constants.DATE_TYPE_NAME, ""));
-      tbl.getPartitionKeys().add(new FieldSchema("hr", org.apache.hadoop.hive.serde.Constants.INT_TYPE_NAME, ""));
-  
+      tbl.getPartitionKeys().add(
+          new FieldSchema("ds",
+              org.apache.hadoop.hive.serde.Constants.DATE_TYPE_NAME, ""));
+      tbl.getPartitionKeys().add(
+          new FieldSchema("hr",
+              org.apache.hadoop.hive.serde.Constants.INT_TYPE_NAME, ""));
+
       client.createTable(tbl);
-  
+
       Table tbl2 = client.getTable(dbName, tblName);
       assertEquals(tbl2.getDbName(), dbName);
       assertEquals(tbl2.getTableName(), tblName);
       assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
       assertFalse(tbl2.getSd().isCompressed());
       assertEquals(tbl2.getSd().getNumBuckets(), 1);
-  
-      assertEquals("Use this for comments etc", tbl2.getSd().getParameters().get("test_param_1"));
+
+      assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
+          .get("test_param_1"));
       assertEquals("name", tbl2.getSd().getBucketCols().get(0));
-  
+
       assertNotNull(tbl2.getPartitionKeys());
       assertEquals(2, tbl2.getPartitionKeys().size());
-      assertEquals(Constants.DATE_TYPE_NAME, tbl2.getPartitionKeys().get(0).getType());
-      assertEquals(Constants.INT_TYPE_NAME, tbl2.getPartitionKeys().get(1).getType());
+      assertEquals(Constants.DATE_TYPE_NAME, tbl2.getPartitionKeys().get(0)
+          .getType());
+      assertEquals(Constants.INT_TYPE_NAME, tbl2.getPartitionKeys().get(1)
+          .getType());
       assertEquals("ds", tbl2.getPartitionKeys().get(0).getName());
       assertEquals("hr", tbl2.getPartitionKeys().get(1).getName());
-      
+
       List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
       assertNotNull(fieldSchemas);
       assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
       for (FieldSchema fs : tbl.getSd().getCols()) {
         assertTrue(fieldSchemas.contains(fs));
       }
-      
+
       List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
       assertNotNull(fieldSchemasFull);
-      assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()+tbl.getPartitionKeys().size());
+      assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
+          + tbl.getPartitionKeys().size());
       for (FieldSchema fs : tbl.getSd().getCols()) {
         assertTrue(fieldSchemasFull.contains(fs));
       }
@@ -672,37 +724,37 @@
       assertTrue("Unable to create the databse " + dbName, ret);
     }
   }
-  
+
   public void testGetConfigValue() {
 
     String val = "value";
-    
+
     try {
       assertEquals(client.getConfigValue("hive.key1", val), "value1");
-      assertEquals(client.getConfigValue("hive.key2", val), 
-                                         "http://www.example.com");
+      assertEquals(client.getConfigValue("hive.key2", val),
+          "http://www.example.com");
       assertEquals(client.getConfigValue("hive.key3", val), "");
       assertEquals(client.getConfigValue("hive.key4", val), "0");
       assertEquals(client.getConfigValue("hive.key5", val), val);
       assertEquals(client.getConfigValue(null, val), val);
     } catch (TException e) {
       e.printStackTrace();
-      assert(false);
+      assert (false);
     } catch (ConfigValSecurityException e) {
       e.printStackTrace();
-      assert(false);
+      assert (false);
     }
-    
+
     boolean threwException = false;
     try {
       // Attempting to get the password should throw an exception
       client.getConfigValue("javax.jdo.option.ConnectionPassword", "password");
     } catch (TException e) {
       e.printStackTrace();
-      assert(false);
+      assert (false);
     } catch (ConfigValSecurityException e) {
       threwException = true;
     }
-    assert(threwException);
+    assert (threwException);
   }
 }
\ No newline at end of file
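
The TestHiveMetaStore changes above are almost entirely reformatting, which makes it easy to lose sight of the metastore calling pattern testPartition() exercises: describe a Table through its StorageDescriptor, create it, then register a Partition that reuses that descriptor. A minimal sketch of that flow using the same API as the diff; the demodb/demotbl names and the standalone main() harness are illustrative and not part of this commit, and a metastore reachable through HiveConf is assumed:

    import java.util.ArrayList;
    import java.util.HashMap;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.metastore.api.Order;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.SerDeInfo;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.serde.Constants;

    public class PartitionSketch {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf(PartitionSketch.class);
        HiveMetaStoreClient client = new HiveMetaStoreClient(conf);

        client.createDatabase("demodb", "demodb_loc"); // illustrative names

        // A table is described through its StorageDescriptor: columns,
        // SerDe parameters, bucketing and compression settings.
        StorageDescriptor sd = new StorageDescriptor();
        sd.setCols(new ArrayList<FieldSchema>(1));
        sd.getCols().add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
        sd.setCompressed(false);
        sd.setNumBuckets(1);
        sd.setParameters(new HashMap<String, String>());
        sd.setBucketCols(new ArrayList<String>(1));
        sd.getBucketCols().add("name");
        sd.setSortCols(new ArrayList<Order>());
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName("demotbl");
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(Constants.SERIALIZATION_FORMAT, "1");

        Table tbl = new Table();
        tbl.setDbName("demodb");
        tbl.setTableName("demotbl");
        tbl.setSd(sd);
        tbl.setPartitionKeys(new ArrayList<FieldSchema>(1));
        tbl.getPartitionKeys().add(
            new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
        client.createTable(tbl);

        // Re-fetch the table so its server-assigned location is populated,
        // then register one partition that reuses the table's descriptor.
        tbl = client.getTable("demodb", "demotbl");
        Partition part = new Partition();
        part.setDbName("demodb");
        part.setTableName("demotbl");
        part.setValues(new ArrayList<String>(1));
        part.getValues().add("2008-07-01");
        part.setParameters(new HashMap<String, String>());
        part.setSd(tbl.getSd());
        part.getSd().setLocation(tbl.getSd().getLocation() + "/ds=2008-07-01");
        client.add_partition(part);

        // deleteData=true also removes the partition directory for managed
        // tables; the test above shows EXTERNAL tables keep their data.
        client.dropPartition("demodb", "demotbl", part.getValues(), true);
      }
    }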

Modified: hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveClient.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveClient.java?rev=901581&r1=901580&r2=901581&view=diff
==============================================================================
--- hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveClient.java (original)
+++ hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveClient.java Thu Jan 21 07:29:29 2010
@@ -18,31 +18,11 @@
 
 package org.apache.hadoop.hive.service;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.service.ThriftHive;
-import org.apache.hadoop.hive.service.ThriftHive.*;
-import org.apache.hadoop.hive.service.HiveServerException;
-import org.apache.hadoop.hive.serde2.Deserializer;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.StringUtils;
-
-import com.facebook.fb303.FacebookBase;
-import com.facebook.fb303.FacebookService;
-import com.facebook.fb303.fb_status;
-import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TProtocol;
 
-import org.apache.hadoop.hive.metastore.api.*;
-
 /**
- * Thrift Hive Client
- * Just an empty class that can be used to run queries
- * on a stand alone hive server
+ * Thrift Hive Client. Just an empty class that can be used to run queries on a
+ * standalone Hive server.
  */
 public class HiveClient extends ThriftHive.Client implements HiveInterface {
   public HiveClient(TProtocol prot) {
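
After the import cleanup, HiveClient remains a thin wrapper over the Thrift-generated ThriftHive.Client. A hedged sketch of driving a standalone server through it; the localhost:10000 endpoint matches the defaults in TestHiveServer below, the query and table name are placeholders, and execute()/fetchAll() are the ThriftHive.Iface methods implemented in HiveServer.java:

    import java.util.List;

    import org.apache.hadoop.hive.service.HiveClient;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class HiveClientSketch {
      public static void main(String[] args) throws Exception {
        // Standalone server endpoint; TestHiveServer below uses the same
        // localhost:10000 defaults.
        TTransport transport = new TSocket("localhost", 10000);
        TProtocol protocol = new TBinaryProtocol(transport);
        HiveClient client = new HiveClient(protocol);
        transport.open();

        client.execute("select * from src"); // "src" is a placeholder table
        List<String> rows = client.fetchAll();
        for (String row : rows) {
          System.out.println(row);
        }
        transport.close();
      }
    }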

Modified: hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveInterface.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveInterface.java?rev=901581&r1=901580&r2=901581&view=diff
==============================================================================
--- hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveInterface.java (original)
+++ hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveInterface.java Thu Jan 21 07:29:29 2010
@@ -20,15 +20,17 @@
 
 /**
  * HiveInterface extends 2 interfaces, ThriftHive and ThriftHiveMetastore.
- *
- * ThriftHive.Iface is defined in: 
- *    service/src/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
- * ThriftHiveMetastore.Iface is defined in: 
- *    metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
- *
+ * 
+ * ThriftHive.Iface is defined in:
+ * service/src/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
+ * ThriftHiveMetastore.Iface is defined in:
+ * metastore/src/gen-javabean/org/apache
+ * /hadoop/hive/metastore/api/ThriftHiveMetastore.java
+ * 
  * These interfaces are generated by Thrift. The thrift files are in:
- *    ThriftHive: service/if/hive_service.thrift
- *    ThriftHiveMetastore: metastore/if/hive_metastore.thrift
+ * ThriftHive: service/if/hive_service.thrift ThriftHiveMetastore:
+ * metastore/if/hive_metastore.thrift
  */
-public interface HiveInterface extends ThriftHive.Iface, org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface {
+public interface HiveInterface extends ThriftHive.Iface,
+    org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface {
 }
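
Because HiveInterface extends both Thrift services, a single connected client can mix query RPCs with metastore RPCs over one handle. A sketch under the assumption that the generated metastore accessor keeps the get_table name declared in hive_metastore.thrift; the endpoint, query, and table names are placeholders:

    import org.apache.hadoop.hive.metastore.api.Schema;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.service.HiveClient;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class HiveInterfaceSketch {
      public static void main(String[] args) throws Exception {
        TTransport transport = new TSocket("localhost", 10000); // placeholder endpoint
        HiveClient client = new HiveClient(new TBinaryProtocol(transport));
        transport.open();

        // ThriftHive.Iface side: run a query, then ask for its result schema.
        client.execute("select * from src"); // placeholder query
        Schema schema = client.getSchema();
        System.out.println("columns: " + schema.getFieldSchemas());

        // ThriftHiveMetastore.Iface side, over the same connection; get_table
        // is assumed to be the accessor generated from hive_metastore.thrift.
        Table tbl = client.get_table("default", "src");
        System.out.println("location: " + tbl.getSd().getLocation());

        transport.close();
      }
    }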

Modified: hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveServer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveServer.java?rev=901581&r1=901580&r2=901581&view=diff
==============================================================================
--- hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveServer.java (original)
+++ hadoop/hive/trunk/service/src/java/org/apache/hadoop/hive/service/HiveServer.java Thu Jan 21 07:29:29 2010
@@ -25,10 +25,16 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.service.ThriftHive;
-import org.apache.hadoop.hive.service.HiveServerException;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Schema;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.plan.api.QueryPlan;
+import org.apache.hadoop.hive.ql.processors.CommandProcessor;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
 import org.apache.hadoop.hive.ql.session.SessionState;
-
+import org.apache.hadoop.mapred.ClusterStatus;
+import org.apache.hadoop.mapred.JobTracker;
 import org.apache.thrift.TException;
 import org.apache.thrift.TProcessor;
 import org.apache.thrift.TProcessorFactory;
@@ -40,16 +46,6 @@
 import org.apache.thrift.transport.TTransport;
 import org.apache.thrift.transport.TTransportFactory;
 
-import org.apache.hadoop.hive.ql.plan.api.Query;
-import org.apache.hadoop.hive.ql.plan.api.QueryPlan;
-import org.apache.hadoop.hive.ql.processors.CommandProcessor;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.metastore.api.*;
-import org.apache.hadoop.hive.metastore.*;
-import org.apache.hadoop.mapred.ClusterStatus;
-import org.apache.hadoop.mapred.JobTracker;
-
 /**
  * Thrift Hive Server Implementation
  */
@@ -57,21 +53,16 @@
   private final static String VERSION = "0";
 
   /**
-   * Handler which implements the Hive Interface
-   * This class can be used in lieu of the HiveClient class
-   * to get an embedded server
+   * Handler which implements the Hive Interface. This class can be used in
+   * lieu of the HiveClient class to get an embedded server.
    */
-  public static class HiveServerHandler extends HiveMetaStore.HMSHandler implements HiveInterface {
+  public static class HiveServerHandler extends HiveMetaStore.HMSHandler
+      implements HiveInterface {
     /**
-     * Hive server uses org.apache.hadoop.hive.ql.Driver for run() and 
+     * Hive server uses org.apache.hadoop.hive.ql.Driver for run() and
      * getResults() methods.
      */
-    private Driver driver;
-
-    /**
-     * Stores state per connection
-     */
-    private SessionState session;
+    private final Driver driver;
 
     /**
      * Flag that indicates whether the last executed command was a Hive query
@@ -97,24 +88,25 @@
 
     /**
      * Executes a query.
-     *
-     * @param cmd HiveQL query to execute
+     * 
+     * @param cmd
+     *          HiveQL query to execute
      */
     public void execute(String cmd) throws HiveServerException, TException {
       HiveServerHandler.LOG.info("Running the query: " + cmd);
-      SessionState ss = SessionState.get();
+      SessionState.get();
 
       String cmd_trimmed = cmd.trim();
       String[] tokens = cmd_trimmed.split("\\s");
       String cmd_1 = cmd_trimmed.substring(tokens[0].length()).trim();
-      
+
       int ret = 0;
       String errorMessage = "";
       String SQLState = null;
 
       try {
         CommandProcessor proc = CommandProcessorFactory.get(tokens[0]);
-        if(proc != null) {
+        if (proc != null) {
           if (proc instanceof Driver) {
             isHiveQuery = true;
             Driver.DriverResponse response = driver.runCommand(cmd);
@@ -133,15 +125,16 @@
       }
 
       if (ret != 0) {
-        throw new HiveServerException("Query returned non-zero code: " + ret +
-                                      ", cause: " + errorMessage, ret, SQLState);
+        throw new HiveServerException("Query returned non-zero code: " + ret
+            + ", cause: " + errorMessage, ret, SQLState);
       }
     }
 
     /**
      * Return the status information about the Map-Reduce cluster
      */
-    public HiveClusterStatus getClusterStatus() throws HiveServerException, TException {
+    public HiveClusterStatus getClusterStatus() throws HiveServerException,
+        TException {
       HiveClusterStatus hcs;
       try {
         ClusterStatus cs = driver.getClusterStatus();
@@ -150,26 +143,21 @@
         // Convert the ClusterStatus to its Thrift equivalent: HiveClusterStatus
         int state;
         switch (jbs) {
-          case INITIALIZING:
-            state = JobTrackerState.INITIALIZING;
-            break;
-          case RUNNING:
-            state = JobTrackerState.RUNNING;
-            break;
-          default:
-            String errorMsg = "Unrecognized JobTracker state: " + jbs.toString();
-            throw new Exception(errorMsg);
+        case INITIALIZING:
+          state = JobTrackerState.INITIALIZING;
+          break;
+        case RUNNING:
+          state = JobTrackerState.RUNNING;
+          break;
+        default:
+          String errorMsg = "Unrecognized JobTracker state: " + jbs.toString();
+          throw new Exception(errorMsg);
         }
 
-        hcs = new HiveClusterStatus(
-            cs.getTaskTrackers(),
-            cs.getMapTasks(),
-            cs.getReduceTasks(),
-            cs.getMaxMapTasks(),
-            cs.getMaxReduceTasks(),
+        hcs = new HiveClusterStatus(cs.getTaskTrackers(), cs.getMapTasks(), cs
+            .getReduceTasks(), cs.getMaxMapTasks(), cs.getMaxReduceTasks(),
             state);
-      }
-      catch (Exception e) {
+      } catch (Exception e) {
         LOG.error(e.toString());
         e.printStackTrace();
         HiveServerException ex = new HiveServerException();
@@ -183,9 +171,10 @@
      * Return the Hive schema of the query result
      */
     public Schema getSchema() throws HiveServerException, TException {
-      if (!isHiveQuery)
+      if (!isHiveQuery) {
         // Return empty schema if the last command was not a Hive query
         return new Schema();
+      }
 
       try {
         Schema schema = driver.getSchema();
@@ -194,8 +183,7 @@
         }
         LOG.info("Returning schema: " + schema);
         return schema;
-      }
-      catch (Exception e) {
+      } catch (Exception e) {
         LOG.error(e.toString());
         e.printStackTrace();
         HiveServerException ex = new HiveServerException();
@@ -208,9 +196,10 @@
      * Return the Thrift schema of the query result
      */
     public Schema getThriftSchema() throws HiveServerException, TException {
-      if (!isHiveQuery)
+      if (!isHiveQuery) {
         // Return empty schema if the last command was not a Hive query
         return new Schema();
+      }
 
       try {
         Schema schema = driver.getThriftSchema();
@@ -219,8 +208,7 @@
         }
         LOG.info("Returning schema: " + schema);
         return schema;
-      }
-      catch (Exception e) {
+      } catch (Exception e) {
         LOG.error(e.toString());
         e.printStackTrace();
         HiveServerException ex = new HiveServerException();
@@ -229,16 +217,17 @@
       }
     }
 
-
     /**
      * Fetches the next row in a query result set.
-     *
-     * @return the next row in a query result set. null if there is no more row to fetch.
+     * 
+     * @return the next row in a query result set. null if there is no more row
+     *         to fetch.
      */
     public String fetchOne() throws HiveServerException, TException {
-      if (!isHiveQuery)
+      if (!isHiveQuery) {
         // Return no results if the last command was not a Hive query
         return "";
+      }
 
       Vector<String> result = new Vector<String>();
       driver.setMaxRows(1);
@@ -259,23 +248,27 @@
 
     /**
      * Fetches numRows rows.
-     *
-     * @param numRows Number of rows to fetch.
-     * @return A list of rows. The size of the list is numRows if there are at least
-     *         numRows rows available to return. The size is smaller than numRows if
-     *         there aren't enough rows. The list will be empty if there is no more
-     *         row to fetch or numRows == 0.
-     * @throws HiveServerException Invalid value for numRows (numRows < 0)
+     * 
+     * @param numRows
+     *          Number of rows to fetch.
+     * @return A list of rows. The size of the list is numRows if there are at
+     *         least numRows rows available to return. The size is smaller than
+     *         numRows if there aren't enough rows. The list will be empty if
+     *         there is no more row to fetch or numRows == 0.
+     * @throws HiveServerException
+     *           Invalid value for numRows (numRows < 0)
      */
-    public List<String> fetchN(int numRows) throws HiveServerException, TException {
+    public List<String> fetchN(int numRows) throws HiveServerException,
+        TException {
       if (numRows < 0) {
         HiveServerException ex = new HiveServerException();
         ex.setMessage("Invalid argument for number of rows: " + numRows);
         throw ex;
       }
-      if (!isHiveQuery)
+      if (!isHiveQuery) {
         // Return no results if the last command was not a Hive query
         return new Vector<String>();
+      }
 
       Vector<String> result = new Vector<String>();
       driver.setMaxRows(numRows);
@@ -291,16 +284,19 @@
 
     /**
      * Fetches all the rows in a result set.
-     *
-     * @return All the rows in a result set of a query executed using execute method.
-     *
-     * TODO: Currently the server buffers all the rows before returning them
-     * to the client. Decide whether the buffering should be done in the client.
+     * 
+     * @return All the rows in a result set of a query executed using execute
+     *         method.
+     * 
+     *         TODO: Currently the server buffers all the rows before returning
+     *         them to the client. Decide whether the buffering should be done
+     *         in the client.
      */
     public List<String> fetchAll() throws HiveServerException, TException {
-      if (!isHiveQuery)
+      if (!isHiveQuery) {
         // Return no results if the last command was not a Hive query
         return new Vector<String>();
+      }
 
       Vector<String> rows = new Vector<String>();
       Vector<String> result = new Vector<String>();
@@ -341,8 +337,7 @@
       // will be returned in a single QueryPlan
       try {
         qp.addToQueries(driver.getQueryPlan());
-      }
-      catch (Exception e) {
+      } catch (Exception e) {
         HiveServerException ex = new HiveServerException();
         ex.setMessage(e.toString());
         throw ex;
@@ -353,10 +348,11 @@
   }
 
   public static class ThriftHiveProcessorFactory extends TProcessorFactory {
-    public ThriftHiveProcessorFactory (TProcessor processor) {
+    public ThriftHiveProcessorFactory(TProcessor processor) {
       super(processor);
     }
 
+    @Override
     public TProcessor getProcessor(TTransport trans) {
       try {
         Iface handler = new HiveServerHandler();
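
[The factory's job is to hand each incoming connection its own HiveServerHandler, keeping per-session driver state isolated. A sketch of how such a factory is typically plugged into a Thrift thread-pool server; the two-argument TThreadPoolServer constructor matches the libthrift generation of this era and is an assumption for later releases, and passing null is workable only because getProcessor() above ignores the wrapped processor:]

    import org.apache.hadoop.hive.service.HiveServer;
    import org.apache.thrift.server.TServer;
    import org.apache.thrift.server.TThreadPoolServer;
    import org.apache.thrift.transport.TServerSocket;
    import org.apache.thrift.transport.TServerTransport;

    public class ServerWiringSketch {
      public static void main(String[] args) throws Exception {
        // Each connection gets a fresh HiveServerHandler from the factory.
        TServerTransport serverTransport = new TServerSocket(10000);
        HiveServer.ThriftHiveProcessorFactory processorFactory =
            new HiveServer.ThriftHiveProcessorFactory(null);
        TServer server =
            new TThreadPoolServer(processorFactory, serverTransport);
        server.serve();
      }
    }
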

Modified: hadoop/hive/trunk/service/src/test/org/apache/hadoop/hive/service/TestHiveServer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/service/src/test/org/apache/hadoop/hive/service/TestHiveServer.java?rev=901581&r1=901580&r2=901581&view=diff
==============================================================================
--- hadoop/hive/trunk/service/src/test/org/apache/hadoop/hive/service/TestHiveServer.java (original)
+++ hadoop/hive/trunk/service/src/test/org/apache/hadoop/hive/service/TestHiveServer.java Thu Jan 21 07:29:29 2010
@@ -1,48 +1,49 @@
 package org.apache.hadoop.hive.service;
 
-import java.util.*;
+import java.util.List;
+import java.util.Properties;
 
-import org.apache.hadoop.fs.Path;
 import junit.framework.TestCase;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Schema;
-import org.apache.hadoop.hive.service.HiveInterface;
-import org.apache.hadoop.hive.service.HiveClient;
-import org.apache.hadoop.hive.service.HiveServer;
-import org.apache.thrift.protocol.TProtocol;
+import org.apache.hadoop.hive.serde.Constants;
+import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe;
+import org.apache.hadoop.io.BytesWritable;
 import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TProtocol;
 import org.apache.thrift.transport.TSocket;
 import org.apache.thrift.transport.TTransport;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe;
-import org.apache.hadoop.hive.serde.Constants;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.BytesWritable;
 
 public class TestHiveServer extends TestCase {
 
   private HiveInterface client;
   private final static String host = "localhost";
   private final static int port = 10000;
-  private Path dataFilePath;
+  private final Path dataFilePath;
 
   private static String tableName = "testhivedrivertable";
-  private HiveConf conf;
+  private final HiveConf conf;
   private boolean standAloneServer = false;
   private TTransport transport;
 
   public TestHiveServer(String name) {
     super(name);
     conf = new HiveConf(TestHiveServer.class);
-    String dataFileDir = conf.get("test.data.files").replace('\\', '/').replace("c:", "");
+    String dataFileDir = conf.get("test.data.files").replace('\\', '/')
+        .replace("c:", "");
     dataFilePath = new Path(dataFileDir, "kv1.txt");
     // See data/conf/hive-site.xml
     String paramStr = System.getProperty("test.service.standalone.server");
-    if (paramStr != null && paramStr.equals("true"))
+    if (paramStr != null && paramStr.equals("true")) {
       standAloneServer = true;
+    }
   }
 
+  @Override
   protected void setUp() throws Exception {
     super.setUp();
     if (standAloneServer) {
@@ -51,16 +52,15 @@
         TProtocol protocol = new TBinaryProtocol(transport);
         client = new HiveClient(protocol);
         transport.open();
-      }
-      catch (Throwable e) {
+      } catch (Throwable e) {
         e.printStackTrace();
       }
-    }
-    else {
+    } else {
       client = new HiveServer.HiveServerHandler();
     }
   }
 
+  @Override
   protected void tearDown() throws Exception {
     super.tearDown();
     if (standAloneServer) {
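
[The standalone branch of setUp() above is the complete Thrift client handshake; pulled out of the test, it reads as follows, with host and port as declared in this class:]

    import org.apache.hadoop.hive.service.HiveClient;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class ClientConnectSketch {
      public static void main(String[] args) throws Exception {
        // Connect to a standalone HiveServer on localhost:10000.
        TTransport transport = new TSocket("localhost", 10000);
        TProtocol protocol = new TBinaryProtocol(transport);
        HiveClient client = new HiveClient(protocol);
        transport.open();
        client.execute("show tables");
        System.out.println(client.fetchAll());
        transport.close();
      }
    }
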
@@ -76,26 +76,26 @@
 
     try {
       client.execute("create table " + tableName + " (num int)");
-      client.execute("load data local inpath '" + dataFilePath.toString() + "' into table " + tableName);
+      client.execute("load data local inpath '" + dataFilePath.toString()
+          + "' into table " + tableName);
       client.execute("select count(1) as cnt from " + tableName);
       String row = client.fetchOne();
       assertEquals(row, "500");
-      
+
       Schema hiveSchema = client.getSchema();
       List<FieldSchema> listFields = hiveSchema.getFieldSchemas();
       assertEquals(listFields.size(), 1);
       assertEquals(listFields.get(0).getName(), "cnt");
       assertEquals(listFields.get(0).getType(), "bigint");
-      
+
       Schema thriftSchema = client.getThriftSchema();
       List<FieldSchema> listThriftFields = thriftSchema.getFieldSchemas();
       assertEquals(listThriftFields.size(), 1);
       assertEquals(listThriftFields.get(0).getName(), "cnt");
       assertEquals(listThriftFields.get(0).getType(), "i64");
-      
+
       client.execute("drop table " + tableName);
-    }
-    catch (Throwable t) {
+    } catch (Throwable t) {
       t.printStackTrace();
     }
   }
@@ -107,7 +107,8 @@
     }
 
     client.execute("create table " + tableName + " (num int)");
-    client.execute("load data local inpath '" + dataFilePath.toString() + "' into table " + tableName);
+    client.execute("load data local inpath '" + dataFilePath.toString()
+        + "' into table " + tableName);
     client.execute("select count(1) from " + tableName);
     String row = client.fetchOne();
     assertEquals(row, "500");
@@ -122,38 +123,39 @@
     }
 
     client.execute("create table " + tableName + " (num int)");
-    client.execute("load data local inpath '" + dataFilePath.toString() + "' into table " + tableName);
-    
-    // Command not part of HiveQL -  verify no results
+    client.execute("load data local inpath '" + dataFilePath.toString()
+        + "' into table " + tableName);
+
+    // Command not part of HiveQL - verify no results
     client.execute("SET hive.mapred.mode = nonstrict");
-    
+
     Schema schema = client.getSchema();
     assertEquals(schema.getFieldSchemasSize(), 0);
     assertEquals(schema.getPropertiesSize(), 0);
-    
+
     Schema thriftschema = client.getThriftSchema();
     assertEquals(thriftschema.getFieldSchemasSize(), 0);
     assertEquals(thriftschema.getPropertiesSize(), 0);
-    
+
     assertEquals(client.fetchOne(), "");
     assertEquals(client.fetchN(10).size(), 0);
     assertEquals(client.fetchAll().size(), 0);
-    
+
     // Execute Hive query and fetch
     client.execute("select * from " + tableName + " limit 10");
-    String row = client.fetchOne();
-    
+    client.fetchOne();
+
     // Re-execute command not part of HiveQL - verify still no results
     client.execute("SET hive.mapred.mode = nonstrict");
-    
+
     schema = client.getSchema();
     assertEquals(schema.getFieldSchemasSize(), 0);
     assertEquals(schema.getPropertiesSize(), 0);
-    
+
     thriftschema = client.getThriftSchema();
     assertEquals(thriftschema.getFieldSchemasSize(), 0);
     assertEquals(thriftschema.getPropertiesSize(), 0);
-    
+
     assertEquals(client.fetchOne(), "");
     assertEquals(client.fetchN(10).size(), 0);
     assertEquals(client.fetchAll().size(), 0);
@@ -161,7 +163,7 @@
     // Cleanup
     client.execute("drop table " + tableName);
   }
-  
+
   /**
    * Test metastore call
    */
@@ -188,10 +190,10 @@
     assertTrue(clusterStatus.getReduceTasks() >= 0);
     assertTrue(clusterStatus.getMaxMapTasks() >= 0);
     assertTrue(clusterStatus.getMaxReduceTasks() >= 0);
-    assertTrue(clusterStatus.getState() == JobTrackerState.INITIALIZING ||
-               clusterStatus.getState() == JobTrackerState.RUNNING);
+    assertTrue(clusterStatus.getState() == JobTrackerState.INITIALIZING
+        || clusterStatus.getState() == JobTrackerState.RUNNING);
   }
-  
+
   /** 
    *
    */
@@ -202,31 +204,31 @@
     } catch (Exception ex) {
     }
     client.execute("create table " + tableName + " (key int, value string)");
-    client.execute("load data local inpath '" + dataFilePath.toString() + "' into table " + tableName);
+    client.execute("load data local inpath '" + dataFilePath.toString()
+        + "' into table " + tableName);
 
     try {
-    // fetchAll test
-    client.execute("select key, value from " + tableName);
-    assertEquals(client.fetchAll().size(), 500);
-    assertEquals(client.fetchAll().size(), 0);
-
-    // fetchOne test
-    client.execute("select key, value from " + tableName);
-    for (int i = 0; i < 500; i++) {
-      String str = client.fetchOne();
-      if (str.equals("")) {
-        assertTrue(false);
+      // fetchAll test
+      client.execute("select key, value from " + tableName);
+      assertEquals(client.fetchAll().size(), 500);
+      assertEquals(client.fetchAll().size(), 0);
+
+      // fetchOne test
+      client.execute("select key, value from " + tableName);
+      for (int i = 0; i < 500; i++) {
+        String str = client.fetchOne();
+        if (str.equals("")) {
+          assertTrue(false);
+        }
       }
-    }
-    assertEquals(client.fetchOne(), "");
+      assertEquals(client.fetchOne(), "");
 
-    // fetchN test
-    client.execute("select key, value from " + tableName);
-    assertEquals(client.fetchN(499).size(), 499);
-    assertEquals(client.fetchN(499).size(), 1);
-    assertEquals(client.fetchN(499).size(), 0);
-    }
-    catch (Throwable e) {
+      // fetchN test
+      client.execute("select key, value from " + tableName);
+      assertEquals(client.fetchN(499).size(), 499);
+      assertEquals(client.fetchN(499).size(), 1);
+      assertEquals(client.fetchN(499).size(), 0);
+    } catch (Throwable e) {
       e.printStackTrace();
     }
   }
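
[The assertions above pin down the cursor semantics: execute() opens one server-side result cursor per session, and fetchOne/fetchN/fetchAll all advance that same cursor, which is why the second fetchAll() sees an empty list and fetchN(499) on the 500-row table yields 499, then 1, then 0 rows.]
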
@@ -238,21 +240,28 @@
     }
 
     client.execute("create table " + tableName + " (key int, value string)");
-    client.execute("load data local inpath '" + dataFilePath.toString() + "' into table " + tableName);
-    //client.execute("select key, count(1) from " + tableName + " where key > 10 group by key");
+    client.execute("load data local inpath '" + dataFilePath.toString()
+        + "' into table " + tableName);
+    // client.execute("select key, count(1) from " + tableName +
+    // " where key > 10 group by key");
     String sql = "select key, value from " + tableName + " where key > 10";
     client.execute(sql);
 
     // Instantiate DynamicSerDe
     DynamicSerDe ds = new DynamicSerDe();
     Properties dsp = new Properties();
-    dsp.setProperty(Constants.SERIALIZATION_FORMAT, org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol.class.getName());
-    dsp.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME, "result");
+    dsp.setProperty(Constants.SERIALIZATION_FORMAT,
+        org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol.class
+            .getName());
+    dsp.setProperty(
+        org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME,
+        "result");
     String serDDL = new String("struct result { ");
     List<FieldSchema> schema = client.getThriftSchema().getFieldSchemas();
     for (int pos = 0; pos < schema.size(); pos++) {
-      if (pos != 0) 
-          serDDL = serDDL.concat(",");
+      if (pos != 0) {
+        serDDL = serDDL.concat(",");
+      }
       serDDL = serDDL.concat(schema.get(pos).getType());
       serDDL = serDDL.concat(" ");
       serDDL = serDDL.concat(schema.get(pos).getName());
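
[The loop above assembles a Thrift DDL string of the form "struct result { i32 key, string value" from the query's Thrift schema. A sketch of the remaining wiring, continuing the test's locals; Constants.SERIALIZATION_DDL is the serde property key for the DDL, and the two-argument initialize() is the old SerDe interface signature — both are stated here as assumptions, since this portion is elided from the hunk:]

    // Hypothetical completion, continuing serDDL, dsp, and ds from the
    // test above: close the struct and hand the DDL to the serde
    // before calling deserialize().
    serDDL = serDDL.concat("}");
    dsp.setProperty(Constants.SERIALIZATION_DDL, serDDL);
    ds.initialize(new Configuration(), dsp); // old SerDe init signature
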
@@ -268,10 +277,10 @@
     Object o = ds.deserialize(new BytesWritable(row.getBytes()));
 
     assertEquals(o.getClass().toString(), "class java.util.ArrayList");
-    List<?> lst = (List<?>)o;
+    List<?> lst = (List<?>) o;
     assertEquals(lst.get(0), 238);
 
-    // TODO: serde doesn't like underscore  -- struct result { string _c0}
+    // TODO: serde doesn't like underscore -- struct result { string _c0}
     sql = "select count(1) as c from " + tableName;
     client.execute(sql);
     row = client.fetchOne();
@@ -279,8 +288,9 @@
     serDDL = new String("struct result { ");
     schema = client.getThriftSchema().getFieldSchemas();
     for (int pos = 0; pos < schema.size(); pos++) {
-      if (pos != 0) 
-          serDDL = serDDL.concat(",");
+      if (pos != 0) {
+        serDDL = serDDL.concat(",");
+      }
       serDDL = serDDL.concat(schema.get(pos).getType());
       serDDL = serDDL.concat(" ");
       serDDL = serDDL.concat(schema.get(pos).getName());


