hive-commits mailing list archives

From hashut...@apache.org
Subject svn commit: r1309666 [1/2] - in /hive/trunk: common/src/java/org/apache/hadoop/hive/conf/ conf/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/metadata/ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/ ql/sr...
Date Thu, 05 Apr 2012 06:12:16 GMT
Author: hashutosh
Date: Thu Apr  5 06:12:15 2012
New Revision: 1309666

URL: http://svn.apache.org/viewvc?rev=1309666&view=rev
Log:
HIVE-2822 [jira] Add JSON output to the hive ddl commands
(Chris Dean via Ashutosh Chauhan)

Summary:
JSON output for DDL commands

By setting the variable hive.ddl.output.format=json, DDL commands will
produce JSON output suitable for client-side parsing.  For example, to
list all the databases one might get:

    {
      "databases": [
        "default"
      ]
    }

The goal is to have an option to produce JSON output for the DDL commands that is
easily machine-parseable.

For example, "desc my_table" currently gives

    id    bigint
    user  string

and we want to allow JSON output:

    {
      "columns": [
        {"name": "id", "type": "bigint"},
        {"name": "user", "type": "string"}
      ]
    }
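
A minimal client-side sketch of consuming that output, using the same
Jackson library (org.codehaus.jackson) that the patch itself uses to
emit the JSON.  The class name and the hard-coded string below are
illustrative only:

    import java.util.List;
    import java.util.Map;
    import org.codehaus.jackson.map.ObjectMapper;

    public class DescTableClient {
        public static void main(String[] args) throws Exception {
            // JSON as produced by "desc my_table" with hive.ddl.output.format=json
            String json = "{\"columns\":[{\"name\":\"id\",\"type\":\"bigint\"},"
                        + "{\"name\":\"user\",\"type\":\"string\"}]}";
            Map<String, Object> result =
                new ObjectMapper().readValue(json, Map.class);
            List<Map<String, String>> columns =
                (List<Map<String, String>>) result.get("columns");
            for (Map<String, String> col : columns) {
                System.out.println(col.get("name") + "\t" + col.get("type"));
            }
        }
    }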

Test Plan: Run the provided unit tests

Reviewers: JIRA, ashutoshc

Reviewed By: ashutoshc

Differential Revision: https://reviews.facebook.net/D2475

Added:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MapBuilder.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
      - copied, changed from r1309624, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/MetaDataFormatUtils.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
    hive/trunk/ql/src/test/queries/clientpositive/describe_database_json.q
    hive/trunk/ql/src/test/queries/clientpositive/describe_table_json.q
    hive/trunk/ql/src/test/queries/clientpositive/misc_json.q
    hive/trunk/ql/src/test/queries/clientpositive/partitions_json.q
    hive/trunk/ql/src/test/results/clientpositive/describe_database_json.q.out
    hive/trunk/ql/src/test/results/clientpositive/describe_table_json.q.out
    hive/trunk/ql/src/test/results/clientpositive/misc_json.q.out
    hive/trunk/ql/src/test/results/clientpositive/partitions_json.q.out
Removed:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/MetaDataFormatUtils.java
Modified:
    hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/trunk/conf/hive-default.xml.template
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/trunk/ql/src/test/results/clientnegative/database_create_already_exists.q.out
    hive/trunk/ql/src/test/results/clientnegative/database_drop_does_not_exist.q.out

Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1309666&r1=1309665&r2=1309666&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Thu Apr  5 06:12:15 2012
@@ -582,6 +582,7 @@ public class HiveConf extends Configurat
     // A comma separated list of hooks which implement HiveDriverRunHook and will be run at the
     // beginning and end of Driver.run, these will be run in the order specified
     HIVE_DRIVER_RUN_HOOKS("hive.exec.driver.run.hooks", ""),
+    HIVE_DDL_OUTPUT_FORMAT("hive.ddl.output.format", null),
     ;
 
     public final String varname;

Modified: hive/trunk/conf/hive-default.xml.template
URL: http://svn.apache.org/viewvc/hive/trunk/conf/hive-default.xml.template?rev=1309666&r1=1309665&r2=1309666&view=diff
==============================================================================
--- hive/trunk/conf/hive-default.xml.template (original)
+++ hive/trunk/conf/hive-default.xml.template Thu Apr  5 06:12:15 2012
@@ -1259,4 +1259,13 @@
   </description>
 </property>
 
+<property>
+  <name>hive.ddl.output.format</name>
+  <value>text</value>
+  <description>
+    The data format to use for DDL output.  One of "text" (for human
+    readable text) or "json" (for a json object).
+  </description>
+</property>
+
 </configuration>
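
For reference, the new property can be toggled per session in the CLI
(presumably via "set hive.ddl.output.format=json;", as the new *_json.q
tests would do) or directly on the configuration object that DDLTask
reads.  A small hypothetical sketch of the latter; the class name is
illustrative:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class EnableJsonDdlOutput {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            // DDLTask.initialize() reads this key with a "text" default;
            // the value "json" makes it pick JsonMetaDataFormatter.
            conf.set(HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "json");
            System.out.println(
                conf.get(HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "text"));
        }
    }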

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1309666&r1=1309665&r2=1309666&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Thu Apr  5 06:12:15 2012
@@ -23,6 +23,7 @@ import static org.apache.hadoop.util.Str
 
 import java.io.BufferedWriter;
 import java.io.DataOutput;
+import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
@@ -91,9 +92,12 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
-import org.apache.hadoop.hive.ql.metadata.MetaDataFormatUtils;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.metadata.formatting.JsonMetaDataFormatter;
+import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils;
+import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter;
+import org.apache.hadoop.hive.ql.metadata.formatting.TextMetaDataFormatter;
 import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
@@ -167,6 +171,8 @@ public class DDLTask extends Task<DDLWor
   private static String INTERMEDIATE_ORIGINAL_DIR_SUFFIX;
   private static String INTERMEDIATE_EXTRACTED_DIR_SUFFIX;
 
+  private MetaDataFormatter formatter;
+
   @Override
   public boolean requireLock() {
     return this.work != null && this.work.getNeedLock();
@@ -181,6 +187,14 @@ public class DDLTask extends Task<DDLWor
     super.initialize(conf, queryPlan, ctx);
     this.conf = conf;
 
+    // Pick the formatter to use to display the results.  Either the
+    // normal human readable output or a json object.
+    if ("json".equals(conf.get(
+            HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "text")))
+      formatter = new JsonMetaDataFormatter();
+    else
+      formatter = new TextMetaDataFormatter();
+
     INTERMEDIATE_ARCHIVED_DIR_SUFFIX =
       HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED);
     INTERMEDIATE_ORIGINAL_DIR_SUFFIX =
@@ -376,17 +390,29 @@ public class DDLTask extends Task<DDLWor
       }
 
     } catch (InvalidTableException e) {
-      console.printError("Table " + e.getTableName() + " does not exist");
+      formatter.consoleError(console, "Table " + e.getTableName() + " does not exist",
+                             formatter.MISSING);
       LOG.debug(stringifyException(e));
       return 1;
+    } catch (AlreadyExistsException e) {
+      formatter.consoleError(console, e.getMessage(), formatter.CONFLICT);
+      return 1;
+    } catch (NoSuchObjectException e) {
+      formatter.consoleError(console, e.getMessage(),
+                             "\n" + stringifyException(e),
+                             formatter.MISSING);
+      return 1;
     } catch (HiveException e) {
-      console.printError("FAILED: Error in metadata: " + e.getMessage(), "\n"
-          + stringifyException(e));
+      formatter.consoleError(console,
+                             "FAILED: Error in metadata: " + e.getMessage(),
+                             "\n" + stringifyException(e),
+                             formatter.ERROR);
       LOG.debug(stringifyException(e));
       return 1;
     } catch (Exception e) {
-      console.printError("Failed with exception " + e.getMessage(), "\n"
-          + stringifyException(e));
+      formatter.consoleError(console, "Failed with exception " + e.getMessage(),
+                             "\n" + stringifyException(e),
+                             formatter.ERROR);
       return (1);
     }
     assert false;
@@ -1802,7 +1828,9 @@ public class DDLTask extends Task<DDLWor
     tbl = db.getTable(tabName);
 
     if (!tbl.isPartitioned()) {
-      console.printError("Table " + tabName + " is not a partitioned table");
+      formatter.consoleError(console,
+                             "Table " + tabName + " is not a partitioned table",
+                             formatter.ERROR);
       return 1;
     }
     if (showParts.getPartSpec() != null) {
@@ -1813,26 +1841,24 @@ public class DDLTask extends Task<DDLWor
     }
 
     // write the results in the file
-    DataOutput outStream = null;
+    DataOutputStream outStream = null;
     try {
       Path resFile = new Path(showParts.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
       outStream = fs.create(resFile);
-      Iterator<String> iterParts = parts.iterator();
 
-      while (iterParts.hasNext()) {
-        // create a row per partition name
-        outStream.writeBytes(iterParts.next());
-        outStream.write(terminator);
-      }
+      formatter.showTablePartitons(outStream, parts);
+
       ((FSDataOutputStream) outStream).close();
       outStream = null;
     } catch (FileNotFoundException e) {
-      LOG.info("show partitions: " + stringifyException(e));
-      throw new HiveException(e);
-    } catch (IOException e) {
-      LOG.info("show partitions: " + stringifyException(e));
-      throw new HiveException(e);
+        formatter.logWarn(outStream, "show partitions: " + stringifyException(e),
+                          MetaDataFormatter.ERROR);
+        return 1;
+      } catch (IOException e) {
+        formatter.logWarn(outStream, "show partitions: " + stringifyException(e),
+                          MetaDataFormatter.ERROR);
+        return 1;
     } catch (Exception e) {
       throw new HiveException(e);
     } finally {
@@ -1921,24 +1947,22 @@ public class DDLTask extends Task<DDLWor
     LOG.info("results : " + databases.size());
 
     // write the results in the file
-    DataOutput outStream = null;
+    DataOutputStream outStream = null;
     try {
       Path resFile = new Path(showDatabasesDesc.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
       outStream = fs.create(resFile);
 
-      for (String database : databases) {
-        // create a row per database name
-        outStream.writeBytes(database);
-        outStream.write(terminator);
-      }
+      formatter.showDatabases(outStream, databases);
       ((FSDataOutputStream) outStream).close();
       outStream = null;
     } catch (FileNotFoundException e) {
-      LOG.warn("show databases: " + stringifyException(e));
+      formatter.logWarn(outStream, "show databases: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } catch (IOException e) {
-      LOG.warn("show databases: " + stringifyException(e));
+      formatter.logWarn(outStream, "show databases: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } catch (Exception e) {
       throw new HiveException(e.toString());
@@ -1977,26 +2001,23 @@ public class DDLTask extends Task<DDLWor
     }
 
     // write the results in the file
-    DataOutput outStream = null;
+    DataOutputStream outStream = null;
     try {
       Path resFile = new Path(showTbls.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
       outStream = fs.create(resFile);
-      SortedSet<String> sortedTbls = new TreeSet<String>(tbls);
-      Iterator<String> iterTbls = sortedTbls.iterator();
 
-      while (iterTbls.hasNext()) {
-        // create a row per table name
-        outStream.writeBytes(iterTbls.next());
-        outStream.write(terminator);
-      }
+      SortedSet<String> sortedTbls = new TreeSet<String>(tbls);
+      formatter.showTables(outStream, sortedTbls);
       ((FSDataOutputStream) outStream).close();
       outStream = null;
     } catch (FileNotFoundException e) {
-      LOG.warn("show table: " + stringifyException(e));
+      formatter.logWarn(outStream, "show table: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } catch (IOException e) {
-      LOG.warn("show table: " + stringifyException(e));
+      formatter.logWarn(outStream, "show table: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } catch (Exception e) {
       throw new HiveException(e.toString());
@@ -2320,7 +2341,7 @@ public class DDLTask extends Task<DDLWor
   }
 
   private int descDatabase(DescDatabaseDesc descDatabase) throws HiveException {
-    DataOutput outStream = null;
+    DataOutputStream outStream = null;
     try {
       Path resFile = new Path(descDatabase.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
@@ -2328,37 +2349,32 @@ public class DDLTask extends Task<DDLWor
 
       Database database = db.getDatabase(descDatabase.getDatabaseName());
 
-      if (database != null) {
-        outStream.writeBytes(database.getName());
-        outStream.write(separator);
-        if (database.getDescription() != null) {
-          outStream.writeBytes(database.getDescription());
-        }
-        outStream.write(separator);
-        if (database.getLocationUri() != null) {
-          outStream.writeBytes(database.getLocationUri());
-        }
-
-        outStream.write(separator);
-        if (descDatabase.isExt() && database.getParametersSize() > 0) {
-          Map<String, String> params = database.getParameters();
-          outStream.writeBytes(params.toString());
-        }
-
+      if (database == null) {
+          formatter.error(outStream,
+                          "No such database: " + descDatabase.getDatabaseName(),
+                          formatter.MISSING);
       } else {
-        outStream.writeBytes("No such database: " + descDatabase.getDatabaseName());
+          Map<String, String> params = null;
+          if(descDatabase.isExt())
+              params = database.getParameters();
+
+          formatter.showDatabaseDescription(outStream,
+                                            database.getName(),
+                                            database.getDescription(),
+                                            database.getLocationUri(),
+                                            params);
       }
-
-      outStream.write(terminator);
-
       ((FSDataOutputStream) outStream).close();
       outStream = null;
-
     } catch (FileNotFoundException e) {
-      LOG.warn("describe database: " + stringifyException(e));
+      formatter.logWarn(outStream,
+                        "describe database: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } catch (IOException e) {
-      LOG.warn("describe database: " + stringifyException(e));
+      formatter.logWarn(outStream,
+                        "describe database: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } catch (Exception e) {
       throw new HiveException(e.toString());
@@ -2406,95 +2422,23 @@ public class DDLTask extends Task<DDLWor
     }
 
     // write the results in the file
-    DataOutput outStream = null;
+    DataOutputStream outStream = null;
     try {
       Path resFile = new Path(showTblStatus.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
       outStream = fs.create(resFile);
 
-      Iterator<Table> iterTables = tbls.iterator();
-      while (iterTables.hasNext()) {
-        // create a row per table name
-        Table tbl = iterTables.next();
-        String tableName = tbl.getTableName();
-        String tblLoc = null;
-        String inputFormattCls = null;
-        String outputFormattCls = null;
-        if (part != null) {
-          if (par != null) {
-            if (par.getLocation() != null) {
-              tblLoc = par.getDataLocation().toString();
-            }
-            inputFormattCls = par.getInputFormatClass().getName();
-            outputFormattCls = par.getOutputFormatClass().getName();
-          }
-        } else {
-          if (tbl.getPath() != null) {
-            tblLoc = tbl.getDataLocation().toString();
-          }
-          inputFormattCls = tbl.getInputFormatClass().getName();
-          outputFormattCls = tbl.getOutputFormatClass().getName();
-        }
-
-        String owner = tbl.getOwner();
-        List<FieldSchema> cols = tbl.getCols();
-        String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols);
-        boolean isPartitioned = tbl.isPartitioned();
-        String partitionCols = "";
-        if (isPartitioned) {
-          partitionCols = MetaStoreUtils.getDDLFromFieldSchema(
-              "partition_columns", tbl.getPartCols());
-        }
-
-        outStream.writeBytes("tableName:" + tableName);
-        outStream.write(terminator);
-        outStream.writeBytes("owner:" + owner);
-        outStream.write(terminator);
-        outStream.writeBytes("location:" + tblLoc);
-        outStream.write(terminator);
-        outStream.writeBytes("inputformat:" + inputFormattCls);
-        outStream.write(terminator);
-        outStream.writeBytes("outputformat:" + outputFormattCls);
-        outStream.write(terminator);
-        outStream.writeBytes("columns:" + ddlCols);
-        outStream.write(terminator);
-        outStream.writeBytes("partitioned:" + isPartitioned);
-        outStream.write(terminator);
-        outStream.writeBytes("partitionColumns:" + partitionCols);
-        outStream.write(terminator);
-        // output file system information
-        Path tablLoc = tbl.getPath();
-        List<Path> locations = new ArrayList<Path>();
-        if (isPartitioned) {
-          if (par == null) {
-            for (Partition curPart : db.getPartitions(tbl)) {
-              if (curPart.getLocation() != null) {
-                locations.add(new Path(curPart.getLocation()));
-              }
-            }
-          } else {
-            if (par.getLocation() != null) {
-              locations.add(new Path(par.getLocation()));
-            }
-          }
-        } else {
-          if (tablLoc != null) {
-            locations.add(tablLoc);
-          }
-        }
-        if (!locations.isEmpty()) {
-          writeFileSystemStats(outStream, locations, tablLoc, false, 0);
-        }
+      formatter.showTableStatus(outStream, db, conf, tbls, part, par);
 
-        outStream.write(terminator);
-      }
       ((FSDataOutputStream) outStream).close();
       outStream = null;
     } catch (FileNotFoundException e) {
-      LOG.info("show table status: " + stringifyException(e));
+      formatter.logInfo(outStream, "show table status: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } catch (IOException e) {
-      LOG.info("show table status: " + stringifyException(e));
+      formatter.logInfo(outStream, "show table status: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } catch (Exception e) {
       throw new HiveException(e);
@@ -2523,14 +2467,14 @@ public class DDLTask extends Task<DDLWor
     // describe the table - populate the output stream
     Table tbl = db.getTable(tableName, false);
     Partition part = null;
-    DataOutput outStream = null;
+    DataOutputStream outStream = null;
     try {
       Path resFile = new Path(descTbl.getResFile());
       if (tbl == null) {
         FileSystem fs = resFile.getFileSystem(conf);
         outStream = fs.create(resFile);
         String errMsg = "Table " + tableName + " does not exist";
-        outStream.write(errMsg.getBytes("UTF-8"));
+        formatter.error(outStream, errMsg, formatter.MISSING);
         ((FSDataOutputStream) outStream).close();
         outStream = null;
         return 0;
@@ -2542,7 +2486,7 @@ public class DDLTask extends Task<DDLWor
           outStream = fs.create(resFile);
           String errMsg = "Partition " + descTbl.getPartSpec() + " for table "
               + tableName + " does not exist";
-          outStream.write(errMsg.getBytes("UTF-8"));
+          formatter.error(outStream, errMsg, formatter.MISSING);
           ((FSDataOutputStream) outStream).close();
           outStream = null;
           return 0;
@@ -2550,87 +2494,50 @@ public class DDLTask extends Task<DDLWor
         tbl = part.getTable();
       }
     } catch (FileNotFoundException e) {
-      LOG.info("describe table: " + stringifyException(e));
+      formatter.logInfo(outStream, "describe table: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } catch (IOException e) {
-      LOG.info("describe table: " + stringifyException(e));
+      formatter.logInfo(outStream, "describe table: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } finally {
       IOUtils.closeStream((FSDataOutputStream) outStream);
     }
 
     try {
-
       LOG.info("DDLTask: got data for " + tbl.getTableName());
-
       Path resFile = new Path(descTbl.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
       outStream = fs.create(resFile);
 
+      List<FieldSchema> cols = null;
       if (colPath.equals(tableName)) {
-        List<FieldSchema> cols = (part == null) ? tbl.getCols() : part.getCols();
+        cols = (part == null) ? tbl.getCols() : part.getCols();
 
         if (!descTbl.isFormatted()) {
           if (tableName.equals(colPath)) {
             cols.addAll(tbl.getPartCols());
           }
-          outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
-        } else {
-          outStream.writeBytes(
-            MetaDataFormatUtils.getAllColumnsInformation(cols,
-              tbl.isPartitioned() ? tbl.getPartCols() : null));
         }
       } else {
-        List<FieldSchema> cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
-        if (descTbl.isFormatted()) {
-          outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(cols));
-        } else {
-          outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
-        }
+        cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
       }
 
-      if (tableName.equals(colPath)) {
-
-        if (descTbl.isFormatted()) {
-          if (part != null) {
-            outStream.writeBytes(MetaDataFormatUtils.getPartitionInformation(part));
-          } else {
-            outStream.writeBytes(MetaDataFormatUtils.getTableInformation(tbl));
-          }
-        }
-
-        // if extended desc table then show the complete details of the table
-        if (descTbl.isExt()) {
-          // add empty line
-          outStream.write(terminator);
-          if (part != null) {
-            // show partition information
-            outStream.writeBytes("Detailed Partition Information");
-            outStream.write(separator);
-            outStream.writeBytes(part.getTPartition().toString());
-            outStream.write(separator);
-            // comment column is empty
-            outStream.write(terminator);
-          } else {
-            // show table information
-            outStream.writeBytes("Detailed Table Information");
-            outStream.write(separator);
-            outStream.writeBytes(tbl.getTTable().toString());
-            outStream.write(separator);
-            outStream.write(terminator);
-          }
-        }
-      }
+      formatter.describeTable(outStream, colPath, tableName, tbl, part, cols,
+                              descTbl.isFormatted(), descTbl.isExt());
 
       LOG.info("DDLTask: written data for " + tbl.getTableName());
       ((FSDataOutputStream) outStream).close();
       outStream = null;
 
     } catch (FileNotFoundException e) {
-      LOG.info("describe table: " + stringifyException(e));
+      formatter.logInfo(outStream, "describe table: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } catch (IOException e) {
-      LOG.info("describe table: " + stringifyException(e));
+      formatter.logInfo(outStream, "describe table: " + stringifyException(e),
+                        formatter.ERROR);
       return 1;
     } catch (Exception e) {
       throw new HiveException(e);
@@ -2682,128 +2589,6 @@ public class DDLTask extends Task<DDLWor
     outStream.write(separator);
   }
 
-  private void writeFileSystemStats(DataOutput outStream, List<Path> locations,
-      Path tabLoc, boolean partSpecified, int indent) throws IOException {
-    long totalFileSize = 0;
-    long maxFileSize = 0;
-    long minFileSize = Long.MAX_VALUE;
-    long lastAccessTime = 0;
-    long lastUpdateTime = 0;
-    int numOfFiles = 0;
-
-    boolean unknown = false;
-    FileSystem fs = tabLoc.getFileSystem(conf);
-    // in case all files in locations do not exist
-    try {
-      FileStatus tmpStatus = fs.getFileStatus(tabLoc);
-      lastAccessTime = ShimLoader.getHadoopShims().getAccessTime(tmpStatus);
-      lastUpdateTime = tmpStatus.getModificationTime();
-      if (partSpecified) {
-        // check whether the part exists or not in fs
-        tmpStatus = fs.getFileStatus(locations.get(0));
-      }
-    } catch (IOException e) {
-      LOG.warn(
-          "Cannot access File System. File System status will be unknown: ", e);
-      unknown = true;
-    }
-
-    if (!unknown) {
-      for (Path loc : locations) {
-        try {
-          FileStatus status = fs.getFileStatus(tabLoc);
-          FileStatus[] files = fs.listStatus(loc);
-          long accessTime = ShimLoader.getHadoopShims().getAccessTime(status);
-          long updateTime = status.getModificationTime();
-          // no matter loc is the table location or part location, it must be a
-          // directory.
-          if (!status.isDir()) {
-            continue;
-          }
-          if (accessTime > lastAccessTime) {
-            lastAccessTime = accessTime;
-          }
-          if (updateTime > lastUpdateTime) {
-            lastUpdateTime = updateTime;
-          }
-          for (FileStatus currentStatus : files) {
-            if (currentStatus.isDir()) {
-              continue;
-            }
-            numOfFiles++;
-            long fileLen = currentStatus.getLen();
-            totalFileSize += fileLen;
-            if (fileLen > maxFileSize) {
-              maxFileSize = fileLen;
-            }
-            if (fileLen < minFileSize) {
-              minFileSize = fileLen;
-            }
-            accessTime = ShimLoader.getHadoopShims().getAccessTime(
-                currentStatus);
-            updateTime = currentStatus.getModificationTime();
-            if (accessTime > lastAccessTime) {
-              lastAccessTime = accessTime;
-            }
-            if (updateTime > lastUpdateTime) {
-              lastUpdateTime = updateTime;
-            }
-          }
-        } catch (IOException e) {
-          // ignore
-        }
-      }
-    }
-    String unknownString = "unknown";
-
-    for (int k = 0; k < indent; k++) {
-      outStream.writeBytes(Utilities.INDENT);
-    }
-    outStream.writeBytes("totalNumberFiles:");
-    outStream.writeBytes(unknown ? unknownString : "" + numOfFiles);
-    outStream.write(terminator);
-
-    for (int k = 0; k < indent; k++) {
-      outStream.writeBytes(Utilities.INDENT);
-    }
-    outStream.writeBytes("totalFileSize:");
-    outStream.writeBytes(unknown ? unknownString : "" + totalFileSize);
-    outStream.write(terminator);
-
-    for (int k = 0; k < indent; k++) {
-      outStream.writeBytes(Utilities.INDENT);
-    }
-    outStream.writeBytes("maxFileSize:");
-    outStream.writeBytes(unknown ? unknownString : "" + maxFileSize);
-    outStream.write(terminator);
-
-    for (int k = 0; k < indent; k++) {
-      outStream.writeBytes(Utilities.INDENT);
-    }
-    outStream.writeBytes("minFileSize:");
-    if (numOfFiles > 0) {
-      outStream.writeBytes(unknown ? unknownString : "" + minFileSize);
-    } else {
-      outStream.writeBytes(unknown ? unknownString : "" + 0);
-    }
-    outStream.write(terminator);
-
-    for (int k = 0; k < indent; k++) {
-      outStream.writeBytes(Utilities.INDENT);
-    }
-    outStream.writeBytes("lastAccessTime:");
-    outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : ""
-        + lastAccessTime);
-    outStream.write(terminator);
-
-    for (int k = 0; k < indent; k++) {
-      outStream.writeBytes(Utilities.INDENT);
-    }
-    outStream.writeBytes("lastUpdateTime:");
-    outStream.writeBytes(unknown ? unknownString : "" + lastUpdateTime);
-    outStream.write(terminator);
-  }
-
   /**
    * Alter a given table.
    *
@@ -2823,8 +2608,10 @@ public class DDLTask extends Task<DDLWor
     if(alterTbl.getPartSpec() != null) {
       part = db.getPartition(tbl, alterTbl.getPartSpec(), false);
       if(part == null) {
-        console.printError("Partition : " + alterTbl.getPartSpec().toString()
-            + " does not exist.");
+        formatter.consoleError(console,
+                               "Partition : " + alterTbl.getPartSpec().toString()
+                               + " does not exist.",
+                               formatter.MISSING);
         return 1;
       }
     }
@@ -2854,7 +2641,9 @@ public class DDLTask extends Task<DDLWor
           while (iterOldCols.hasNext()) {
             String oldColName = iterOldCols.next().getName();
             if (oldColName.equalsIgnoreCase(newColName)) {
-              console.printError("Column '" + newColName + "' exists");
+              formatter.consoleError(console,
+                                     "Column '" + newColName + "' exists",
+                                     formatter.CONFLICT);
               return 1;
             }
           }
@@ -2886,7 +2675,9 @@ public class DDLTask extends Task<DDLWor
         String oldColName = col.getName();
         if (oldColName.equalsIgnoreCase(newName)
             && !oldColName.equalsIgnoreCase(oldName)) {
-          console.printError("Column '" + newName + "' exists");
+          formatter.consoleError(console,
+                                 "Column '" + newName + "' exists",
+                                 formatter.CONFLICT);
           return 1;
         } else if (oldColName.equalsIgnoreCase(oldName)) {
           col.setName(newName);
@@ -2914,12 +2705,16 @@ public class DDLTask extends Task<DDLWor
 
       // did not find the column
       if (!found) {
-        console.printError("Column '" + oldName + "' does not exist");
+        formatter.consoleError(console,
+                               "Column '" + oldName + "' does not exists",
+                               formatter.MISSING);
         return 1;
       }
       // after column is not null, but we did not find it.
       if ((afterCol != null && !afterCol.trim().equals("")) && position < 0) {
-        console.printError("Column '" + afterCol + "' does not exist");
+        formatter.consoleError(console,
+                               "Column '" + afterCol + "' does not exists",
+                               formatter.MISSING);
         return 1;
       }
 
@@ -2940,8 +2735,10 @@ public class DDLTask extends Task<DDLWor
           && !tbl.getSerializationLib().equals(LazySimpleSerDe.class.getName())
           && !tbl.getSerializationLib().equals(ColumnarSerDe.class.getName())
           && !tbl.getSerializationLib().equals(DynamicSerDe.class.getName())) {
-        console.printError("Replace columns is not supported for this table. "
-            + "SerDe may be incompatible.");
+        formatter.consoleError(console,
+                               "Replace columns is not supported for this table. "
+                               + "SerDe may be incompatible.",
+                               formatter.ERROR);
         return 1;
       }
       tbl.getTTable().getSd().setCols(alterTbl.getNewCols());
@@ -3072,7 +2869,9 @@ public class DDLTask extends Task<DDLWor
         throw new HiveException(e);
       }
     } else {
-      console.printError("Unsupported Alter commnad");
+      formatter.consoleError(console,
+                             "Unsupported Alter commnad",
+                             formatter.ERROR);
       return 1;
     }
 
@@ -3083,8 +2882,9 @@ public class DDLTask extends Task<DDLWor
       try {
         tbl.checkValidity();
       } catch (HiveException e) {
-        console.printError("Invalid table columns : " + e.getMessage(),
-            stringifyException(e));
+        formatter.consoleError(console,
+                               "Invalid table columns : " + e.getMessage(),
+                               formatter.ERROR);
         return 1;
       }
     } else {
@@ -3256,8 +3056,10 @@ public class DDLTask extends Task<DDLWor
     try {
       user = conf.getUser();
     } catch (IOException e) {
-      console.printError("Unable to get current user: " + e.getMessage(),
-          stringifyException(e));
+      formatter.consoleError(console,
+                             "Unable to get current user: " + e.getMessage(),
+                             stringifyException(e),
+                             formatter.ERROR);
       return false;
     }
 
@@ -3681,8 +3483,10 @@ public class DDLTask extends Task<DDLWor
     try {
       tbl.setOwner(conf.getUser());
     } catch (IOException e) {
-      console.printError("Unable to get current user: " + e.getMessage(),
-          stringifyException(e));
+      formatter.consoleError(console,
+                             "Unable to get current user: " + e.getMessage(),
+                             stringifyException(e),
+                             formatter.ERROR);
       return 1;
     }
     // set create time

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java?rev=1309666&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java Thu Apr  5 06:12:15 2012
@@ -0,0 +1,462 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.metadata.formatting;
+
+import java.io.DataOutputStream;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.codehaus.jackson.map.ObjectMapper;
+
+/**
+ * Format table and index information for machine readability using
+ * json.
+ */
+public class JsonMetaDataFormatter implements MetaDataFormatter {
+    private static final Log LOG = LogFactory.getLog("hive.ql.exec.DDLTask");
+
+    /**
+     * Convert the map to a JSON string.
+     */
+    public void asJson(OutputStream out, Map<String, Object> data)
+        throws HiveException
+    {
+        try {
+            new ObjectMapper().writeValue(out, data);
+        } catch (IOException e) {
+            throw new HiveException("Unable to convert to json", e);
+        }
+    }
+
+    /**
+     * Write an error message.
+     */
+    public void error(OutputStream out, String msg, int errorCode)
+        throws HiveException
+    {
+        asJson(out,
+               MapBuilder.create()
+               .put("error", msg)
+               .put("errorCode", errorCode)
+               .build());
+    }
+
+    /**
+     * Write a log warn message.
+     */
+    public void logWarn(OutputStream out, String msg, int errorCode)
+        throws HiveException
+    {
+        LOG.warn(msg);
+        error(out, msg, errorCode);
+    }
+
+    /**
+     * Write a log info message.
+     */
+    public void logInfo(OutputStream out, String msg, int errorCode)
+        throws HiveException
+    {
+        LOG.info(msg);
+        error(out, msg, errorCode);
+    }
+
+    /**
+     * Write a console error message.
+     */
+    public void consoleError(LogHelper console, String msg, int errorCode) {
+        try {
+            console.printError(msg);
+            error(console.getOutStream(), msg, errorCode);
+        } catch (HiveException e) {
+            console.printError("unable to create json: " + e);
+        }
+    }
+
+    /**
+     * Write a console error message.
+     */
+    public void consoleError(LogHelper console, String msg, String detail,
+                             int errorCode)
+    {
+        try {
+            console.printError(msg, detail);
+            asJson(console.getOutStream(),
+                   MapBuilder.create()
+                   .put("error", msg)
+                   .put("errorDetail", detail)
+                   .put("errorCode", errorCode)
+                   .build());
+        } catch (HiveException e) {
+            console.printError("unable to create json: " + e);
+        }
+    }
+
+    /**
+     * Show a list of tables.
+     */
+    public void showTables(DataOutputStream out, Set<String> tables)
+        throws HiveException
+    {
+        asJson(out,
+               MapBuilder.create()
+               .put("tables", tables)
+               .build());
+    }
+
+    /**
+     * Describe table.
+     */
+    public void describeTable(DataOutputStream out,
+                              String colPath, String tableName,
+                              Table tbl, Partition part, List<FieldSchema> cols,
+                              boolean isFormatted, boolean isExt)
+        throws HiveException
+    {
+        MapBuilder builder = MapBuilder.create();
+
+        builder.put("columns", makeColsUnformatted(cols));
+
+        if (isExt) {
+            if (part != null)
+                builder.put("partitionInfo", part.getTPartition());
+            else
+                builder.put("tableInfo", tbl.getTTable());
+        }
+
+        asJson(out, builder.build());
+    }
+
+    private List<Map<String, Object>> makeColsUnformatted(List<FieldSchema> cols) {
+        ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+        for (FieldSchema col : cols)
+            res.add(makeOneColUnformatted(col));
+        return res;
+    }
+
+    private Map<String, Object> makeOneColUnformatted(FieldSchema col) {
+        return MapBuilder.create()
+            .put("name", col.getName())
+            .put("type", col.getType())
+            .put("comment", col.getComment())
+            .build();
+    }
+
+    public void showTableStatus(DataOutputStream out,
+                                Hive db,
+                                HiveConf conf,
+                                List<Table> tbls,
+                                Map<String, String> part,
+                                Partition par)
+        throws HiveException
+    {
+        asJson(out, MapBuilder
+               .create()
+               .put("tables", makeAllTableStatus(db, conf,
+                                                 tbls, part, par))
+               .build());
+    }
+
+    private List<Map<String, Object>> makeAllTableStatus(Hive db,
+                                    HiveConf conf,
+                                    List<Table> tbls,
+                                    Map<String, String> part,
+                                    Partition par)
+        throws HiveException
+    {
+        try {
+            ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+            for (Table tbl : tbls)
+                res.add(makeOneTableStatus(tbl, db, conf, part, par));
+            return res;
+        } catch(IOException e) {
+            throw new HiveException(e);
+        }
+    }
+
+    private Map<String, Object> makeOneTableStatus(Table tbl,
+                                   Hive db,
+                                   HiveConf conf,
+                                   Map<String, String> part,
+                                   Partition par)
+        throws HiveException, IOException
+    {
+        String tblLoc = null;
+        String inputFormattCls = null;
+        String outputFormattCls = null;
+        if (part != null) {
+          if (par != null) {
+            if (par.getLocation() != null) {
+              tblLoc = par.getDataLocation().toString();
+            }
+            inputFormattCls = par.getInputFormatClass().getName();
+            outputFormattCls = par.getOutputFormatClass().getName();
+          }
+        } else {
+          if (tbl.getPath() != null) {
+            tblLoc = tbl.getDataLocation().toString();
+          }
+          inputFormattCls = tbl.getInputFormatClass().getName();
+          outputFormattCls = tbl.getOutputFormatClass().getName();
+        }
+
+        MapBuilder builder = MapBuilder.create();
+
+        builder.put("tableName", tbl.getTableName());
+        builder.put("owner", tbl.getOwner());
+        builder.put("location", tblLoc);
+        builder.put("inputFormat", inputFormattCls);
+        builder.put("outputFormat", outputFormattCls);
+        builder.put("columns", makeColsUnformatted(tbl.getCols()));
+
+        builder.put("partitioned", tbl.isPartitioned());
+        if (tbl.isPartitioned())
+            builder.put("partitionColumns", makeColsUnformatted(tbl.getPartCols()));
+
+        putFileSystemsStats(builder, makeTableStatusLocations(tbl, db, par),
+                            conf, tbl.getPath());
+
+        return builder.build();
+    }
+
+    private List<Path> makeTableStatusLocations(Table tbl, Hive db, Partition par)
+        throws HiveException
+    {
+        // output file system information
+        Path tblPath = tbl.getPath();
+        List<Path> locations = new ArrayList<Path>();
+        if (tbl.isPartitioned()) {
+          if (par == null) {
+            for (Partition curPart : db.getPartitions(tbl)) {
+              if (curPart.getLocation() != null) {
+                locations.add(new Path(curPart.getLocation()));
+              }
+            }
+          } else {
+            if (par.getLocation() != null) {
+              locations.add(new Path(par.getLocation()));
+            }
+          }
+        } else {
+          if (tblPath != null) {
+            locations.add(tblPath);
+          }
+        }
+
+        return locations;
+    }
+
+    // Duplicates logic in TextMetaDataFormatter
+    private void putFileSystemsStats(MapBuilder builder, List<Path> locations,
+                                     HiveConf conf, Path tblPath)
+        throws IOException
+    {
+      long totalFileSize = 0;
+      long maxFileSize = 0;
+      long minFileSize = Long.MAX_VALUE;
+      long lastAccessTime = 0;
+      long lastUpdateTime = 0;
+      int numOfFiles = 0;
+
+      boolean unknown = false;
+      FileSystem fs = tblPath.getFileSystem(conf);
+      // in case all files in locations do not exist
+      try {
+        FileStatus tmpStatus = fs.getFileStatus(tblPath);
+        lastAccessTime = ShimLoader.getHadoopShims().getAccessTime(tmpStatus);
+        lastUpdateTime = tmpStatus.getModificationTime();
+      } catch (IOException e) {
+        LOG.warn(
+            "Cannot access File System. File System status will be unknown: ", e);
+        unknown = true;
+      }
+
+      if (!unknown) {
+        for (Path loc : locations) {
+          try {
+            FileStatus status = fs.getFileStatus(tblPath);
+            FileStatus[] files = fs.listStatus(loc);
+            long accessTime = ShimLoader.getHadoopShims().getAccessTime(status);
+            long updateTime = status.getModificationTime();
+            // no matter loc is the table location or part location, it must be a
+            // directory.
+            if (!status.isDir()) {
+              continue;
+            }
+            if (accessTime > lastAccessTime) {
+              lastAccessTime = accessTime;
+            }
+            if (updateTime > lastUpdateTime) {
+              lastUpdateTime = updateTime;
+            }
+            for (FileStatus currentStatus : files) {
+              if (currentStatus.isDir()) {
+                continue;
+              }
+              numOfFiles++;
+              long fileLen = currentStatus.getLen();
+              totalFileSize += fileLen;
+              if (fileLen > maxFileSize) {
+                maxFileSize = fileLen;
+              }
+              if (fileLen < minFileSize) {
+                minFileSize = fileLen;
+              }
+              accessTime = ShimLoader.getHadoopShims().getAccessTime(
+                  currentStatus);
+              updateTime = currentStatus.getModificationTime();
+              if (accessTime > lastAccessTime) {
+                lastAccessTime = accessTime;
+              }
+              if (updateTime > lastUpdateTime) {
+                lastUpdateTime = updateTime;
+              }
+            }
+          } catch (IOException e) {
+            // ignore
+          }
+        }
+      }
+
+      builder
+          .put("totalNumberFiles", numOfFiles, ! unknown)
+          .put("totalFileSize",    totalFileSize, ! unknown)
+          .put("maxFileSize",      maxFileSize, ! unknown)
+          .put("minFileSize",      numOfFiles > 0 ? minFileSize : 0, ! unknown)
+          .put("lastAccessTime",   lastAccessTime, ! (unknown  || lastAccessTime < 0))
+          .put("lastUpdateTime",   lastUpdateTime, ! unknown);
+    }
+
+    /**
+     * Show the table partitions.
+     */
+    public void showTablePartitons(DataOutputStream out, List<String> parts)
+        throws HiveException
+    {
+        asJson(out,
+               MapBuilder.create()
+               .put("partitions", makeTablePartions(parts))
+               .build());
+    }
+
+    private List<Map<String, Object>> makeTablePartions(List<String> parts)
+        throws HiveException
+    {
+        try {
+            ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+            for (String part : parts)
+                res.add(makeOneTablePartition(part));
+            return res;
+        } catch (UnsupportedEncodingException e) {
+            throw new HiveException(e);
+        }
+    }
+
+    // This seems like a very wrong implementation.
+    private Map<String, Object> makeOneTablePartition(String partIdent)
+        throws UnsupportedEncodingException
+    {
+        ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+
+        ArrayList<String> names = new ArrayList<String>();
+        for (String part : StringUtils.split(partIdent, "/")) {
+            String name = part;
+            String val = null;
+            String[] kv = StringUtils.split(part, "=", 2);
+            if (kv != null) {
+                name = kv[0];
+                if (kv.length > 1)
+                    val = URLDecoder.decode(kv[1], "UTF-8");
+            }
+            if (val != null)
+                names.add(name + "='" + val + "'");
+            else
+                names.add(name);
+
+            res.add(MapBuilder.create()
+                    .put("columnName", name)
+                    .put("columnValue", val)
+                    .build());
+        }
+
+        return MapBuilder.create()
+            .put("name", StringUtils.join(names, ","))
+            .put("values", res)
+            .build();
+    }
+
+    /**
+     * Show a list of databases
+     */
+    public void showDatabases(DataOutputStream out, List<String> databases)
+        throws HiveException
+    {
+        asJson(out,
+               MapBuilder.create()
+               .put("databases", databases)
+               .build());
+    }
+
+    /**
+     * Show the description of a database
+     */
+    public void showDatabaseDescription(DataOutputStream out,
+                                        String database,
+                                        String comment,
+                                        String location,
+                                        Map<String, String> params)
+        throws HiveException
+    {
+        if (params == null || params.isEmpty()) {
+            asJson(out, MapBuilder
+               .create()
+               .put("database", database)
+               .put("comment", comment)
+               .put("location", location)
+               .build());
+        } else {
+            asJson(out, MapBuilder
+               .create()
+               .put("database", database)
+               .put("comment", comment)
+               .put("location", location)
+               .put("params", params)
+               .build());
+        }
+    }
+}

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MapBuilder.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MapBuilder.java?rev=1309666&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MapBuilder.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MapBuilder.java Thu Apr  5 06:12:15 2012
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.metadata.formatting;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Helper class to build Maps consumed by the JSON formatter.  Only
+ * add non-null entries to the Map.
+ */
+public class MapBuilder {
+    private Map<String, Object> map = new HashMap<String, Object>();
+
+    private MapBuilder() {}
+
+    public static MapBuilder create() {
+        return new MapBuilder();
+    }
+
+    public MapBuilder put(String name, Object val) {
+        if (val != null)
+            map.put(name, val);
+        return this;
+    }
+
+    public MapBuilder put(String name, boolean val) {
+        map.put(name, Boolean.valueOf(val));
+        return this;
+    }
+
+    public MapBuilder put(String name, int val) {
+        map.put(name, Integer.valueOf(val));
+        return this;
+    }
+
+    public MapBuilder put(String name, long val) {
+        map.put(name, Long.valueOf(val));
+        return this;
+    }
+
+    public <T> MapBuilder put(String name, T val, boolean use) {
+        if (use)
+            put(name, val);
+        return this;
+    }
+
+    public Map<String, Object> build() {
+        return map;
+    }
+}
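
MapBuilder is what JsonMetaDataFormatter hands to Jackson's ObjectMapper
in asJson().  A small hypothetical usage sketch (the class name is
illustrative) showing how the null-skipping put() and the conditional
put() combine:

    import java.util.Map;
    import org.apache.hadoop.hive.ql.metadata.formatting.MapBuilder;
    import org.codehaus.jackson.map.ObjectMapper;

    public class MapBuilderDemo {
        public static void main(String[] args) throws Exception {
            Map<String, Object> data = MapBuilder.create()
                .put("tableName", "my_table")
                .put("comment", null)              // null values are skipped entirely
                .put("partitioned", true)
                .put("totalNumberFiles", 3, true)  // conditional put: kept only if flag is true
                .build();
            // same serialization path as JsonMetaDataFormatter.asJson()
            new ObjectMapper().writeValue(System.out, data);
            // prints something like:
            // {"tableName":"my_table","partitioned":true,"totalNumberFiles":3}
        }
    }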

Copied: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java (from r1309624, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/MetaDataFormatUtils.java)
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java?p2=hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java&p1=hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/MetaDataFormatUtils.java&r1=1309624&r2=1309666&rev=1309666&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/MetaDataFormatUtils.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java Thu Apr  5 06:12:15 2012
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.ql.metadata;
+package org.apache.hadoop.hive.ql.metadata.formatting;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -30,6 +30,8 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.ql.index.HiveIndex;
 import org.apache.hadoop.hive.ql.index.HiveIndex.IndexType;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.DescTableDesc;
 import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc;
 

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java?rev=1309666&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java Thu Apr  5 06:12:15 2012
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.metadata.formatting;
+
+import java.io.DataOutputStream;
+import java.io.OutputStream;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+
+/**
+ * Interface to format table and index information.  We can format it
+ * for human readability (lines of text) or for machine readability
+ * (json).
+ */
+public interface MetaDataFormatter {
+    /**
+     * Generic error code.  This and the other error codes are
+     * designed to match the HTTP status codes.
+     */
+    static final int ERROR = 500;
+
+    /**
+     * Missing error code.
+     */
+    static final int MISSING = 404;
+
+    /**
+     * Conflict error code.
+     */
+    static final int CONFLICT = 409;
+
+    /**
+     * Write an error message.
+     */
+    public void error(OutputStream out, String msg, int errorCode)
+        throws HiveException;
+
+    /**
+     * Write a log warn message.
+     */
+    public void logWarn(OutputStream out, String msg, int errorCode)
+        throws HiveException;
+
+    /**
+     * Write a log info message.
+     */
+    public void logInfo(OutputStream out, String msg, int errorCode)
+        throws HiveException;
+
+    /**
+     * Write a console error message.
+     */
+    public void consoleError(LogHelper console, String msg, int errorCode);
+
+    /**
+     * Write a console error message with additional detail.
+     */
+    public void consoleError(LogHelper console, String msg, String detail,
+                             int errorCode);
+
+    /**
+     * Show a list of tables.
+     */
+    public void showTables(DataOutputStream out, Set<String> tables)
+        throws HiveException;
+
+    /**
+     * Describe table.
+     */
+    public void describeTable(DataOutputStream out,
+                              String colPath, String tableName,
+                              Table tbl, Partition part, List<FieldSchema> cols,
+                              boolean isFormatted, boolean isExt)
+        throws HiveException;
+
+    /**
+     * Show the table status.
+     */
+    public void showTableStatus(DataOutputStream out,
+                                Hive db,
+                                HiveConf conf,
+                                List<Table> tbls,
+                                Map<String, String> part,
+                                Partition par)
+        throws HiveException;
+
+    /**
+     * Show the table partitions.
+     */
+    public void showTablePartitons(DataOutputStream out,
+                                   List<String> parts)
+        throws HiveException;
+
+    /**
+     * Show the databases.
+     */
+    public void showDatabases(DataOutputStream out, List<String> databases)
+        throws HiveException;
+
+    /**
+     * Describe a database.
+     */
+    public void showDatabaseDescription(DataOutputStream out,
+                                        String database,
+                                        String comment,
+                                        String location,
+                                        Map<String, String> params)
+        throws HiveException;
+}
+

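To make the MetaDataFormatter contract concrete: a JSON implementation of showDatabases,
for instance, emits a single object with a "databases" array, which is the shape the new
describe_database_json.q.out below expects. A minimal sketch, assuming Jackson 1.x
(org.codehaus.jackson) for serialization; the JsonMetaDataFormatter actually added by
this revision is not reproduced in this excerpt and may be structured differently:

    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    import org.codehaus.jackson.map.ObjectMapper;  // assumption: Jackson 1.x is on the classpath

    // Illustrative only: the JSON shape of one formatter method, not the committed class.
    public class JsonShowDatabasesSketch {
        private final ObjectMapper mapper = new ObjectMapper();

        // Writes {"databases":["default",...]} followed by a newline.
        public void showDatabases(DataOutputStream out, List<String> databases)
            throws IOException {
            Map<String, Object> payload =
                Collections.<String, Object>singletonMap("databases", databases);
            out.writeBytes(mapper.writeValueAsString(payload));
            out.writeBytes("\n");
        }
    }
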
Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java?rev=1309666&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java Thu Apr  5 06:12:15 2012
@@ -0,0 +1,458 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.metadata.formatting;
+
+import java.io.DataOutputStream;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hadoop.hive.shims.ShimLoader;
+
+/**
+ * Format table and index information for human readability using
+ * simple lines of text.
+ */
+public class TextMetaDataFormatter implements MetaDataFormatter {
+    private static final Log LOG = LogFactory.getLog("hive.ql.exec.DDLTask");
+
+    private static final int separator = Utilities.tabCode;
+    private static final int terminator = Utilities.newLineCode;
+
+    /**
+     * Write an error message.
+     */
+    public void error(OutputStream out, String msg, int errorCode)
+        throws HiveException
+    {
+        try {
+            out.write(msg.getBytes("UTF-8"));
+            out.write(terminator);
+        } catch (Exception e) {
+            throw new HiveException(e);
+        }
+    }
+
+    /**
+     * Write a log warn message.
+     */
+    public void logWarn(OutputStream out, String msg, int errorCode)
+        throws HiveException
+    {
+        LOG.warn(msg);
+    }
+
+    /**
+     * Write a log info message.
+     */
+    public void logInfo(OutputStream out, String msg, int errorCode)
+        throws HiveException
+    {
+        LOG.info(msg);
+    }
+
+    /**
+     * Write a console error message.
+     */
+    public void consoleError(LogHelper console, String msg, int errorCode) {
+        console.printError(msg);
+    }
+
+    /**
+     * Write a console error message with additional detail.
+     */
+    public void consoleError(LogHelper console, String msg, String detail,
+                             int errorCode)
+    {
+        console.printError(msg, detail);
+    }
+
+    /**
+     * Show a list of tables.
+     */
+    public void showTables(DataOutputStream out, Set<String> tables)
+        throws HiveException
+    {
+        Iterator<String> iterTbls = tables.iterator();
+
+        try {
+            while (iterTbls.hasNext()) {
+                // create a row per table name
+                out.writeBytes(iterTbls.next());
+                out.write(terminator);
+            }
+        } catch (IOException e) {
+           throw new HiveException(e);
+        }
+    }
+
+    public void describeTable(DataOutputStream outStream,
+                              String colPath, String tableName,
+                              Table tbl, Partition part, List<FieldSchema> cols,
+                              boolean isFormatted, boolean isExt)
+         throws HiveException
+   {
+       try {
+         if (colPath.equals(tableName)) {
+           if (!isFormatted) {
+             outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
+           } else {
+             outStream.writeBytes(
+               MetaDataFormatUtils.getAllColumnsInformation(cols,
+                 tbl.isPartitioned() ? tbl.getPartCols() : null));
+           }
+         } else {
+           if (isFormatted) {
+             outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(cols));
+           } else {
+             outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
+           }
+         }
+
+         if (tableName.equals(colPath)) {
+
+           if (isFormatted) {
+             if (part != null) {
+               outStream.writeBytes(MetaDataFormatUtils.getPartitionInformation(part));
+             } else {
+               outStream.writeBytes(MetaDataFormatUtils.getTableInformation(tbl));
+             }
+           }
+
+           // if extended desc table then show the complete details of the table
+           if (isExt) {
+             // add empty line
+             outStream.write(terminator);
+             if (part != null) {
+               // show partition information
+               outStream.writeBytes("Detailed Partition Information");
+               outStream.write(separator);
+               outStream.writeBytes(part.getTPartition().toString());
+               outStream.write(separator);
+               // comment column is empty
+               outStream.write(terminator);
+             } else {
+               // show table information
+               outStream.writeBytes("Detailed Table Information");
+               outStream.write(separator);
+               outStream.writeBytes(tbl.getTTable().toString());
+               outStream.write(separator);
+               outStream.write(terminator);
+             }
+           }
+         }
+       } catch (IOException e) {
+           throw new HiveException(e);
+       }
+    }
+
+    public void showTableStatus(DataOutputStream outStream,
+                                Hive db,
+                                HiveConf conf,
+                                List<Table> tbls,
+                                Map<String, String> part,
+                                Partition par)
+        throws HiveException
+    {
+        try {
+            Iterator<Table> iterTables = tbls.iterator();
+            while (iterTables.hasNext()) {
+              // create a row per table name
+              Table tbl = iterTables.next();
+              String tableName = tbl.getTableName();
+              String tblLoc = null;
+              String inputFormatCls = null;
+              String outputFormatCls = null;
+              if (part != null) {
+                if (par != null) {
+                  if (par.getLocation() != null) {
+                    tblLoc = par.getDataLocation().toString();
+                  }
+                  inputFormatCls = par.getInputFormatClass().getName();
+                  outputFormatCls = par.getOutputFormatClass().getName();
+                }
+              } else {
+                if (tbl.getPath() != null) {
+                  tblLoc = tbl.getDataLocation().toString();
+                }
+                inputFormatCls = tbl.getInputFormatClass().getName();
+                outputFormatCls = tbl.getOutputFormatClass().getName();
+              }
+
+              String owner = tbl.getOwner();
+              List<FieldSchema> cols = tbl.getCols();
+              String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols);
+              boolean isPartitioned = tbl.isPartitioned();
+              String partitionCols = "";
+              if (isPartitioned) {
+                partitionCols = MetaStoreUtils.getDDLFromFieldSchema(
+                    "partition_columns", tbl.getPartCols());
+              }
+
+              outStream.writeBytes("tableName:" + tableName);
+              outStream.write(terminator);
+              outStream.writeBytes("owner:" + owner);
+              outStream.write(terminator);
+              outStream.writeBytes("location:" + tblLoc);
+              outStream.write(terminator);
+              outStream.writeBytes("inputformat:" + inputFormattCls);
+              outStream.write(terminator);
+              outStream.writeBytes("outputformat:" + outputFormattCls);
+              outStream.write(terminator);
+              outStream.writeBytes("columns:" + ddlCols);
+              outStream.write(terminator);
+              outStream.writeBytes("partitioned:" + isPartitioned);
+              outStream.write(terminator);
+              outStream.writeBytes("partitionColumns:" + partitionCols);
+              outStream.write(terminator);
+              // output file system information
+              Path tblPath = tbl.getPath();
+              List<Path> locations = new ArrayList<Path>();
+              if (isPartitioned) {
+                if (par == null) {
+                  for (Partition curPart : db.getPartitions(tbl)) {
+                    if (curPart.getLocation() != null) {
+                      locations.add(new Path(curPart.getLocation()));
+                    }
+                  }
+                } else {
+                  if (par.getLocation() != null) {
+                    locations.add(new Path(par.getLocation()));
+                  }
+                }
+              } else {
+                if (tblPath != null) {
+                  locations.add(tblPath);
+                }
+              }
+              if (!locations.isEmpty()) {
+                writeFileSystemStats(outStream, conf, locations, tblPath, false, 0);
+              }
+
+              outStream.write(terminator);
+            }
+        } catch (IOException e) {
+            throw new HiveException(e);
+        }
+    }
+
+    private void writeFileSystemStats(DataOutputStream outStream,
+                                      HiveConf conf,
+                                      List<Path> locations,
+                                      Path tblPath, boolean partSpecified, int indent)
+        throws IOException
+    {
+      long totalFileSize = 0;
+      long maxFileSize = 0;
+      long minFileSize = Long.MAX_VALUE;
+      long lastAccessTime = 0;
+      long lastUpdateTime = 0;
+      int numOfFiles = 0;
+
+      boolean unknown = false;
+      FileSystem fs = tblPath.getFileSystem(conf);
+      // the table path or partition locations may not exist in the file system
+      try {
+        FileStatus tmpStatus = fs.getFileStatus(tblPath);
+        lastAccessTime = ShimLoader.getHadoopShims().getAccessTime(tmpStatus);
+        lastUpdateTime = tmpStatus.getModificationTime();
+        if (partSpecified) {
+          // check whether the partition exists in the file system
+          tmpStatus = fs.getFileStatus(locations.get(0));
+        }
+      } catch (IOException e) {
+        LOG.warn(
+            "Cannot access File System. File System status will be unknown: ", e);
+        unknown = true;
+      }
+
+      if (!unknown) {
+        for (Path loc : locations) {
+          try {
+            FileStatus status = fs.getFileStatus(loc);
+            FileStatus[] files = fs.listStatus(loc);
+            long accessTime = ShimLoader.getHadoopShims().getAccessTime(status);
+            long updateTime = status.getModificationTime();
+            // whether loc is the table location or a partition location, it
+            // must be a directory.
+            if (!status.isDir()) {
+              continue;
+            }
+            if (accessTime > lastAccessTime) {
+              lastAccessTime = accessTime;
+            }
+            if (updateTime > lastUpdateTime) {
+              lastUpdateTime = updateTime;
+            }
+            for (FileStatus currentStatus : files) {
+              if (currentStatus.isDir()) {
+                continue;
+              }
+              numOfFiles++;
+              long fileLen = currentStatus.getLen();
+              totalFileSize += fileLen;
+              if (fileLen > maxFileSize) {
+                maxFileSize = fileLen;
+              }
+              if (fileLen < minFileSize) {
+                minFileSize = fileLen;
+              }
+              accessTime = ShimLoader.getHadoopShims().getAccessTime(
+                  currentStatus);
+              updateTime = currentStatus.getModificationTime();
+              if (accessTime > lastAccessTime) {
+                lastAccessTime = accessTime;
+              }
+              if (updateTime > lastUpdateTime) {
+                lastUpdateTime = updateTime;
+              }
+            }
+          } catch (IOException e) {
+            // ignore
+          }
+        }
+      }
+      String unknownString = "unknown";
+
+      for (int k = 0; k < indent; k++) {
+        outStream.writeBytes(Utilities.INDENT);
+      }
+      outStream.writeBytes("totalNumberFiles:");
+      outStream.writeBytes(unknown ? unknownString : "" + numOfFiles);
+      outStream.write(terminator);
+
+      for (int k = 0; k < indent; k++) {
+        outStream.writeBytes(Utilities.INDENT);
+      }
+      outStream.writeBytes("totalFileSize:");
+      outStream.writeBytes(unknown ? unknownString : "" + totalFileSize);
+      outStream.write(terminator);
+
+      for (int k = 0; k < indent; k++) {
+        outStream.writeBytes(Utilities.INDENT);
+      }
+      outStream.writeBytes("maxFileSize:");
+      outStream.writeBytes(unknown ? unknownString : "" + maxFileSize);
+      outStream.write(terminator);
+
+      for (int k = 0; k < indent; k++) {
+        outStream.writeBytes(Utilities.INDENT);
+      }
+      outStream.writeBytes("minFileSize:");
+      if (numOfFiles > 0) {
+        outStream.writeBytes(unknown ? unknownString : "" + minFileSize);
+      } else {
+        outStream.writeBytes(unknown ? unknownString : "" + 0);
+      }
+      outStream.write(terminator);
+
+      for (int k = 0; k < indent; k++) {
+        outStream.writeBytes(Utilities.INDENT);
+      }
+      outStream.writeBytes("lastAccessTime:");
+      outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : ""
+          + lastAccessTime);
+      outStream.write(terminator);
+
+      for (int k = 0; k < indent; k++) {
+        outStream.writeBytes(Utilities.INDENT);
+      }
+      outStream.writeBytes("lastUpdateTime:");
+      outStream.writeBytes(unknown ? unknownString : "" + lastUpdateTime);
+      outStream.write(terminator);
+    }
+
+    /**
+     * Show the table partitions.
+     */
+    public void showTablePartitons(DataOutputStream outStream, List<String> parts)
+        throws HiveException
+    {
+        try {
+            for (String part : parts) {
+                outStream.writeBytes(part);
+                outStream.write(terminator);
+            }
+        } catch (IOException e) {
+            throw new HiveException(e);
+        }
+    }
+
+    /**
+     * Show the list of databases.
+     */
+    public void showDatabases(DataOutputStream outStream, List<String> databases)
+        throws HiveException
+    {
+        try {
+            for (String database : databases) {
+                // create a row per database name
+                outStream.writeBytes(database);
+                outStream.write(terminator);
+            }
+        } catch (IOException e) {
+            throw new HiveException(e);
+        }
+    }
+
+    /**
+     * Describe a database.
+     */
+    public void showDatabaseDescription(DataOutputStream outStream,
+                                        String database,
+                                        String comment,
+                                        String location,
+                                        Map<String, String> params)
+        throws HiveException
+    {
+        try {
+            outStream.writeBytes(database);
+            outStream.write(separator);
+            if (comment != null)
+                outStream.writeBytes(comment);
+            outStream.write(separator);
+            if (location != null)
+                outStream.writeBytes(location);
+            outStream.write(separator);
+            if (params != null && !params.isEmpty()) {
+                outStream.writeBytes(params.toString());
+            }
+            outStream.write(terminator);
+        } catch (IOException e) {
+            throw new HiveException(e);
+        }
+    }
+}

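TextMetaDataFormatter keeps the existing line-oriented behavior; the JSON formatter is
selected through the hive.ddl.output.format property that the new tests below toggle.
A hedged sketch of how a caller such as DDLTask could pick a formatter from the
configuration (the helper name is illustrative, and the actual wiring elsewhere in this
revision may differ):

    import org.apache.hadoop.hive.conf.HiveConf;

    // Illustrative selection of a MetaDataFormatter from the session configuration.
    final class FormatterSelectorSketch {
        private FormatterSelectorSketch() {
        }

        // "json" selects the JSON formatter added elsewhere in this revision;
        // anything else falls back to the text formatter above.
        static MetaDataFormatter select(HiveConf conf) {
            String format = conf.get("hive.ddl.output.format", "text");
            if ("json".equalsIgnoreCase(format)) {
                return new JsonMetaDataFormatter();
            }
            return new TextMetaDataFormatter();
        }
    }
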
Added: hive/trunk/ql/src/test/queries/clientpositive/describe_database_json.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/describe_database_json.q?rev=1309666&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/describe_database_json.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/describe_database_json.q Thu Apr  5 06:12:15 2012
@@ -0,0 +1,23 @@
+set hive.ddl.output.format=json;
+
+CREATE DATABASE IF NOT EXISTS jsondb1 COMMENT 'Test database' LOCATION '${hiveconf:hive.metastore.warehouse.dir}/jsondb1' WITH DBPROPERTIES ('id' = 'jsondb1'); 
+
+DESCRIBE DATABASE jsondb1;
+
+DESCRIBE DATABASE EXTENDED jsondb1;
+
+SHOW DATABASES;
+
+SHOW DATABASES LIKE 'json*';
+
+DROP DATABASE jsondb1;
+
+CREATE DATABASE jsondb1;
+
+DESCRIBE DATABASE jsondb1;
+
+DESCRIBE DATABASE EXTENDED jsondb1;
+
+DROP DATABASE jsondb1;
+
+set hive.ddl.output.format=text;

Added: hive/trunk/ql/src/test/queries/clientpositive/describe_table_json.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/describe_table_json.q?rev=1309666&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/describe_table_json.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/describe_table_json.q Thu Apr  5 06:12:15 2012
@@ -0,0 +1,19 @@
+set hive.ddl.output.format=json;
+
+CREATE TABLE IF NOT EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE;
+
+SHOW TABLES;
+
+SHOW TABLES LIKE 'json*';
+
+SHOW TABLE EXTENDED LIKE 'json*';
+
+ALTER TABLE jsontable SET TBLPROPERTIES ('id' = 'jsontable');
+
+DESCRIBE jsontable;
+
+DESCRIBE extended jsontable;
+
+DROP TABLE jsontable;
+
+set hive.ddl.output.format=text;

Added: hive/trunk/ql/src/test/queries/clientpositive/misc_json.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/misc_json.q?rev=1309666&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/misc_json.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/misc_json.q Thu Apr  5 06:12:15 2012
@@ -0,0 +1,13 @@
+set hive.ddl.output.format=json;
+
+CREATE TABLE IF NOT EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE;
+
+ALTER TABLE jsontable ADD COLUMNS (name STRING COMMENT 'a new column'); 
+
+ALTER TABLE jsontable RENAME TO jsontable2;
+
+SHOW TABLE EXTENDED LIKE jsontable2;
+
+DROP TABLE jsontable2;
+
+set hive.ddl.output.format=text;

Added: hive/trunk/ql/src/test/queries/clientpositive/partitions_json.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/partitions_json.q?rev=1309666&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/partitions_json.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/partitions_json.q Thu Apr  5 06:12:15 2012
@@ -0,0 +1,21 @@
+set hive.ddl.output.format=json;
+
+CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING);
+SHOW PARTITIONS add_part_test;
+
+ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01');
+SHOW PARTITIONS add_part_test;
+
+ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01');
+SHOW PARTITIONS add_part_test;
+
+ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02');
+SHOW PARTITIONS add_part_test;
+
+SHOW TABLE EXTENDED LIKE add_part_test PARTITION (ds='2010-01-02');
+
+ALTER TABLE add_part_test DROP PARTITION (ds='2010-01-02');
+
+DROP TABLE add_part_test;
+
+set hive.ddl.output.format=text;

Modified: hive/trunk/ql/src/test/results/clientnegative/database_create_already_exists.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/database_create_already_exists.q.out?rev=1309666&r1=1309665&r2=1309666&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/database_create_already_exists.q.out (original)
+++ hive/trunk/ql/src/test/results/clientnegative/database_create_already_exists.q.out Thu Apr  5 06:12:15 2012
@@ -11,5 +11,5 @@ CREATE DATABASE test_db
 POSTHOOK: type: CREATEDATABASE
 PREHOOK: query: CREATE DATABASE test_db
 PREHOOK: type: CREATEDATABASE
-Failed with exception Database test_db already exists
+Database test_db already exists
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask

Modified: hive/trunk/ql/src/test/results/clientnegative/database_drop_does_not_exist.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/database_drop_does_not_exist.q.out?rev=1309666&r1=1309665&r2=1309666&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/database_drop_does_not_exist.q.out (original)
+++ hive/trunk/ql/src/test/results/clientnegative/database_drop_does_not_exist.q.out Thu Apr  5 06:12:15 2012
@@ -6,5 +6,5 @@ default
 PREHOOK: query: -- Try to drop a database that does not exist
 DROP DATABASE does_not_exist
 PREHOOK: type: DROPDATABASE
-Failed with exception There is no database named does_not_exist
+There is no database named does_not_exist
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask

Added: hive/trunk/ql/src/test/results/clientpositive/describe_database_json.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/describe_database_json.q.out?rev=1309666&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/describe_database_json.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/describe_database_json.q.out Thu Apr  5 06:12:15 2012
@@ -0,0 +1,46 @@
+#### A masked pattern was here ####
+PREHOOK: type: CREATEDATABASE
+#### A masked pattern was here ####
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: DESCRIBE DATABASE jsondb1
+PREHOOK: type: DESCDATABASE
+POSTHOOK: query: DESCRIBE DATABASE jsondb1
+POSTHOOK: type: DESCDATABASE
+#### A masked pattern was here ####
+PREHOOK: query: DESCRIBE DATABASE EXTENDED jsondb1
+PREHOOK: type: DESCDATABASE
+POSTHOOK: query: DESCRIBE DATABASE EXTENDED jsondb1
+POSTHOOK: type: DESCDATABASE
+#### A masked pattern was here ####
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+{"databases":["default","jsondb1"]}
+PREHOOK: query: SHOW DATABASES LIKE 'json*'
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES LIKE 'json*'
+POSTHOOK: type: SHOWDATABASES
+{"databases":["jsondb1"]}
+PREHOOK: query: DROP DATABASE jsondb1
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: DROP DATABASE jsondb1
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: CREATE DATABASE jsondb1
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: CREATE DATABASE jsondb1
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: DESCRIBE DATABASE jsondb1
+PREHOOK: type: DESCDATABASE
+POSTHOOK: query: DESCRIBE DATABASE jsondb1
+POSTHOOK: type: DESCDATABASE
+#### A masked pattern was here ####
+PREHOOK: query: DESCRIBE DATABASE EXTENDED jsondb1
+PREHOOK: type: DESCDATABASE
+POSTHOOK: query: DESCRIBE DATABASE EXTENDED jsondb1
+POSTHOOK: type: DESCDATABASE
+#### A masked pattern was here ####
+PREHOOK: query: DROP DATABASE jsondb1
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: DROP DATABASE jsondb1
+POSTHOOK: type: DROPDATABASE

Added: hive/trunk/ql/src/test/results/clientpositive/describe_table_json.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/describe_table_json.q.out?rev=1309666&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/describe_table_json.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/describe_table_json.q.out Thu Apr  5 06:12:15 2012
@@ -0,0 +1,46 @@
+PREHOOK: query: CREATE TABLE IF NOT EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@jsontable
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+{"tables":["jsontable","src","src1","src_json","src_sequencefile","src_thrift","srcbucket","srcbucket2","srcpart"]}
+PREHOOK: query: SHOW TABLES LIKE 'json*'
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES LIKE 'json*'
+POSTHOOK: type: SHOWTABLES
+{"tables":["jsontable"]}
+PREHOOK: query: SHOW TABLE EXTENDED LIKE 'json*'
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: SHOW TABLE EXTENDED LIKE 'json*'
+POSTHOOK: type: SHOW_TABLESTATUS
+{"tables":[]}
+PREHOOK: query: ALTER TABLE jsontable SET TBLPROPERTIES ('id' = 'jsontable')
+PREHOOK: type: ALTERTABLE_PROPERTIES
+PREHOOK: Input: default@jsontable
+PREHOOK: Output: default@jsontable
+POSTHOOK: query: ALTER TABLE jsontable SET TBLPROPERTIES ('id' = 'jsontable')
+POSTHOOK: type: ALTERTABLE_PROPERTIES
+POSTHOOK: Input: default@jsontable
+POSTHOOK: Output: default@jsontable
+PREHOOK: query: DESCRIBE jsontable
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE jsontable
+POSTHOOK: type: DESCTABLE
+{"columns":[{"name":"key","type":"int"},{"name":"value","type":"string"}]}	 	 
+PREHOOK: query: DESCRIBE extended jsontable
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE extended jsontable
+POSTHOOK: type: DESCTABLE
+#### A masked pattern was here ####
+PREHOOK: query: DROP TABLE jsontable
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@jsontable
+PREHOOK: Output: default@jsontable
+POSTHOOK: query: DROP TABLE jsontable
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@jsontable
+POSTHOOK: Output: default@jsontable

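The {"columns":[...]} object above is the machine-parseable form this change provides,
and any JSON library can consume it. A small illustrative client-side parser, assuming
Jackson 1.x on the classpath (not part of the patch):

    import java.io.IOException;
    import java.util.Iterator;

    import org.codehaus.jackson.JsonNode;
    import org.codehaus.jackson.map.ObjectMapper;

    // Parse the DESCRIBE output shown above and print each column name and type.
    public class DescribeOutputParser {
        public static void main(String[] args) throws IOException {
            String json =
                "{\"columns\":[{\"name\":\"key\",\"type\":\"int\"},"
                + "{\"name\":\"value\",\"type\":\"string\"}]}";
            JsonNode root = new ObjectMapper().readTree(json);
            Iterator<JsonNode> cols = root.get("columns").getElements();
            while (cols.hasNext()) {
                JsonNode col = cols.next();
                System.out.println(col.get("name").getTextValue()
                    + "\t" + col.get("type").getTextValue());
            }
        }
    }
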
Added: hive/trunk/ql/src/test/results/clientpositive/misc_json.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/misc_json.q.out?rev=1309666&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/misc_json.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/misc_json.q.out Thu Apr  5 06:12:15 2012
@@ -0,0 +1,35 @@
+PREHOOK: query: CREATE TABLE IF NOT EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@jsontable
+PREHOOK: query: ALTER TABLE jsontable ADD COLUMNS (name STRING COMMENT 'a new column')
+PREHOOK: type: ALTERTABLE_ADDCOLS
+PREHOOK: Input: default@jsontable
+PREHOOK: Output: default@jsontable
+POSTHOOK: query: ALTER TABLE jsontable ADD COLUMNS (name STRING COMMENT 'a new column')
+POSTHOOK: type: ALTERTABLE_ADDCOLS
+POSTHOOK: Input: default@jsontable
+POSTHOOK: Output: default@jsontable
+PREHOOK: query: ALTER TABLE jsontable RENAME TO jsontable2
+PREHOOK: type: ALTERTABLE_RENAME
+PREHOOK: Input: default@jsontable
+PREHOOK: Output: default@jsontable
+POSTHOOK: query: ALTER TABLE jsontable RENAME TO jsontable2
+POSTHOOK: type: ALTERTABLE_RENAME
+POSTHOOK: Input: default@jsontable
+POSTHOOK: Output: default@jsontable
+POSTHOOK: Output: default@jsontable2
+PREHOOK: query: SHOW TABLE EXTENDED LIKE jsontable2
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: SHOW TABLE EXTENDED LIKE jsontable2
+POSTHOOK: type: SHOW_TABLESTATUS
+#### A masked pattern was here ####
+PREHOOK: query: DROP TABLE jsontable2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@jsontable2
+PREHOOK: Output: default@jsontable2
+POSTHOOK: query: DROP TABLE jsontable2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@jsontable2
+POSTHOOK: Output: default@jsontable2


