hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From br...@apache.org
Subject svn commit: r1664455 [12/30] - in /hive/branches/parquet: ./ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde...
Date Thu, 05 Mar 2015 18:51:39 GMT
Modified: hive/branches/parquet/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java (original)
+++ hive/branches/parquet/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java Thu Mar  5 18:51:32 2015
@@ -69,8 +69,7 @@ public class WindowingTableFunction exte
   public void execute(PTFPartitionIterator<Object> pItr, PTFPartition outP) throws HiveException {
     ArrayList<List<?>> oColumns = new ArrayList<List<?>>();
     PTFPartition iPart = pItr.getPartition();
-    StructObjectInspector inputOI;
-    inputOI = (StructObjectInspector) iPart.getOutputOI();
+    StructObjectInspector inputOI = iPart.getOutputOI();
 
     WindowTableFunctionDef wTFnDef = (WindowTableFunctionDef) getTableDef();
     Order order = wTFnDef.getOrder().getExpressions().get(0).getOrder();
@@ -145,7 +144,8 @@ public class WindowingTableFunction exte
     return true;
   }
 
-  private boolean streamingPossible(Configuration cfg, WindowFunctionDef wFnDef) {
+  private boolean streamingPossible(Configuration cfg, WindowFunctionDef wFnDef)
+      throws HiveException {
     WindowFrameDef wdwFrame = wFnDef.getWindowFrame();
     WindowFunctionInfo wFnInfo = FunctionRegistry.getWindowFunctionInfo(wFnDef
         .getName());
@@ -202,7 +202,7 @@ public class WindowingTableFunction exte
    * So no Unbounded Preceding or Following.
    */
   @SuppressWarnings("resource")
-  private int[] setCanAcceptInputAsStream(Configuration cfg) {
+  private int[] setCanAcceptInputAsStream(Configuration cfg) throws HiveException {
 
     canAcceptInputAsStream = false;
 
@@ -514,7 +514,6 @@ public class WindowingTableFunction exte
       i++;
     }
 
-    i=0;
     for(i=0; i < iPart.getOutputOI().getAllStructFieldRefs().size(); i++) {
       output.add(null);
     }

Modified: hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java (original)
+++ hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java Thu Mar  5 18:51:32 2015
@@ -26,12 +26,9 @@ import java.util.Stack;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
@@ -90,14 +87,14 @@ public class TestMetastoreExpr extends T
     }
   }
 
-  private static void silentDropDatabase(String dbName) throws MetaException, TException {
+  private static void silentDropDatabase(String dbName) throws TException {
     try {
       for (String tableName : client.getTables(dbName, "*")) {
         client.dropTable(dbName, tableName);
       }
       client.dropDatabase(dbName);
-    } catch (NoSuchObjectException e) {
-    } catch (InvalidOperationException e) {
+    } catch (NoSuchObjectException ignore) {
+    } catch (InvalidOperationException ignore) {
     }
   }
 
@@ -153,16 +150,16 @@ public class TestMetastoreExpr extends T
       client.listPartitionsByExpr(dbName, tblName,
           new byte[] { 'f', 'o', 'o' }, null, (short)-1, new ArrayList<Partition>());
       fail("Should have thrown IncompatibleMetastoreException");
-    } catch (IMetaStoreClient.IncompatibleMetastoreException ex) {
+    } catch (IMetaStoreClient.IncompatibleMetastoreException ignore) {
     }
 
     // Invalid expression => throw some exception, but not incompatible metastore.
     try {
       checkExpr(-1, dbName, tblName, e.val(31).intCol("p3").pred(">", 2).build());
       fail("Should have thrown");
-    } catch (IMetaStoreClient.IncompatibleMetastoreException ex) {
+    } catch (IMetaStoreClient.IncompatibleMetastoreException ignore) {
       fail("Should not have thrown IncompatibleMetastoreException");
-    } catch (Exception ex) {
+    } catch (Exception ignore) {
     }
   }
 
@@ -198,7 +195,7 @@ public class TestMetastoreExpr extends T
       for (int i = 0; i < args; ++i) {
         children.add(stack.pop());
       }
-      stack.push(new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
+      stack.push(new ExprNodeGenericFuncDesc(ti,
           FunctionRegistry.getFunctionInfo(name).getGenericUDF(), children));
       return this;
     }
@@ -249,8 +246,7 @@ public class TestMetastoreExpr extends T
   }
 
   private void addPartition(HiveMetaStoreClient client, Table table,
-      List<String> vals, String location) throws InvalidObjectException,
-        AlreadyExistsException, MetaException, TException {
+      List<String> vals, String location) throws TException {
 
     Partition part = new Partition();
     part.setDbName(table.getDbName());

Modified: hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java (original)
+++ hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java Thu Mar  5 18:51:32 2015
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.plan.Se
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.util.Shell;
 
@@ -93,7 +94,8 @@ public class TestExecDriver extends Test
       tmppath = new Path(tmpdir);
 
       fs = FileSystem.get(conf);
-      if (fs.exists(tmppath) && !fs.getFileStatus(tmppath).isDirectory()) {
+      if (fs.exists(tmppath) &&
+          !ShimLoader.getHadoopShims().isDirectory(fs.getFileStatus(tmppath))) {
         throw new RuntimeException(tmpdir + " exists but is not a directory");
       }
 
@@ -166,7 +168,7 @@ public class TestExecDriver extends Test
     if (!fs.exists(di_test)) {
       throw new RuntimeException(tmpdir + File.separator + testdir + " does not exist");
     }
-    if (!fs.getFileStatus(di_test).isDirectory()) {
+    if (!ShimLoader.getHadoopShims().isDirectory(fs.getFileStatus(di_test))) {
       throw new RuntimeException(tmpdir + File.separator + testdir + " is not a directory");
     }
     FSDataInputStream fi_test = fs.open((fs.listStatus(di_test))[0].getPath());

Modified: hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestHiveKVResultCache.java
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestHiveKVResultCache.java?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestHiveKVResultCache.java (original)
+++ hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestHiveKVResultCache.java Thu Mar  5 18:51:32 2015
@@ -27,8 +27,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.LinkedBlockingQueue;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.io.BytesWritable;
 import org.junit.Test;
@@ -42,8 +40,7 @@ public class TestHiveKVResultCache {
   @Test
   public void testSimple() throws Exception {
     // Create KV result cache object, add one (k,v) pair and retrieve them.
-    HiveConf conf = new HiveConf();
-    HiveKVResultCache cache = new HiveKVResultCache(conf);
+    HiveKVResultCache cache = new HiveKVResultCache();
 
     HiveKey key = new HiveKey("key".getBytes(), "key".hashCode());
     BytesWritable value = new BytesWritable("value".getBytes());
@@ -60,10 +57,9 @@ public class TestHiveKVResultCache {
 
   @Test
   public void testSpilling() throws Exception {
-    HiveConf conf = new HiveConf();
-    HiveKVResultCache cache = new HiveKVResultCache(conf);
+    HiveKVResultCache cache = new HiveKVResultCache();
 
-    final int recordCount = HiveKVResultCache.IN_MEMORY_CACHE_SIZE * 3;
+    final int recordCount = HiveKVResultCache.IN_MEMORY_NUM_ROWS * 3;
 
     // Test using the same cache where first n rows are inserted then cache is cleared.
     // Next reuse the same cache and insert another m rows and verify the cache stores correctly.
@@ -104,10 +100,18 @@ public class TestHiveKVResultCache {
   @Test
   public void testResultList() throws Exception {
     scanAndVerify(10000, 0, 0, "a", "b");
+    scanAndVerify(10000, 511, 0, "a", "b");
+    scanAndVerify(10000, 511 * 2, 0, "a", "b");
+    scanAndVerify(10000, 511, 10, "a", "b");
+    scanAndVerify(10000, 511 * 2, 10, "a", "b");
     scanAndVerify(10000, 512, 0, "a", "b");
     scanAndVerify(10000, 512 * 2, 0, "a", "b");
-    scanAndVerify(10000, 512, 10, "a", "b");
-    scanAndVerify(10000, 512 * 2, 10, "a", "b");
+    scanAndVerify(10000, 512, 3, "a", "b");
+    scanAndVerify(10000, 512 * 6, 10, "a", "b");
+    scanAndVerify(10000, 512 * 7, 5, "a", "b");
+    scanAndVerify(10000, 512 * 9, 19, "a", "b");
+    scanAndVerify(10000, 1, 0, "a", "b");
+    scanAndVerify(10000, 1, 1, "a", "b");
   }
 
   private static void scanAndVerify(
@@ -176,8 +180,8 @@ public class TestHiveKVResultCache {
     // A queue to notify separateRowGenerator to generate the next batch of rows.
     private LinkedBlockingQueue<Boolean> queue;
 
-    MyHiveFunctionResultList(Configuration conf, Iterator inputIterator) {
-      super(conf, inputIterator);
+    MyHiveFunctionResultList(Iterator inputIterator) {
+      super(inputIterator);
     }
 
     void init(long rows, int threshold, int separate, String p1, String p2) {
@@ -258,8 +262,7 @@ public class TestHiveKVResultCache {
   private static long scanResultList(long rows, int threshold, int separate,
       List<Tuple2<HiveKey, BytesWritable>> output, String prefix1, String prefix2) {
     final long iteratorCount = threshold == 0 ? 1 : rows * (100 - separate) / 100 / threshold;
-    MyHiveFunctionResultList resultList = new MyHiveFunctionResultList(
-        new HiveConf(), new Iterator() {
+    MyHiveFunctionResultList resultList = new MyHiveFunctionResultList(new Iterator() {
       // Input record iterator, not used
       private int i = 0;
       @Override

Modified: hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java (original)
+++ hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java Thu Mar  5 18:51:32 2015
@@ -108,6 +108,16 @@ public class TestHiveSchemaConverter {
   }
 
   @Test
+  public void testDateType() throws Exception {
+    testConversion(
+        "a",
+        "date",
+        "message hive_schema {\n"
+            + "  optional int32 a (DATE);\n"
+            + "}\n");
+  }
+
+  @Test
   public void testArray() throws Exception {
     testConversion("arrayCol",
             "array<int>",

Modified: hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java (original)
+++ hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java Thu Mar  5 18:51:32 2015
@@ -28,6 +28,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.regex.Pattern;
 
+import com.google.common.collect.ImmutableMap;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.fs.FileStatus;
@@ -36,6 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.PartitionDropOptions;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -426,6 +428,158 @@ public class TestHive extends TestCase {
     }
   }
 
+  private FileStatus[] getTrashContents() throws Exception {
+    FileSystem fs = FileSystem.get(hiveConf);
+    Path trashDir = ShimLoader.getHadoopShims().getCurrentTrashPath(hiveConf, fs);
+    return fs.globStatus(trashDir.suffix("/*"));
+  }
+
+  private Table createPartitionedTable(String dbName, String tableName) throws Exception {
+    try {
+
+      hm.dropTable(dbName, tableName);
+      hm.createTable(tableName,
+                     Arrays.asList("key", "value"),   // Data columns.
+                     Arrays.asList("ds", "hr"),       // Partition columns.
+                     TextInputFormat.class,
+                     HiveIgnoreKeyTextOutputFormat.class);
+      return hm.getTable(dbName, tableName);
+    }
+    catch (Exception exception) {
+      fail("Unable to drop and create table " + dbName + "." + tableName
+          + " because " + StringUtils.stringifyException(exception));
+      throw exception;
+    }
+  }
+
+  private void cleanUpTableQuietly(String dbName, String tableName) {
+    try {
+      hm.dropTable(dbName, tableName, true, true, true);
+    }
+    catch(Exception exception) {
+      fail("Unexpected exception: " + StringUtils.stringifyException(exception));
+    }
+  }
+
+  /**
+   * Test for PURGE support for dropping partitions.
+   * 1. Drop partitions with PURGE, and check that the data isn't moved to Trash.
+   * 2. Drop partitions without PURGE, and check that the data is moved to Trash.
+   * @throws Exception on failure.
+   */
+  public void testDropPartitionsWithPurge() throws Exception {
+    String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+    String tableName = "table_for_testDropPartitionsWithPurge";
+
+    try {
+
+      Map<String, String> partitionSpec =  new ImmutableMap.Builder<String, String>()
+                                                 .put("ds", "20141216")
+                                                 .put("hr", "12")
+                                                 .build();
+
+      int trashSizeBeforeDrop = getTrashContents().length;
+
+      Table table = createPartitionedTable(dbName, tableName);
+      hm.createPartition(table, partitionSpec);
+
+      Partition partition = hm.getPartition(table, partitionSpec, false);
+      assertNotNull("Newly created partition shouldn't be null!", partition);
+
+      hm.dropPartition(dbName, tableName,
+                       partition.getValues(),
+                       PartitionDropOptions.instance()
+                                           .deleteData(true)
+                                           .purgeData(true)
+                      );
+
+      int trashSizeAfterDropPurge = getTrashContents().length;
+
+      assertEquals("After dropPartitions(purge), trash should've remained unchanged!",
+                 trashSizeBeforeDrop, trashSizeAfterDropPurge);
+
+      // Repeat, and drop partition without purge.
+      hm.createPartition(table, partitionSpec);
+
+      partition = hm.getPartition(table, partitionSpec, false);
+      assertNotNull("Newly created partition shouldn't be null!", partition);
+
+      hm.dropPartition(dbName, tableName,
+                       partition.getValues(),
+                       PartitionDropOptions.instance()
+                                           .deleteData(true)
+                                           .purgeData(false)
+                      );
+
+      int trashSizeWithoutPurge = getTrashContents().length;
+
+      assertEquals("After dropPartitions(noPurge), data should've gone to trash!",
+                  trashSizeBeforeDrop, trashSizeWithoutPurge);
+
+    }
+    catch (Exception e) {
+      fail("Unexpected exception: " + StringUtils.stringifyException(e));
+    }
+    finally {
+      cleanUpTableQuietly(dbName, tableName);
+    }
+  }
+
+  /**
+   * Test that tables set up with auto-purge skip trash-directory when tables/partitions are dropped.
+   * @throws Throwable
+   */
+  public void testAutoPurgeTablesAndPartitions() throws Throwable {
+
+    String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+    String tableName = "table_for_testAutoPurgeTablesAndPartitions";
+    try {
+
+      Table table = createPartitionedTable(dbName, tableName);
+      table.getParameters().put("auto.purge", "true");
+      hm.alterTable(tableName, table);
+
+      Map<String, String> partitionSpec =  new ImmutableMap.Builder<String, String>()
+          .put("ds", "20141216")
+          .put("hr", "12")
+          .build();
+
+      int trashSizeBeforeDrop = getTrashContents().length;
+
+      hm.createPartition(table, partitionSpec);
+
+      Partition partition = hm.getPartition(table, partitionSpec, false);
+      assertNotNull("Newly created partition shouldn't be null!", partition);
+
+      hm.dropPartition(dbName, tableName,
+          partition.getValues(),
+          PartitionDropOptions.instance()
+                              .deleteData(true)
+                              .purgeData(false)
+      );
+
+      int trashSizeAfterDrop = getTrashContents().length;
+
+      assertEquals("After dropPartition(noPurge), data should still have skipped trash.",
+                 trashSizeBeforeDrop, trashSizeAfterDrop);
+
+      // Repeat the same check for dropTable.
+
+      trashSizeBeforeDrop = trashSizeAfterDrop;
+      hm.dropTable(dbName, tableName);
+      trashSizeAfterDrop = getTrashContents().length;
+
+      assertEquals("After dropTable(noPurge), data should still have skipped trash.",
+                 trashSizeBeforeDrop, trashSizeAfterDrop);
+
+    }
+    catch(Exception e) {
+      fail("Unexpected failure: " + StringUtils.stringifyException(e));
+    }
+    finally {
+      cleanUpTableQuietly(dbName, tableName);
+    }
+  }
 
   public void testPartition() throws Throwable {
     try {

Modified: hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java (original)
+++ hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java Thu Mar  5 18:51:32 2015
@@ -207,6 +207,26 @@ public class TestIUD {
           "(TOK_WHERE (= (TOK_TABLE_OR_COL b) 9))))",
       ast.toStringTree());
   }
+  /**
+   * same as testInsertIntoTableAsSelectFromNamedVirtTable but with column list on target table
+   * @throws ParseException
+   */
+  @Test
+  public void testInsertIntoTableAsSelectFromNamedVirtTableNamedCol() throws ParseException {
+    ASTNode ast = parse("insert into page_view(c1,c2) select a,b as c from (values (1,2),(3,4)) as VC(a,b) where b = 9");
+    Assert.assertEquals("AST doesn't match",
+      "(TOK_QUERY " +
+        "(TOK_FROM " +
+        "(TOK_VIRTUAL_TABLE " +
+        "(TOK_VIRTUAL_TABREF (TOK_TABNAME VC) (TOK_COL_NAME a b)) " +
+        "(TOK_VALUES_TABLE (TOK_VALUE_ROW 1 2) (TOK_VALUE_ROW 3 4)))) " +
+        "(TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME page_view)) (TOK_TABCOLNAME c1 c2)) " +
+        "(TOK_SELECT " +
+        "(TOK_SELEXPR (TOK_TABLE_OR_COL a)) " +
+        "(TOK_SELEXPR (TOK_TABLE_OR_COL b) c)) " +
+        "(TOK_WHERE (= (TOK_TABLE_OR_COL b) 9))))",
+      ast.toStringTree());
+  }
   @Test
   public void testInsertIntoTableFromAnonymousTable1Row() throws ParseException {
     ASTNode ast = parse("insert into page_view values(1,2)");
@@ -220,6 +240,32 @@ public class TestIUD {
         "(TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))",
       ast.toStringTree());
   }
+  /**
+   * Same as testInsertIntoTableFromAnonymousTable1Row but with column list on target table
+   * @throws ParseException
+   */
+  @Test
+  public void testInsertIntoTableFromAnonymousTable1RowNamedCol() throws ParseException {
+    ASTNode ast = parse("insert into page_view(a,b) values(1,2)");
+    Assert.assertEquals("AST doesn't match",
+      "(TOK_QUERY " +
+        "(TOK_FROM " +
+          "(TOK_VIRTUAL_TABLE " +
+            "(TOK_VIRTUAL_TABREF TOK_ANONYMOUS) " +
+            "(TOK_VALUES_TABLE (TOK_VALUE_ROW 1 2))" +
+          ")" +
+        ") " +
+        "(TOK_INSERT " +
+          "(TOK_INSERT_INTO " +
+            "(TOK_TAB (TOK_TABNAME page_view)) " +
+            "(TOK_TABCOLNAME a b)" +//this is "extra" piece we get vs previous query
+          ") " +
+          "(TOK_SELECT " +
+            "(TOK_SELEXPR TOK_ALLCOLREF)" +
+          ")" +
+        ")" +
+      ")", ast.toStringTree());
+  }
   @Test
   public void testInsertIntoTableFromAnonymousTable() throws ParseException {
     ASTNode ast = parse("insert into table page_view values(-1,2),(3,+4)");

Modified: hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/parse/TestMacroSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/parse/TestMacroSemanticAnalyzer.java?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/parse/TestMacroSemanticAnalyzer.java (original)
+++ hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/parse/TestMacroSemanticAnalyzer.java Thu Mar  5 18:51:32 2015
@@ -66,12 +66,12 @@ public class TestMacroSemanticAnalyzer {
   @Test
   public void testDropMacroExistsDoNotIgnoreErrors() throws Exception {
     conf.setBoolVar(ConfVars.DROPIGNORESNONEXISTENT, false);
-    FunctionRegistry.registerGenericUDF(false, "SOME_MACRO", GenericUDFMacro.class);
+    FunctionRegistry.registerTemporaryUDF("SOME_MACRO", GenericUDFMacro.class);
     analyze(parse("DROP TEMPORARY MACRO SOME_MACRO"));
   }
   @Test
   public void testDropMacro() throws Exception {
-    FunctionRegistry.registerGenericUDF(false, "SOME_MACRO", GenericUDFMacro.class);
+    FunctionRegistry.registerTemporaryUDF("SOME_MACRO", GenericUDFMacro.class);
     analyze(parse("DROP TEMPORARY MACRO SOME_MACRO"));
   }
   @Test(expected = SemanticException.class)

Modified: hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFAddMonths.java
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFAddMonths.java?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFAddMonths.java (original)
+++ hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFAddMonths.java Thu Mar  5 18:51:32 2015
@@ -17,26 +17,30 @@
  */
 package org.apache.hadoop.hive.ql.udf.generic;
 
+import junit.framework.TestCase;
+
+import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;
+import org.apache.hadoop.hive.serde2.io.ByteWritable;
+import org.apache.hadoop.hive.serde2.io.ShortWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 
-import junit.framework.TestCase;
-
 public class TestGenericUDFAddMonths extends TestCase {
 
-  public void testAddMonths() throws HiveException {
+  public void testAddMonthsInt() throws HiveException {
     GenericUDFAddMonths udf = new GenericUDFAddMonths();
     ObjectInspector valueOI0 = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
     ObjectInspector valueOI1 = PrimitiveObjectInspectorFactory.writableIntObjectInspector;
     ObjectInspector[] arguments = { valueOI0, valueOI1 };
 
     udf.initialize(arguments);
+
+    // date str
     runAndVerify("2014-01-14", 1, "2014-02-14", udf);
     runAndVerify("2014-01-31", 1, "2014-02-28", udf);
     runAndVerify("2014-02-28", -1, "2014-01-31", udf);
@@ -46,7 +50,64 @@ public class TestGenericUDFAddMonths ext
     runAndVerify("2016-02-29", -12, "2015-02-28", udf);
     runAndVerify("2016-01-29", 1, "2016-02-29", udf);
     runAndVerify("2016-02-29", -1, "2016-01-31", udf);
-    runAndVerify("2014-01-32", 1, "2014-03-01", udf);
+    // wrong date str
+    runAndVerify("2014-02-30", 1, "2014-04-02", udf);
+    runAndVerify("2014-02-32", 1, "2014-04-04", udf);
+    runAndVerify("2014-01", 1, null, udf);
+
+    // ts str
+    runAndVerify("2014-01-14 10:30:00", 1, "2014-02-14", udf);
+    runAndVerify("2014-01-31 10:30:00", 1, "2014-02-28", udf);
+    runAndVerify("2014-02-28 10:30:00.1", -1, "2014-01-31", udf);
+    runAndVerify("2014-02-28 10:30:00.100", 2, "2014-04-30", udf);
+    runAndVerify("2014-04-30 10:30:00.001", -2, "2014-02-28", udf);
+    runAndVerify("2015-02-28 10:30:00.000000001", 12, "2016-02-29", udf);
+    runAndVerify("2016-02-29 10:30:00", -12, "2015-02-28", udf);
+    runAndVerify("2016-01-29 10:30:00", 1, "2016-02-29", udf);
+    runAndVerify("2016-02-29 10:30:00", -1, "2016-01-31", udf);
+    // wrong ts str
+    runAndVerify("2014-02-30 10:30:00", 1, "2014-04-02", udf);
+    runAndVerify("2014-02-32 10:30:00", 1, "2014-04-04", udf);
+    runAndVerify("2014/01/31 10:30:00", 1, null, udf);
+    runAndVerify("2014-01-31T10:30:00", 1, "2014-02-28", udf);
+  }
+
+  public void testAddMonthsShort() throws HiveException {
+    GenericUDFAddMonths udf = new GenericUDFAddMonths();
+    ObjectInspector valueOI0 = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector valueOI1 = PrimitiveObjectInspectorFactory.writableShortObjectInspector;
+    ObjectInspector[] arguments = { valueOI0, valueOI1 };
+
+    udf.initialize(arguments);
+    // short
+    runAndVerify("2014-01-14", (short) 1, "2014-02-14", udf);
+  }
+
+  public void testAddMonthsByte() throws HiveException {
+    GenericUDFAddMonths udf = new GenericUDFAddMonths();
+    ObjectInspector valueOI0 = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector valueOI1 = PrimitiveObjectInspectorFactory.writableByteObjectInspector;
+    ObjectInspector[] arguments = { valueOI0, valueOI1 };
+
+    udf.initialize(arguments);
+    // byte
+    runAndVerify("2014-01-14", (byte) 1, "2014-02-14", udf);
+  }
+
+  public void testAddMonthsLong() throws HiveException {
+    @SuppressWarnings("resource")
+    GenericUDFAddMonths udf = new GenericUDFAddMonths();
+    ObjectInspector valueOI0 = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector valueOI1 = PrimitiveObjectInspectorFactory.writableLongObjectInspector;
+    ObjectInspector[] arguments = { valueOI0, valueOI1 };
+
+    try {
+      udf.initialize(arguments);
+      assertTrue("add_months exception expected", false);
+    } catch (UDFArgumentTypeException e) {
+      assertEquals("add_months test",
+          "add_months only takes INT/SHORT/BYTE types as 2nd argument, got LONG", e.getMessage());
+    }
   }
 
   private void runAndVerify(String str, int months, String expResult, GenericUDF udf)
@@ -55,6 +116,24 @@ public class TestGenericUDFAddMonths ext
     DeferredObject valueObj1 = new DeferredJavaObject(new IntWritable(months));
     DeferredObject[] args = { valueObj0, valueObj1 };
     Text output = (Text) udf.evaluate(args);
-    assertEquals("add_months() test ", expResult, output.toString());
+    assertEquals("add_months() test ", expResult, output != null ? output.toString() : null);
+  }
+
+  private void runAndVerify(String str, short months, String expResult, GenericUDF udf)
+      throws HiveException {
+    DeferredObject valueObj0 = new DeferredJavaObject(new Text(str));
+    DeferredObject valueObj1 = new DeferredJavaObject(new ShortWritable(months));
+    DeferredObject[] args = { valueObj0, valueObj1 };
+    Text output = (Text) udf.evaluate(args);
+    assertEquals("add_months() test ", expResult, output != null ? output.toString() : null);
+  }
+
+  private void runAndVerify(String str, byte months, String expResult, GenericUDF udf)
+      throws HiveException {
+    DeferredObject valueObj0 = new DeferredJavaObject(new Text(str));
+    DeferredObject valueObj1 = new DeferredJavaObject(new ByteWritable(months));
+    DeferredObject[] args = { valueObj0, valueObj1 };
+    Text output = (Text) udf.evaluate(args);
+    assertEquals("add_months() test ", expResult, output != null ? output.toString() : null);
   }
 }

Modified: hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLastDay.java
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLastDay.java?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLastDay.java (original)
+++ hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLastDay.java Thu Mar  5 18:51:32 2015
@@ -35,6 +35,8 @@ public class TestGenericUDFLastDay exten
     ObjectInspector[] arguments = { valueOI0 };
 
     udf.initialize(arguments);
+
+    // date str
     runAndVerify("2014-01-01", "2014-01-31", udf);
     runAndVerify("2014-01-14", "2014-01-31", udf);
     runAndVerify("2014-01-31", "2014-01-31", udf);
@@ -43,17 +45,26 @@ public class TestGenericUDFLastDay exten
     runAndVerify("2016-02-03", "2016-02-29", udf);
     runAndVerify("2016-02-28", "2016-02-29", udf);
     runAndVerify("2016-02-29", "2016-02-29", udf);
+    // wrong date str
+    runAndVerify("2016-02-30", "2016-03-31", udf);
+    runAndVerify("2014-01-32", "2014-02-28", udf);
     runAndVerify("01/14/2014", null, udf);
     runAndVerify(null, null, udf);
 
+    // ts str
     runAndVerify("2014-01-01 10:30:45", "2014-01-31", udf);
     runAndVerify("2014-01-14 10:30:45", "2014-01-31", udf);
-    runAndVerify("2014-01-31 10:30:45", "2014-01-31", udf);
-    runAndVerify("2014-02-02 10:30:45", "2014-02-28", udf);
-    runAndVerify("2014-02-28 10:30:45", "2014-02-28", udf);
-    runAndVerify("2016-02-03 10:30:45", "2016-02-29", udf);
+    runAndVerify("2014-01-31 10:30:45.1", "2014-01-31", udf);
+    runAndVerify("2014-02-02 10:30:45.100", "2014-02-28", udf);
+    runAndVerify("2014-02-28 10:30:45.001", "2014-02-28", udf);
+    runAndVerify("2016-02-03 10:30:45.000000001", "2016-02-29", udf);
     runAndVerify("2016-02-28 10:30:45", "2016-02-29", udf);
     runAndVerify("2016-02-29 10:30:45", "2016-02-29", udf);
+    // wrong ts str
+    runAndVerify("2016-02-30 10:30:45", "2016-03-31", udf);
+    runAndVerify("2014-01-32 10:30:45", "2014-02-28", udf);
+    runAndVerify("01/14/2014 10:30:45", null, udf);
+    runAndVerify("2016-02-28T10:30:45", "2016-02-29", udf);
   }
 
   private void runAndVerify(String str, String expResult, GenericUDF udf)

Modified: hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNextDay.java
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNextDay.java?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNextDay.java (original)
+++ hive/branches/parquet/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNextDay.java Thu Mar  5 18:51:32 2015
@@ -57,10 +57,10 @@ public class TestGenericUDFNextDay exten
 
     // start_date is Wed, full timestamp, full day name
     runAndVerify("2015-01-14 14:04:34", "sunday", "2015-01-18", udf);
-    runAndVerify("2015-01-14 14:04:34", "Monday", "2015-01-19", udf);
-    runAndVerify("2015-01-14 14:04:34", "Tuesday", "2015-01-20", udf);
-    runAndVerify("2015-01-14 14:04:34", "wednesday", "2015-01-21", udf);
-    runAndVerify("2015-01-14 14:04:34", "thursDAY", "2015-01-15", udf);
+    runAndVerify("2015-01-14 14:04:34.1", "Monday", "2015-01-19", udf);
+    runAndVerify("2015-01-14 14:04:34.100", "Tuesday", "2015-01-20", udf);
+    runAndVerify("2015-01-14 14:04:34.001", "wednesday", "2015-01-21", udf);
+    runAndVerify("2015-01-14 14:04:34.000000001", "thursDAY", "2015-01-15", udf);
     runAndVerify("2015-01-14 14:04:34", "FRIDAY", "2015-01-16", udf);
     runAndVerify("2015-01-14 14:04:34", "SATurday", "2015-01-17", udf);
 
@@ -72,6 +72,12 @@ public class TestGenericUDFNextDay exten
     // not valid values
     runAndVerify("01/14/2015", "TU", null, udf);
     runAndVerify("2015-01-14", "VT", null, udf);
+    runAndVerify("2015-02-30", "WE", "2015-03-04", udf);
+    runAndVerify("2015-02-32", "WE", "2015-03-11", udf);
+    runAndVerify("2015-02-30 10:30:00", "WE", "2015-03-04", udf);
+    runAndVerify("2015-02-32 10:30:00", "WE", "2015-03-11", udf);
+    runAndVerify("2015/01/14 14:04:34", "SAT", null, udf);
+    runAndVerify("2015-01-14T14:04:34", "SAT", "2015-01-17", udf);
   }
 
   public void testNextDayErrorArg1() throws HiveException {
@@ -86,7 +92,7 @@ public class TestGenericUDFNextDay exten
       assertTrue("UDFArgumentException expected", false);
     } catch (UDFArgumentException e) {
       assertEquals(
-          "next_day() only takes STRING/TIMESTAMP/DATEWRITABLE types as first argument, got LONG",
+          "next_day only takes STRING_GROUP, DATE_GROUP types as 1st argument, got LONG",
           e.getMessage());
     }
   }
@@ -102,7 +108,7 @@ public class TestGenericUDFNextDay exten
       udf.initialize(arguments);
       assertTrue("UDFArgumentException expected", false);
     } catch (UDFArgumentException e) {
-      assertEquals("next_day() only takes STRING_GROUP types as second argument, got INT",
+      assertEquals("next_day only takes STRING_GROUP types as 2nd argument, got INT",
           e.getMessage());
     }
   }

Modified: hive/branches/parquet/ql/src/test/queries/clientnegative/drop_native_udf.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientnegative/drop_native_udf.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientnegative/drop_native_udf.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientnegative/drop_native_udf.q Thu Mar  5 18:51:32 2015
@@ -1 +1 @@
-DROP TEMPORARY FUNCTION max;
+DROP FUNCTION max;

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/groupby3_map_skew.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/groupby3_map_skew.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/groupby3_map_skew.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/groupby3_map_skew.q Thu Mar  5 18:51:32 2015
@@ -29,5 +29,6 @@ INSERT OVERWRITE TABLE dest1 SELECT
   variance(substr(src.value,5)),
   var_samp(substr(src.value,5));
 
-SELECT c1, c2, c3, c4, c5, c6, c7, ROUND(c8, 5), ROUND(c9, 5) FROM dest1;
+SELECT ROUND(c1, 1), ROUND(c2, 3), ROUND(c3, 5), ROUND(c4, 1), ROUND(c5, 1), ROUND(c6, 5),
+ROUND(c7,5), ROUND(c8, 5), ROUND(c9, 5) FROM dest1;
 

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q Thu Mar  5 18:51:32 2015
@@ -12,8 +12,8 @@ SET hive.exec.compress.output=true;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key limit 10
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key limit 10;
+INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10
+INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10;
 
 FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/groupby_multi_single_reducer3.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/groupby_multi_single_reducer3.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/groupby_multi_single_reducer3.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/groupby_multi_single_reducer3.q Thu Mar  5 18:51:32 2015
@@ -1,4 +1,7 @@
 -- HIVE-3849 Aliased column in where clause for multi-groupby single reducer cannot be resolved
+
+-- SORT_QUERY_RESULTS
+
 create table e1 (key string, count int);
 create table e2 (key string, count int);
 

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/parallel_join0.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/parallel_join0.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/parallel_join0.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/parallel_join0.q Thu Mar  5 18:51:32 2015
@@ -1,6 +1,6 @@
 set mapreduce.job.reduces=4;
 
--- SORT_BEFORE_DIFF
+-- SORT_QUERY_RESULTS
 
 EXPLAIN
 SELECT src1.key as k1, src1.value as v1, 

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/parquet_types.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/parquet_types.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/parquet_types.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/parquet_types.q Thu Mar  5 18:51:32 2015
@@ -14,7 +14,8 @@ CREATE TABLE parquet_types_staging (
   cbinary string,
   m1 map<string, varchar(3)>,
   l1 array<int>,
-  st1 struct<c1:int, c2:char(1)>
+  st1 struct<c1:int, c2:char(1)>,
+  d date
 ) ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '|'
 COLLECTION ITEMS TERMINATED BY ','
@@ -33,7 +34,8 @@ CREATE TABLE parquet_types (
   cbinary binary,
   m1 map<string, varchar(3)>,
   l1 array<int>,
-  st1 struct<c1:int, c2:char(1)>
+  st1 struct<c1:int, c2:char(1)>,
+  d date
 ) STORED AS PARQUET;
 
 LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging;
@@ -42,10 +44,10 @@ SELECT * FROM parquet_types_staging;
 
 INSERT OVERWRITE TABLE parquet_types
 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
-unhex(cbinary), m1, l1, st1 FROM parquet_types_staging;
+unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging;
 
 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
-hex(cbinary), m1, l1, st1 FROM parquet_types;
+hex(cbinary), m1, l1, st1, d FROM parquet_types;
 
 SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types;
 
@@ -55,8 +57,8 @@ SELECT ctinyint,
   MAX(cint),
   MIN(csmallint),
   COUNT(cstring1),
-  AVG(cfloat),
-  STDDEV_POP(cdouble)
+  ROUND(AVG(cfloat), 5),
+  ROUND(STDDEV_POP(cdouble),5)
 FROM parquet_types
 GROUP BY ctinyint
 ORDER BY ctinyint

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/ptf.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/ptf.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/ptf.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/ptf.q Thu Mar  5 18:51:32 2015
@@ -202,7 +202,7 @@ dense_rank() over (partition by p_mfgr o
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
 from noop(on noopwithmap(on noop(on part 
 partition by p_mfgr 
-order by p_mfgr, p_name
+order by p_mfgr DESC, p_name
 )));
 
 select p_mfgr, p_name, p_size, 
@@ -211,7 +211,7 @@ dense_rank() over (partition by p_mfgr o
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
 from noop(on noopwithmap(on noop(on part 
 partition by p_mfgr 
-order by p_mfgr, p_name
+order by p_mfgr DESC, p_name
 )));
  
 -- 13. testPTFAndWindowingInSubQ

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/ptf_matchpath.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/ptf_matchpath.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/ptf_matchpath.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/ptf_matchpath.q Thu Mar  5 18:51:32 2015
@@ -15,6 +15,17 @@ LOAD DATA LOCAL INPATH '../../data/files
 -- SORT_QUERY_RESULTS
 
 -- 1. basic Matchpath test
+explain
+select origin_city_name, fl_num, year, month, day_of_month, sz, tpath 
+from matchpath(on 
+        flights_tiny 
+        distribute by fl_num 
+        sort by year, month, day_of_month  
+      arg1('LATE.LATE+'), 
+      arg2('LATE'), arg3(arr_delay > 15), 
+    arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') 
+   );       
+
 select origin_city_name, fl_num, year, month, day_of_month, sz, tpath 
 from matchpath(on 
         flights_tiny 
@@ -26,6 +37,17 @@ from matchpath(on
    );       
 
 -- 2. Matchpath on 1 partition
+explain
+select origin_city_name, fl_num, year, month, day_of_month, sz, tpath 
+from matchpath(on 
+        flights_tiny 
+        sort by fl_num, year, month, day_of_month  
+      arg1('LATE.LATE+'), 
+      arg2('LATE'), arg3(arr_delay > 15), 
+    arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') 
+   )
+where fl_num = 1142;
+
 select origin_city_name, fl_num, year, month, day_of_month, sz, tpath 
 from matchpath(on 
         flights_tiny 
@@ -37,6 +59,17 @@ from matchpath(on
 where fl_num = 1142;
 
 -- 3. empty partition.
+explain
+select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+        (select * from flights_tiny where fl_num = -1142) flights_tiny
+        sort by fl_num, year, month, day_of_month
+      arg1('LATE.LATE+'),
+      arg2('LATE'), arg3(arr_delay > 15),
+    arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+   );
+   
+
 select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
 from matchpath(on
         (select * from flights_tiny where fl_num = -1142) flights_tiny

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/tez_join_hash.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/tez_join_hash.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/tez_join_hash.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/tez_join_hash.q Thu Mar  5 18:51:32 2015
@@ -10,3 +10,26 @@ explain
 SELECT count(*) FROM src, orc_src where src.key=orc_src.key;
 
 SELECT count(*) FROM src, orc_src where src.key=orc_src.key;
+
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=3000;
+
+explain
+select count(*) from (select x.key as key, y.value as value from
+srcpart x join srcpart y on (x.key = y.key)
+union all
+select key, value from srcpart z) a join src b on (a.value = b.value) group by a.key, a.value;
+
+select key, count(*) from (select x.key as key, y.value as value from
+srcpart x join srcpart y on (x.key = y.key)
+union all
+select key, value from srcpart z) a join src b on (a.value = b.value) group by a.key, a.value;
+
+set hive.execution.engine=mr;
+select key, count(*) from (select x.key as key, y.value as value from
+srcpart x join srcpart y on (x.key = y.key)
+union all
+select key, value from srcpart z) a join src b on (a.value = b.value) group by a.key, a.value;
+
+

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/udaf_covar_pop.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/udaf_covar_pop.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/udaf_covar_pop.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/udaf_covar_pop.q Thu Mar  5 18:51:32 2015
@@ -11,6 +11,6 @@ SELECT covar_pop(b, c) FROM covar_tab WH
 SELECT covar_pop(b, c) FROM covar_tab WHERE a < 3;
 SELECT covar_pop(b, c) FROM covar_tab WHERE a = 3;
 SELECT a, covar_pop(b, c) FROM covar_tab GROUP BY a ORDER BY a;
-SELECT covar_pop(b, c) FROM covar_tab;
+SELECT ROUND(covar_pop(b, c), 5) FROM covar_tab;
 
 DROP TABLE covar_tab;

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/udaf_covar_samp.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/udaf_covar_samp.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/udaf_covar_samp.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/udaf_covar_samp.q Thu Mar  5 18:51:32 2015
@@ -11,6 +11,6 @@ SELECT covar_samp(b, c) FROM covar_tab W
 SELECT covar_samp(b, c) FROM covar_tab WHERE a < 3;
 SELECT covar_samp(b, c) FROM covar_tab WHERE a = 3;
 SELECT a, covar_samp(b, c) FROM covar_tab GROUP BY a ORDER BY a;
-SELECT covar_samp(b, c) FROM covar_tab;
+SELECT ROUND(covar_samp(b, c), 5) FROM covar_tab;
 
 DROP TABLE covar_tab;

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/union3.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/union3.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/union3.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/union3.q Thu Mar  5 18:51:32 2015
@@ -1,4 +1,4 @@
--- SORT_BEFORE_DIFF
+-- SORT_QUERY_RESULTS
 
 explain
 SELECT *

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/union4.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/union4.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/union4.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/union4.q Thu Mar  5 18:51:32 2015
@@ -1,5 +1,7 @@
 set hive.map.aggr = true;
 
+-- SORT_QUERY_RESULTS
+
 -- union case: both subqueries are map-reduce jobs on same input, followed by filesink
 
 

Modified: hive/branches/parquet/ql/src/test/queries/clientpositive/vector_cast_constant.q
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/queries/clientpositive/vector_cast_constant.q?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/queries/clientpositive/vector_cast_constant.q (original)
+++ hive/branches/parquet/ql/src/test/queries/clientpositive/vector_cast_constant.q Thu Mar  5 18:51:32 2015
@@ -1,5 +1,7 @@
 SET hive.vectorized.execution.enabled=true;
 
+-- JAVA_VERSION_SPECIFIC_OUTPUT
+
 DROP TABLE over1k;
 DROP TABLE over1korc;
 

Modified: hive/branches/parquet/ql/src/test/results/clientnegative/create_function_nonexistent_class.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientnegative/create_function_nonexistent_class.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientnegative/create_function_nonexistent_class.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientnegative/create_function_nonexistent_class.q.out Thu Mar  5 18:51:32 2015
@@ -2,5 +2,5 @@ PREHOOK: query: create function default.
 PREHOOK: type: CREATEFUNCTION
 PREHOOK: Output: database:default
 PREHOOK: Output: default.badfunc
-FAILED: Class my.nonexistent.class not found
+Failed to register default.badfunc using class my.nonexistent.class
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask

Modified: hive/branches/parquet/ql/src/test/results/clientnegative/create_function_nonudf_class.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientnegative/create_function_nonudf_class.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientnegative/create_function_nonudf_class.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientnegative/create_function_nonudf_class.q.out Thu Mar  5 18:51:32 2015
@@ -2,5 +2,5 @@ PREHOOK: query: create function default.
 PREHOOK: type: CREATEFUNCTION
 PREHOOK: Output: database:default
 PREHOOK: Output: default.badfunc
-FAILED: Class java.lang.String does not implement UDF, GenericUDF, or UDAF
+Failed to register default.badfunc using class java.lang.String
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask

Modified: hive/branches/parquet/ql/src/test/results/clientnegative/drop_native_udf.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientnegative/drop_native_udf.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientnegative/drop_native_udf.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientnegative/drop_native_udf.q.out Thu Mar  5 18:51:32 2015
@@ -1,4 +1 @@
-PREHOOK: query: DROP TEMPORARY FUNCTION max
-PREHOOK: type: DROPFUNCTION
-PREHOOK: Output: max
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask
+FAILED: SemanticException [Error 10301]: Cannot drop native function max

Modified: hive/branches/parquet/ql/src/test/results/clientnegative/invalid_arithmetic_type.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientnegative/invalid_arithmetic_type.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientnegative/invalid_arithmetic_type.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientnegative/invalid_arithmetic_type.q.out Thu Mar  5 18:51:32 2015
@@ -1 +1 @@
-FAILED: SemanticException Line 0:-1 Wrong arguments ''2000-01-01 00:00:01'': No matching method for class org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPMinus with (timestamp, timestamp)
+FAILED: SemanticException Line 0:-1 Wrong arguments ''2000-01-01 00:00:01'': No matching method for class org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNumericMinus with (timestamp, timestamp)

Modified: hive/branches/parquet/ql/src/test/results/clientnegative/udf_add_months_error_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientnegative/udf_add_months_error_1.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientnegative/udf_add_months_error_1.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientnegative/udf_add_months_error_1.q.out Thu Mar  5 18:51:32 2015
@@ -1 +1 @@
-FAILED: SemanticException [Error 10016]: Line 1:18 Argument type mismatch '14567893456': ADD_MONTHS() only takes STRING/TIMESTAMP/DATEWRITABLE types as first argument, got LONG
+FAILED: SemanticException [Error 10016]: Line 1:18 Argument type mismatch '14567893456': add_months only takes STRING_GROUP, DATE_GROUP types as 1st argument, got LONG

Modified: hive/branches/parquet/ql/src/test/results/clientnegative/udf_add_months_error_2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientnegative/udf_add_months_error_2.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientnegative/udf_add_months_error_2.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientnegative/udf_add_months_error_2.q.out Thu Mar  5 18:51:32 2015
@@ -1 +1 @@
-FAILED: SemanticException [Error 10016]: Line 1:32 Argument type mismatch '2.4': ADD_MONTHS() only takes INT types as second argument, got DOUBLE
+FAILED: SemanticException [Error 10016]: Line 1:32 Argument type mismatch '2.4': add_months only takes INT/SHORT/BYTE types as 2nd argument, got DOUBLE

Modified: hive/branches/parquet/ql/src/test/results/clientnegative/udf_last_day_error_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientnegative/udf_last_day_error_1.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientnegative/udf_last_day_error_1.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientnegative/udf_last_day_error_1.q.out Thu Mar  5 18:51:32 2015
@@ -1 +1 @@
-FAILED: SemanticException [Error 10016]: Line 1:16 Argument type mismatch '1423199465': LAST_DAY() only takes STRING/TIMESTAMP/DATEWRITABLE types, got INT
+FAILED: SemanticException [Error 10016]: Line 1:16 Argument type mismatch '1423199465': last_day only takes STRING_GROUP, DATE_GROUP types as 1st argument, got INT

Modified: hive/branches/parquet/ql/src/test/results/clientnegative/udf_last_day_error_2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientnegative/udf_last_day_error_2.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientnegative/udf_last_day_error_2.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientnegative/udf_last_day_error_2.q.out Thu Mar  5 18:51:32 2015
@@ -1 +1 @@
-FAILED: SemanticException [Error 10016]: Line 1:16 Argument type mismatch ''test'': Only primitive type arguments are accepted but map<string,string> is passed
+FAILED: SemanticException [Error 10016]: Line 1:16 Argument type mismatch ''test'': last_day only takes primitive types as 1st argument, got MAP

Modified: hive/branches/parquet/ql/src/test/results/clientnegative/udf_next_day_error_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientnegative/udf_next_day_error_1.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientnegative/udf_next_day_error_1.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientnegative/udf_next_day_error_1.q.out Thu Mar  5 18:51:32 2015
@@ -1 +1 @@
-FAILED: SemanticException [Error 10016]: Line 1:16 Argument type mismatch '145622345': next_day() only takes STRING/TIMESTAMP/DATEWRITABLE types as first argument, got INT
+FAILED: SemanticException [Error 10016]: Line 1:16 Argument type mismatch '145622345': next_day only takes STRING_GROUP, DATE_GROUP types as 1st argument, got INT

Modified: hive/branches/parquet/ql/src/test/results/clientnegative/udf_next_day_error_2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientnegative/udf_next_day_error_2.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientnegative/udf_next_day_error_2.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientnegative/udf_next_day_error_2.q.out Thu Mar  5 18:51:32 2015
@@ -1 +1 @@
-FAILED: SemanticException [Error 10016]: Line 1:30 Argument type mismatch '4': next_day() only takes STRING_GROUP types as second argument, got INT
+FAILED: SemanticException [Error 10016]: Line 1:30 Argument type mismatch '4': next_day only takes STRING_GROUP types as 2nd argument, got INT

Modified: hive/branches/parquet/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out Thu Mar  5 18:51:32 2015
@@ -4,4 +4,5 @@ PREHOOK: Output: database:default
 PREHOOK: Output: default.lookup
 #### A masked pattern was here ####
 nonexistent_file.txt does not exist
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask. nonexistent_file.txt does not exist
+Failed to register default.lookup using class org.apache.hadoop.hive.ql.udf.UDFFileLookup
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/alter_partition_coltype.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/alter_partition_coltype.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/alter_partition_coltype.q.out Thu Mar  5 18:51:32 2015
@@ -255,7 +255,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -433,7 +433,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -590,7 +590,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out Thu Mar  5 18:51:32 2015
@@ -667,7 +667,7 @@ STAGE PLANS:
                       columns _col0,_col1
                       columns.types string:int
                       escape.delim \
-                      hive.serialization.extend.nesting.levels true
+                      hive.serialization.extend.additional.nesting.levels true
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out Thu Mar  5 18:51:32 2015
@@ -294,7 +294,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -514,7 +514,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -856,7 +856,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1173,7 +1173,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1329,7 +1329,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out Thu Mar  5 18:51:32 2015
@@ -412,7 +412,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -762,7 +762,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1071,7 +1071,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1508,7 +1508,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out Thu Mar  5 18:51:32 2015
@@ -719,7 +719,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out Thu Mar  5 18:51:32 2015
@@ -274,7 +274,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -618,7 +618,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -935,7 +935,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1091,7 +1091,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out Thu Mar  5 18:51:32 2015
@@ -225,7 +225,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -396,7 +396,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -782,7 +782,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1052,7 +1052,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1159,7 +1159,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out Thu Mar  5 18:51:32 2015
@@ -241,7 +241,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -412,7 +412,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -798,7 +798,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1068,7 +1068,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1175,7 +1175,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out Thu Mar  5 18:51:32 2015
@@ -202,7 +202,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -363,7 +363,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -590,7 +590,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -751,7 +751,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -854,7 +854,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out Thu Mar  5 18:51:32 2015
@@ -307,7 +307,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -529,7 +529,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -966,7 +966,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1331,7 +1331,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1487,7 +1487,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out Thu Mar  5 18:51:32 2015
@@ -307,7 +307,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -529,7 +529,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -968,7 +968,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1333,7 +1333,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1489,7 +1489,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/bucket_map_join_1.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/bucket_map_join_1.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/bucket_map_join_1.q.out Thu Mar  5 18:51:32 2015
@@ -237,7 +237,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Modified: hive/branches/parquet/ql/src/test/results/clientpositive/bucket_map_join_2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/parquet/ql/src/test/results/clientpositive/bucket_map_join_2.q.out?rev=1664455&r1=1664454&r2=1664455&view=diff
==============================================================================
--- hive/branches/parquet/ql/src/test/results/clientpositive/bucket_map_join_2.q.out (original)
+++ hive/branches/parquet/ql/src/test/results/clientpositive/bucket_map_join_2.q.out Thu Mar  5 18:51:32 2015
@@ -237,7 +237,7 @@ STAGE PLANS:
                   columns _col0
                   columns.types bigint
                   escape.delim \
-                  hive.serialization.extend.nesting.levels true
+                  hive.serialization.extend.additional.nesting.levels true
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe



Mime
View raw message