hive-commits mailing list archives

From hashut...@apache.org
Subject hive git commit: HIVE-16058 : Disable falling back to non-cbo for SemanticException for tests (Vineet Garg, Ashutosh Chauhan via Sergey Shelukhin)
Date Sat, 22 Apr 2017 16:41:57 GMT
Repository: hive
Updated Branches:
  refs/heads/master 6566065c0 -> 8a946ccb3


HIVE-16058 : Disable falling back to non-cbo for SemanticException for tests (Vineet Garg, Ashutosh Chauhan via Sergey Shelukhin)

Signed-off-by: Ashutosh Chauhan <hashutosh@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8a946ccb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8a946ccb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8a946ccb

Branch: refs/heads/master
Commit: 8a946ccb31b64f246b0c80d202e098aa46363a8f
Parents: 6566065
Author: Vineet Garg <vgarg@hortonworks.com>
Authored: Tue Feb 28 09:38:00 2017 -0800
Committer: Ashutosh Chauhan <hashutosh@apache.org>
Committed: Sat Apr 22 09:40:58 2017 -0700

----------------------------------------------------------------------
 .../clientnegative/case_with_row_sequence.q.out |  16 +--
 .../src/test/queries/positive/hbase_queries.q   |   1 +
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |   5 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   7 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  16 +--
 .../hive/ql/udf/generic/GenericUDFInFile.java   |   3 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java      |  50 ++++----
 ql/src/test/queries/clientpositive/cbo_rp_gby.q |   1 -
 .../test/queries/clientpositive/cbo_rp_join.q   |   1 -
 .../test/queries/clientpositive/cbo_rp_limit.q  |   1 -
 .../queries/clientpositive/cbo_rp_semijoin.q    |   1 -
 .../clientpositive/cbo_rp_unionDistinct_2.q     |   1 +
 .../test/queries/clientpositive/cbo_rp_views.q  |   7 +-
 .../queries/clientpositive/cbo_rp_windowing_2.q |   4 +
 .../test/queries/clientpositive/jdbc_handler.q  |   1 +
 .../clientpositive/position_alias_test_1.q      |   5 +-
 .../clientpositive/udaf_percentile_approx_23.q  |   1 +
 .../clientpositive/vector_complex_join.q        |   3 +-
 .../results/clientnegative/acid_overwrite.q.out |   2 +-
 .../clientnegative/alter_view_failure6.q.out    |   2 +-
 .../clientnegative/input_part0_neg.q.out        |   2 +-
 .../insert_into_with_schema.q.out               |   2 +-
 .../insert_into_with_schema1.q.out              |   2 +-
 .../insert_into_with_schema2.q.out              |   2 +-
 .../ptf_negative_InvalidValueBoundary.q.out     |   3 +-
 .../subquery_corr_grandparent.q.out             |   2 +-
 .../subquery_scalar_multi_columns.q.out         |   4 +-
 ql/src/test/results/clientnegative/union2.q.out |   2 +-
 .../clientnegative/wrong_column_type.q.out      |   2 +-
 .../clientpositive/llap/cbo_rp_views.q.out      |   2 +-
 .../clientpositive/llap/jdbc_handler.q.out      |  52 ++++-----
 .../clientpositive/position_alias_test_1.q.out  | 113 ++++++++++++-------
 32 files changed, 168 insertions(+), 148 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/contrib/src/test/results/clientnegative/case_with_row_sequence.q.out
----------------------------------------------------------------------
diff --git a/contrib/src/test/results/clientnegative/case_with_row_sequence.q.out b/contrib/src/test/results/clientnegative/case_with_row_sequence.q.out
index 9ee319f..cf92da6 100644
--- a/contrib/src/test/results/clientnegative/case_with_row_sequence.q.out
+++ b/contrib/src/test/results/clientnegative/case_with_row_sequence.q.out
@@ -10,18 +10,4 @@ POSTHOOK: query: create temporary function row_sequence as
 'org.apache.hadoop.hive.contrib.udf.UDFRowSequence'
 POSTHOOK: type: CREATEFUNCTION
 POSTHOOK: Output: row_sequence
-PREHOOK: query: SELECT CASE WHEN 3 > 2 THEN 10 WHEN row_sequence() > 5 THEN 20 ELSE 30 END
-FROM src LIMIT 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-Execution failed with exit status: 2
-Obtaining error information
-
-Task failed!
-Task ID:
-  Stage-1
-
-Logs:
-
-FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask
+FAILED: SemanticException Stateful expressions cannot be used inside of CASE

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/hbase-handler/src/test/queries/positive/hbase_queries.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_queries.q b/hbase-handler/src/test/queries/positive/hbase_queries.q
index 49fa829..43efd6c 100644
--- a/hbase-handler/src/test/queries/positive/hbase_queries.q
+++ b/hbase-handler/src/test/queries/positive/hbase_queries.q
@@ -180,6 +180,7 @@ DROP TABLE IF EXISTS hbase_table_10;
 CREATE TABLE hbase_table_10 (id bigint, data map<int, int>, str string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ("hbase.columns.mapping" = ":key,cf:map_col2,cf:str2_col");
+set hive.cbo.enable=false;
 insert overwrite table hbase_table_10 select 1 as id, map(10, cast(null as int)) as data , null as str from src limit 1;
 insert into table hbase_table_10 select 2 as id, map(20, cast(null as int)) as data , '1234' as str from src limit 1;
 insert into table hbase_table_10 select 3 as id, map(30, 31) as data , '1234' as str from src limit 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 746c199..9667d71 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql;
 
 import java.text.MessageFormat;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.regex.Matcher;
@@ -412,8 +411,8 @@ public enum ErrorMsg {
   INSERT_CANNOT_CREATE_TEMP_FILE(10293, "Unable to create temp file for insert values "),
   ACID_OP_ON_NONACID_TXNMGR(10294, "Attempt to do update or delete using transaction manager that" +
       " does not support these operations."),
-  NO_INSERT_OVERWRITE_WITH_ACID(10295, "INSERT OVERWRITE not allowed on table with OutputFormat " +
-      "that implements AcidOutputFormat while transaction manager that supports ACID is in use"),
+  NO_INSERT_OVERWRITE_WITH_ACID(10295, "INSERT OVERWRITE not allowed on table {0} with OutputFormat " +
+      "that implements AcidOutputFormat while transaction manager that supports ACID is in use", true),
   VALUES_TABLE_CONSTRUCTOR_NOT_SUPPORTED(10296,
       "Values clause with table constructor not yet supported"),
   ACID_OP_ON_NONACID_TABLE(10297, "Attempt to do update or delete on table {0} that does not use " +
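
[Editor's note] For context: the new `true` argument on NO_INSERT_OVERWRITE_WITH_ACID presumably flags the message as a format template (ErrorMsg.java already imports java.text.MessageFormat above), so the `{0}` placeholder is filled with the offending table name at throw time; the SemanticAnalyzer hunk below passes that name. A minimal, self-contained sketch of the substitution using only the JDK — the ErrorMsg plumbing itself is elided and the class name is illustrative:

import java.text.MessageFormat;

public class AcidErrorMsgSketch {
  // Template text copied from the new ErrorMsg entry (error code 10295).
  static final String TEMPLATE =
      "INSERT OVERWRITE not allowed on table {0} with OutputFormat "
      + "that implements AcidOutputFormat while transaction manager "
      + "that supports ACID is in use";

  public static void main(String[] args) {
    // Produces the string the updated TestDbTxnManager2 assertion expects.
    System.out.println(MessageFormat.format(TEMPLATE, "default.t10"));
  }
}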

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 09f0b9c..1b054a7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -466,12 +466,15 @@ public class CalcitePlanner extends SemanticAnalyzer {
           else if (!conf.getBoolVar(ConfVars.HIVE_IN_TEST) || isMissingStats
               || e instanceof CalciteSemanticException ) {
               reAnalyzeAST = true;
-          } else if (e instanceof SemanticException) {
+          } else if (e instanceof SemanticException && !conf.getBoolVar(ConfVars.HIVE_IN_TEST)) {
             // although, its likely to be a valid exception, we will retry
             // with cbo off anyway.
+            // for tests we would like to avoid retrying to catch cbo failures
               reAnalyzeAST = true;
           } else if (e instanceof RuntimeException) {
             throw (RuntimeException) e;
+          } else if (e instanceof SemanticException) {
+            throw e;
           } else {
             throw new SemanticException(e);
           }
@@ -3497,7 +3500,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       ParseDriver pd = new ParseDriver();
       try {
         ASTNode hintNode = pd.parseHint(hint);
-        qbp.setHints((ASTNode) hintNode);
+        qbp.setHints(hintNode);
       } catch (ParseException e) {
         throw new SemanticException("failed to parse query hint: "+e.getMessage(), e);
       }
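
[Editor's note] The first hunk above is the heart of HIVE-16058: outside of tests (or when stats are missing, or for a CalciteSemanticException) Hive still silently re-analyzes the query with CBO disabled, but under HIVE_IN_TEST a plain SemanticException now propagates, so test runs surface CBO failures instead of masking them. A standalone paraphrase of the post-patch branch order (the enum and method names are illustrative, not Hive API); note that the `!inTest` guard on the second branch is already implied by the first, so in tests a SemanticException falls through to the new rethrow:

public class CboFallbackSketch {
  enum Action { REANALYZE_WITHOUT_CBO, RETHROW, WRAP_AND_THROW }

  // Mirrors the post-patch if/else chain in CalcitePlanner.
  static Action onCboFailure(boolean inTest, boolean missingStats,
      boolean isCalciteSemantic, boolean isSemantic, boolean isRuntime) {
    if (!inTest || missingStats || isCalciteSemantic) {
      return Action.REANALYZE_WITHOUT_CBO;   // legacy fallback path
    } else if (isSemantic && !inTest) {
      return Action.REANALYZE_WITHOUT_CBO;   // dead when reached: inTest is true here
    } else if (isRuntime) {
      return Action.RETHROW;
    } else if (isSemantic) {
      return Action.RETHROW;                 // new in this patch: no retry in tests
    }
    return Action.WRAP_AND_THROW;            // wrap anything else in SemanticException
  }

  public static void main(String[] args) {
    System.out.println(onCboFailure(true, false, false, true, false));  // RETHROW
    System.out.println(onCboFailure(false, false, false, true, false)); // REANALYZE_WITHOUT_CBO
  }
}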

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index e4ca25b..d39b8bd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -267,7 +267,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   protected LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtx;
   private List<LoadTableDesc> loadTableWork;
   private List<LoadFileDesc> loadFileWork;
-  private List<ColumnStatsAutoGatherContext> columnStatsAutoGatherContexts;
+  private final List<ColumnStatsAutoGatherContext> columnStatsAutoGatherContexts;
   private final Map<JoinOperator, QBJoinTree> joinContext;
   private final Map<SMBMapJoinOperator, QBJoinTree> smbMapJoinContext;
   private final HashMap<TableScanOperator, Table> topToTable;
@@ -1449,7 +1449,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           }
           try {
             ASTNode hintNode = pd.parseHint(queryHintStr);
-            qbp.setHints((ASTNode) hintNode);
+            qbp.setHints(hintNode);
             posn++;
           } catch (ParseException e) {
             throw new SemanticException("failed to parse query hint: "+e.getMessage(), e);
@@ -7270,7 +7270,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
     if (ltd != null && SessionState.get() != null) {
       SessionState.get().getLineageState()
-          .mapDirToOp(ltd.getSourcePath(), (FileSinkOperator) output);
+          .mapDirToOp(ltd.getSourcePath(), output);
     } else if ( queryState.getCommandType().equals(HiveOperation.CREATETABLE_AS_SELECT.getOperationName())) {
 
       Path tlocation = null;
@@ -7283,7 +7283,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       }
 
       SessionState.get().getLineageState()
-              .mapDirToOp(tlocation, (FileSinkOperator) output);
+              .mapDirToOp(tlocation, output);
     }
 
     if (LOG.isDebugEnabled()) {
@@ -7357,7 +7357,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     String tableName = tableDesc.getTableName();
     if (!qb.getParseInfo().isInsertIntoTable(tableName)) {
       LOG.debug("Couldn't find table " + tableName + " in insertIntoTable");
-      throw new SemanticException(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID.getMsg());
+      throw new SemanticException(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID, tableName);
     }
     /*
     LOG.info("Modifying config values for ACID write");
@@ -10443,7 +10443,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // Currently, partition spec can only be static partition.
       String k = MetaStoreUtils.encodeTableName(tblName) + Path.SEPARATOR;
       tsDesc.setStatsAggPrefix(tab.getDbName()+"."+k);
-      
+
       // set up WriteEntity for replication
       outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED));
 
@@ -10891,7 +10891,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
     void setCTASToken(ASTNode child) {
     }
-    
+
     void setViewToken(ASTNode child) {
     }
 
@@ -11244,7 +11244,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       if (ctx.getExplainAnalyze() == AnalyzeState.RUNNING) {
         return;
       }
-      
+
       if (!ctx.isCboSucceeded()) {
         saveViewDefinition();
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInFile.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInFile.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInFile.java
index aad4f18..b536194 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInFile.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInFile.java
@@ -86,7 +86,8 @@ public class GenericUDFInFile extends GenericUDF {
     return
       poi.getPrimitiveCategory() == PrimitiveObjectInspector.PrimitiveCategory.STRING ||
       poi.getPrimitiveCategory() == PrimitiveObjectInspector.PrimitiveCategory.CHAR ||
-      poi.getPrimitiveCategory() == PrimitiveObjectInspector.PrimitiveCategory.VARCHAR;
+      poi.getPrimitiveCategory() == PrimitiveObjectInspector.PrimitiveCategory.VARCHAR ||
+      poi.getPrimitiveCategory() == PrimitiveObjectInspector.PrimitiveCategory.VOID;
   }
 
   @Override
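
[Editor's note] The VOID addition above widens GenericUDFInFile's accepted input types; presumably this lets a bare NULL literal (whose constant type is void) pass the type check once queries reach this UDF without the CBO fallback, rather than failing with a type error. A self-contained paraphrase of the widened check (local enum, not Hive's PrimitiveCategory):

import java.util.EnumSet;

public class InFileTypeCheckSketch {
  enum PrimitiveCategory { STRING, CHAR, VARCHAR, VOID, INT, DOUBLE }

  // After this commit: string-family categories plus VOID are accepted.
  static boolean isTypeCompatible(PrimitiveCategory c) {
    return EnumSet.of(PrimitiveCategory.STRING, PrimitiveCategory.CHAR,
        PrimitiveCategory.VARCHAR, PrimitiveCategory.VOID).contains(c);
  }

  public static void main(String[] args) {
    System.out.println(isTypeCompatible(PrimitiveCategory.VOID)); // true after the change
    System.out.println(isTypeCompatible(PrimitiveCategory.INT));  // still false
  }
}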

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 7cae109..afebf03 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -56,7 +56,7 @@ import java.util.Map;
 /**
  * See additional tests in {@link org.apache.hadoop.hive.ql.lockmgr.TestDbTxnManager}
  * Tests here are "end-to-end"ish and simulate concurrent queries.
- * 
+ *
  * The general approach is to use an instance of Driver to use Driver.run() to create tables
  * Use Driver.compile() to generate QueryPlan which can then be passed to HiveTxnManager.acquireLocks().
  * Same HiveTxnManager is used to openTxn()/commitTxn() etc.  This can exercise almost the entire
@@ -284,7 +284,7 @@ public class TestDbTxnManager2 {
     dropTable(new String[] {"T9"});
     conf.setIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES, 2);
     conf.setBoolVar(HiveConf.ConfVars.TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT, true);
-    HiveTxnManager otherTxnMgr = new DbTxnManager(); 
+    HiveTxnManager otherTxnMgr = new DbTxnManager();
     ((DbTxnManager)otherTxnMgr).setHiveConf(conf);
     CommandProcessorResponse cpr = driver.run("create table T9(a int)");
     checkCmdOnDriver(cpr);
@@ -294,7 +294,7 @@ public class TestDbTxnManager2 {
     List<ShowLocksResponseElement> locks = getLocks(txnMgr);
     Assert.assertEquals("Unexpected lock count", 1, locks.size());
     checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T9", null, locks);
-    
+
     cpr = driver.compileAndRespond("drop table T9");
     checkCmdOnDriver(cpr);
     try {
@@ -361,7 +361,7 @@ public class TestDbTxnManager2 {
     useDummyTxnManagerTemporarily(conf);
     cpr = driver.compileAndRespond("insert overwrite table T10 select a, b from T11");
     Assert.assertEquals(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID.getErrorCode(), cpr.getResponseCode());
-    Assert.assertTrue(cpr.getErrorMessage().contains("INSERT OVERWRITE not allowed on table with OutputFormat" +
+    Assert.assertTrue(cpr.getErrorMessage().contains("INSERT OVERWRITE not allowed on table default.t10 with OutputFormat" +
         " that implements AcidOutputFormat while transaction manager that supports ACID is in use"));
 
     useDummyTxnManagerTemporarily(conf);
@@ -919,7 +919,7 @@ public class TestDbTxnManager2 {
     adp.setOperationType(DataOperationType.UPDATE);
     txnHandler.addDynamicPartitions(adp);
     txnMgr.commitTxn();
-    
+
     adp.setTxnid(txnId2);
     txnHandler.addDynamicPartitions(adp);
     LockException expectedException = null;
@@ -950,13 +950,13 @@ public class TestDbTxnManager2 {
     cpr = driver.run("create table if not exists TAB2 (a int, b int) partitioned by (p string) " +
       "clustered by (a) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true')");
     checkCmdOnDriver(cpr);
-    
+
     txnMgr.openTxn(ctx, "Long Running");
     checkCmdOnDriver(driver.compileAndRespond("select a from  TAB_PART where p = 'blah'"));
     txnMgr.acquireLocks(driver.getPlan(), ctx, "Long Running");
     List<ShowLocksResponseElement> locks = getLocks(txnMgr);
     Assert.assertEquals("Unexpected lock count", 1, locks.size());
-    //for some reason this just locks the table; if I alter table to add this partition, then 
+    //for some reason this just locks the table; if I alter table to add this partition, then
     //we end up locking both table and partition with share_read.  (Plan has 2 ReadEntities)...?
     //same for other locks below
     checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB_PART", null, locks);
@@ -995,7 +995,7 @@ public class TestDbTxnManager2 {
     txnMgr2.commitTxn();
     Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
       1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
-    
+
     AcidWriteSetService houseKeeper = new AcidWriteSetService();
     TestTxnCommands2.runHouseKeeperService(houseKeeper, conf);
     //since T3 overlaps with Long Running (still open) GC does nothing
@@ -1006,7 +1006,7 @@ public class TestDbTxnManager2 {
     adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(),
       "default", "tab2", Collections.EMPTY_LIST);
     adp.setOperationType(DataOperationType.UPDATE);
-    txnHandler.addDynamicPartitions(adp);     
+    txnHandler.addDynamicPartitions(adp);
     txnMgr.commitTxn();
 
     locks = getLocks(txnMgr);
@@ -1114,14 +1114,14 @@ public class TestDbTxnManager2 {
     Assert.assertEquals("Unexpected lock count", 2, locks.size());
     checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", "p=two", locks);
     checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", "p=one", locks);
-    
+
     //this simulates the completion of txnid:2
     AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), "default", "tab2",
       Collections.singletonList("p=two"));
     adp.setOperationType(DataOperationType.UPDATE);
     txnHandler.addDynamicPartitions(adp);
     txnMgr2.commitTxn();//txnid:2
-    
+
     locks = getLocks(txnMgr2);
     Assert.assertEquals("Unexpected lock count", 1, locks.size());
     checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", "p=one", locks);
@@ -1132,7 +1132,7 @@ public class TestDbTxnManager2 {
     txnHandler.addDynamicPartitions(adp);
     txnMgr.commitTxn();//txnid:3
     //now both txns concurrently updated TAB2 but different partitions.
-    
+
     Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
       1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u'"));
     Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
@@ -1140,7 +1140,7 @@ public class TestDbTxnManager2 {
     //2 from txnid:1, 1 from txnid:2, 1 from txnid:3
     Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
       4, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab2' and ctc_partition is not null"));
-    
+
     //================
     //test with predicates such that partition pruning doesn't kick in
     cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " +
@@ -1501,7 +1501,7 @@ public class TestDbTxnManager2 {
     ShowLocksResponse rsp = ((DbLockManager)txnMgr.getLockManager()).getLocks(rqst);
     return rsp.getLocks();
   }
-  
+
   @Test
   public void testShowLocksAgentInfo() throws Exception {
     CommandProcessorResponse cpr = driver.run("create table if not exists XYZ (a int, b int)");
@@ -1544,7 +1544,7 @@ public class TestDbTxnManager2 {
     checkCmdOnDriver(driver.run("insert into source2 values " +
   //cc ? -:U-(1/2)     D-(1/2)         cc ? U-(1/3):-             D-(2/2)       I-(1/1) - new part 2
       "(9,100,1,2),      (3,4,1,2),               (5,13,1,3),       (7,8,2,2), (14,15,2,1)"));
-    
+
 
     long txnId1 = txnMgr.openTxn(ctx, "T1");
     checkCmdOnDriver(driver.compileAndRespond("merge into target t using source s on t.a=s.b " +
@@ -1730,7 +1730,7 @@ public class TestDbTxnManager2 {
 
 
   }
-  @Test 
+  @Test
   public void testMergeUnpartitioned01() throws Exception {
     testMergeUnpartitioned(true);
   }
@@ -1753,7 +1753,7 @@ public class TestDbTxnManager2 {
       "stored as orc TBLPROPERTIES ('transactional'='true')"));
     checkCmdOnDriver(driver.run("insert into target values (1,2), (3,4), (5,6), (7,8)"));
     checkCmdOnDriver(driver.run("create table source (a int, b int)"));
-    
+
     long txnid1 = txnMgr.openTxn(ctx, "T1");
     if(causeConflict) {
       checkCmdOnDriver(driver.compileAndRespond("update target set b = 2 where a=1"));
@@ -1791,7 +1791,7 @@ public class TestDbTxnManager2 {
       "default", "target", null, locks).getLockid();
 
     txnMgr.commitTxn();//commit T1
-    
+
     Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " +
         TxnDbUtil.queryToString("select * from WRITE_SET"),
       causeConflict ? 1 : 0,//Inserts are not tracked by WRITE_SET
@@ -1817,7 +1817,7 @@ public class TestDbTxnManager2 {
       1,//
       TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 +
       "and tc_operation_type='d'"));
-    
+
     //complete T2 txn
     LockException expectedException = null;
     try {
@@ -1950,10 +1950,10 @@ public class TestDbTxnManager2 {
 
     checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "target", "p=1/q=2", locks);
     checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "target", "p=1/q=2", locks);
-    
+
     checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "target", "p=1/q=3", locks);
     checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "target", "p=1/q=3", locks);
-    
+
     checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "target", "p=2/q=2", locks);
 
     Assert.assertEquals(
@@ -1978,7 +1978,7 @@ public class TestDbTxnManager2 {
       2,//2 partitions updated
       TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnId1 +
       " and ws_operation_type='u'"));
-    
+
 
     //re-check locks which were in Waiting state - should now be Acquired
     ((DbLockManager)txnMgr2.getLockManager()).checkLock(extLockId);
@@ -2018,8 +2018,8 @@ public class TestDbTxnManager2 {
         TxnDbUtil.queryToString("select * from TXN_COMPONENTS"),
       1,
       TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='u'"));
-    
-    
+
+
     LockException expectedException = null;
     try {
       txnMgr2.commitTxn();
@@ -2087,7 +2087,7 @@ public class TestDbTxnManager2 {
 
     txnMgr2 = (DbTxnManager) TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
     checkCmdOnDriver(driver.compileAndRespond("show tables"));
-    ((DbTxnManager)txnMgr2).acquireLocks(driver.getPlan(), ctx, "Fidler", false);
+    txnMgr2.acquireLocks(driver.getPlan(), ctx, "Fidler", false);
     locks = getLocks();
     Assert.assertEquals("Unexpected lock count", 2, locks.size());
     checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", "t2", "p=1", locks);

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/queries/clientpositive/cbo_rp_gby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_gby.q b/ql/src/test/queries/clientpositive/cbo_rp_gby.q
index 67b3e7b..7aca53a 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_gby.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_gby.q
@@ -1,6 +1,5 @@
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
-set hive.cbo.returnpath.hiveop=true;
 set hive.exec.check.crossproducts=false;
 
 set hive.stats.fetch.column.stats=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/queries/clientpositive/cbo_rp_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_join.q b/ql/src/test/queries/clientpositive/cbo_rp_join.q
index 0a87306..eef2440 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_join.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_join.q
@@ -1,6 +1,5 @@
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
-set hive.cbo.returnpath.hiveop=true;
 set hive.exec.check.crossproducts=false;
 
 set hive.stats.fetch.column.stats=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/queries/clientpositive/cbo_rp_limit.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_limit.q b/ql/src/test/queries/clientpositive/cbo_rp_limit.q
index 488280d..5e54a1b 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_limit.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_limit.q
@@ -1,6 +1,5 @@
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
-set hive.cbo.returnpath.hiveop=true;
 set hive.exec.check.crossproducts=false;
 
 set hive.stats.fetch.column.stats=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/queries/clientpositive/cbo_rp_semijoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_semijoin.q b/ql/src/test/queries/clientpositive/cbo_rp_semijoin.q
index e1eb74d..57db490 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_semijoin.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_semijoin.q
@@ -1,6 +1,5 @@
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
-set hive.cbo.returnpath.hiveop=true;
 set hive.exec.check.crossproducts=false;
 
 set hive.stats.fetch.column.stats=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q b/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q
index ea98bd2..6df4e07 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q
@@ -78,6 +78,7 @@ select key as key, value from u3
 
 drop view if exists v;
 
+set hive.cbo.returnpath.hiveop=false;
 create view v as select distinct * from 
 (
 select distinct * from u1

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/queries/clientpositive/cbo_rp_views.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_views.q b/ql/src/test/queries/clientpositive/cbo_rp_views.q
index 1004303..5f7f3e0 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_views.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_views.q
@@ -1,6 +1,4 @@
 set hive.mapred.mode=nonstrict;
-set hive.cbo.enable=true;
-set hive.cbo.returnpath.hiveop=true;
 set hive.exec.check.crossproducts=false;
 
 set hive.stats.fetch.column.stats=true;
@@ -10,6 +8,7 @@ set hive.auto.convert.join=false;
 create view v1 as select c_int, value, c_boolean, dt from cbo_t1;
 create view v2 as select c_int, value from cbo_t2;
 
+set hive.cbo.returnpath.hiveop=true;
 select value from v1 where c_boolean=false;
 select max(c_int) from v1 group by (c_boolean);
 
@@ -18,8 +17,10 @@ select count(v1.c_int)  from v1 join v2 on v1.c_int = v2.c_int;
 
 select count(*) from v1 a join v1 b on a.value = b.value;
 
+set hive.cbo.returnpath.hiveop=false;
 create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean;
 
+set hive.cbo.returnpath.hiveop=true;
 select count(val) from v3 where val != '1';
 with q1 as ( select key from cbo_t1 where key = '1')
 select count(*) from q1;
@@ -27,11 +28,13 @@ select count(*) from q1;
 with q1 as ( select value from v1 where c_boolean = false)
 select count(value) from q1 ;
 
+set hive.cbo.returnpath.hiveop=false;
 create view v4 as
 with q1 as ( select key,c_int from cbo_t1  where key = '1')
 select * from q1
 ;
 
+set hive.cbo.returnpath.hiveop=true;
 with q1 as ( select c_int from q2 where c_boolean = false),
 q2 as ( select c_int,c_boolean from v1  where value = '1')
 select sum(c_int) from (select c_int from q1) a;

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q b/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q
index b84a660..6362be3 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q
@@ -198,6 +198,7 @@ row_number() over(distribute by p_mfgr sort by p_mfgr, p_name) as rn
 from part
 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following);
 
+set hive.cbo.returnpath.hiveop=false;
 -- 22. testViewAsTableInputWithWindowing
 create view IF NOT EXISTS mfgr_price_view as 
 select p_mfgr, p_brand, 
@@ -205,6 +206,7 @@ round(sum(p_retailprice),2) as s
 from part 
 group by p_mfgr, p_brand;
         
+set hive.cbo.returnpath.hiveop=true;
 select * 
 from (
 select p_mfgr, p_brand, s, 
@@ -219,6 +221,7 @@ round(sum(s) over w1 ,2)  as s1
 from mfgr_price_view 
 window w1 as (distribute by p_mfgr sort by p_brand rows between 2 preceding and current row);
 
+set hive.cbo.returnpath.hiveop=false;
 -- 23. testCreateViewWithWindowingQuery
 create view IF NOT EXISTS mfgr_brand_price_view as 
 select p_mfgr, p_brand, 
@@ -226,6 +229,7 @@ round(sum(p_retailprice) over w1,2) as s
 from part 
 window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and current row);
         
+set hive.cbo.returnpath.hiveop=true ;
 select * from mfgr_brand_price_view;        
         
 -- 24. testLateralViews

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/queries/clientpositive/jdbc_handler.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/jdbc_handler.q b/ql/src/test/queries/clientpositive/jdbc_handler.q
index 2038617..a37e547 100644
--- a/ql/src/test/queries/clientpositive/jdbc_handler.q
+++ b/ql/src/test/queries/clientpositive/jdbc_handler.q
@@ -1,3 +1,4 @@
+set hive.strict.checks.cartesian.product= false;
 CREATE EXTERNAL TABLE tables
 (
 id int,

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/queries/clientpositive/position_alias_test_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/position_alias_test_1.q b/ql/src/test/queries/clientpositive/position_alias_test_1.q
index 599bc08..404f182 100644
--- a/ql/src/test/queries/clientpositive/position_alias_test_1.q
+++ b/ql/src/test/queries/clientpositive/position_alias_test_1.q
@@ -2,8 +2,7 @@ create table alias_test_01(a INT, b STRING) ;
                          create table alias_test_02(a INT, b STRING) ;
                          create table alias_test_03(a INT, b STRING) ;
                          set hive.groupby.position.alias = true;
-                         set hive.cbo.enable=true;
-
+                         set hive.strict.checks.cartesian.product = false;
 
                          explain
                          select * from
@@ -15,4 +14,4 @@ create table alias_test_01(a INT, b STRING) ;
                          on alias01.a = alias02.a
                          left join
                          alias_test_03 alias03
-                         on alias01.a = alias03.a;
\ No newline at end of file
+                         on alias01.a = alias03.a;

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q b/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
index 05e64a3..8c66726 100644
--- a/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
+++ b/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
@@ -99,4 +99,5 @@ select percentile_approx(key, 0.5) from bucket;
 select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket;
 
 -- test where number of elements is zero
+set hive.cbo.enable=false;
 select percentile_approx(key, array(0.50, 0.70, 0.90, 0.95, 0.99)) from bucket where key > 10000;

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/queries/clientpositive/vector_complex_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_complex_join.q b/ql/src/test/queries/clientpositive/vector_complex_join.q
index c6926cb..3c70d9b 100644
--- a/ql/src/test/queries/clientpositive/vector_complex_join.q
+++ b/ql/src/test/queries/clientpositive/vector_complex_join.q
@@ -23,7 +23,8 @@ INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2) FROM src LIMIT 1;
 CREATE TABLE test2b (a INT) STORED AS ORC;
 INSERT OVERWRITE TABLE test2b VALUES (2), (3), (4);
 
+set hive.cbo.enable=false;
 explain vectorization expression
 select *  from test2b join test2a on test2b.a = test2a.a[1];
 
-select *  from test2b join test2a on test2b.a = test2a.a[1];
\ No newline at end of file
+select *  from test2b join test2a on test2b.a = test2a.a[1];

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientnegative/acid_overwrite.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/acid_overwrite.q.out b/ql/src/test/results/clientnegative/acid_overwrite.q.out
index 0940106..15070fa 100644
--- a/ql/src/test/results/clientnegative/acid_overwrite.q.out
+++ b/ql/src/test/results/clientnegative/acid_overwrite.q.out
@@ -16,4 +16,4 @@ POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@acid_uanp
 POSTHOOK: Lineage: acid_uanp.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: acid_uanp.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-FAILED: SemanticException [Error 10295]: INSERT OVERWRITE not allowed on table with OutputFormat that implements AcidOutputFormat while transaction manager that supports ACID is in use
+FAILED: SemanticException [Error 10295]: INSERT OVERWRITE not allowed on table default.acid_uanp with OutputFormat that implements AcidOutputFormat while transaction manager that supports ACID is in use

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientnegative/alter_view_failure6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/alter_view_failure6.q.out b/ql/src/test/results/clientnegative/alter_view_failure6.q.out
index 1ada547..827dd93 100644
--- a/ql/src/test/results/clientnegative/alter_view_failure6.q.out
+++ b/ql/src/test/results/clientnegative/alter_view_failure6.q.out
@@ -19,5 +19,5 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@xxx7
 POSTHOOK: Lineage: xxx7.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
-FAILED: SemanticException Queries against partitioned tables without a partition filter are disabled for safety reasons. If you know what you are doing, please sethive.strict.checks.large.query to false and that hive.mapred.mode is not set to 'strict' to proceed. Note that if you may get errors or incorrect results if you make a mistake while using some of the unsafe features. No partition predicate for Alias "default.xxx7:srcpart" Table "srcpart"
+FAILED: SemanticException Queries against partitioned tables without a partition filter are disabled for safety reasons. If you know what you are doing, please sethive.strict.checks.large.query to false and that hive.mapred.mode is not set to 'strict' to proceed. Note that if you may get errors or incorrect results if you make a mistake while using some of the unsafe features. No partition predicate for Alias "default.srcpart" Table "srcpart"
 FAILED: SemanticException [Error 10056]: The query does not reference any valid partition. To run this query, set hive.mapred.mode=nonstrict

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientnegative/input_part0_neg.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/input_part0_neg.q.out b/ql/src/test/results/clientnegative/input_part0_neg.q.out
index 8c5761b..46a0f61 100644
--- a/ql/src/test/results/clientnegative/input_part0_neg.q.out
+++ b/ql/src/test/results/clientnegative/input_part0_neg.q.out
@@ -1 +1 @@
-FAILED: SemanticException Queries against partitioned tables without a partition filter are disabled for safety reasons. If you know what you are doing, please sethive.strict.checks.large.query to false and that hive.mapred.mode is not set to 'strict' to proceed. Note that if you may get errors or incorrect results if you make a mistake while using some of the unsafe features. No partition predicate for Alias "x" Table "srcpart"
+FAILED: SemanticException Queries against partitioned tables without a partition filter are disabled for safety reasons. If you know what you are doing, please sethive.strict.checks.large.query to false and that hive.mapred.mode is not set to 'strict' to proceed. Note that if you may get errors or incorrect results if you make a mistake while using some of the unsafe features. No partition predicate for Alias "default.srcpart" Table "srcpart"

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientnegative/insert_into_with_schema.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/insert_into_with_schema.q.out b/ql/src/test/results/clientnegative/insert_into_with_schema.q.out
index 660b39c..c94f5d8 100644
--- a/ql/src/test/results/clientnegative/insert_into_with_schema.q.out
+++ b/ql/src/test/results/clientnegative/insert_into_with_schema.q.out
@@ -22,4 +22,4 @@ POSTHOOK: query: create table source(s1 int, s2 int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:x314n
 POSTHOOK: Output: x314n@source
-FAILED: SemanticException 0:0 Expected 1 columns for insclause-0/x314n@source; select produces 2 columns. Error encountered near token 'TOK_ALLCOLREF'
+FAILED: SemanticException 0:0 Expected 1 columns for insclause-0/x314n@source; select produces 2 columns. Error encountered near token 'values__tmp__table__1.tmp_values_col2'

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientnegative/insert_into_with_schema1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/insert_into_with_schema1.q.out b/ql/src/test/results/clientnegative/insert_into_with_schema1.q.out
index b1cb29b..1e8f5a3 100644
--- a/ql/src/test/results/clientnegative/insert_into_with_schema1.q.out
+++ b/ql/src/test/results/clientnegative/insert_into_with_schema1.q.out
@@ -22,4 +22,4 @@ POSTHOOK: query: create table source(s1 int, s2 int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:x314n
 POSTHOOK: Output: x314n@source
-FAILED: SemanticException 0:0 Expected 2 columns for insclause-0/x314n@source; select produces 1 columns. Error encountered near token 'TOK_ALLCOLREF'
+FAILED: SemanticException 0:0 Expected 2 columns for insclause-0/x314n@source; select produces 1 columns. Error encountered near token 'values__tmp__table__1.tmp_values_col1'

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientnegative/insert_into_with_schema2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/insert_into_with_schema2.q.out b/ql/src/test/results/clientnegative/insert_into_with_schema2.q.out
index ed55b89..4243e51 100644
--- a/ql/src/test/results/clientnegative/insert_into_with_schema2.q.out
+++ b/ql/src/test/results/clientnegative/insert_into_with_schema2.q.out
@@ -30,4 +30,4 @@ POSTHOOK: query: create table target1(x int, y int, z int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:x314n
 POSTHOOK: Output: x314n@target1
-FAILED: SemanticException 0:0 Expected 3 columns for insclause-0/x314n@target1; select produces 2 columns. Error encountered near token 'TOK_ALLCOLREF'
+FAILED: SemanticException 0:0 Expected 3 columns for insclause-0/x314n@target1; select produces 2 columns. Error encountered near token 'source.s2'

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out b/ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out
index 36beba5..e80d828 100644
--- a/ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out
+++ b/ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out
@@ -1,2 +1 @@
-FAILED: SemanticException Failed to breakup Windowing invocations into Groups. At least 1 group must only depend on input columns. Also check for circular dependencies.
-Underlying error: org.apache.hadoop.hive.ql.parse.SemanticException: Line 8:43 Invalid table alias or column reference 'p_complex': (possible column names are: p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment)
+FAILED: SemanticException [Error 10004]: Line 8:43 Invalid table alias or column reference 'p_complex': (possible column names are: p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment)

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientnegative/subquery_corr_grandparent.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/subquery_corr_grandparent.q.out b/ql/src/test/results/clientnegative/subquery_corr_grandparent.q.out
index 4475502..4bc7c64 100644
--- a/ql/src/test/results/clientnegative/subquery_corr_grandparent.q.out
+++ b/ql/src/test/results/clientnegative/subquery_corr_grandparent.q.out
@@ -1 +1 @@
-FAILED: SemanticException [Error 10249]: Line 4:53 Unsupported SubQuery Expression 'p_name': Nested SubQuery expressions are not supported.
+FAILED: SemanticException [Error 10004]: Line 4:95 Invalid table alias or column reference 'x': (possible column names are: p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment)

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientnegative/subquery_scalar_multi_columns.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/subquery_scalar_multi_columns.q.out b/ql/src/test/results/clientnegative/subquery_scalar_multi_columns.q.out
index 48403f9..ee1e44e 100644
--- a/ql/src/test/results/clientnegative/subquery_scalar_multi_columns.q.out
+++ b/ql/src/test/results/clientnegative/subquery_scalar_multi_columns.q.out
@@ -1 +1,3 @@
-FAILED: SemanticException Line 0:-1 Unsupported SubQuery Expression 'p_type': Only SubQuery expressions that are top level conjuncts are allowed
+FAILED: SemanticException Line 2:67 Invalid SubQuery expression 'p_type' in definition of SubQuery sq_1 [
+(select p_size, p_type from part)
+] used as sq_1 at Line 0:-1: SubQuery can contain only 1 item in Select List.

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientnegative/union2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/union2.q.out b/ql/src/test/results/clientnegative/union2.q.out
index 07b6fa9..40971d8 100644
--- a/ql/src/test/results/clientnegative/union2.q.out
+++ b/ql/src/test/results/clientnegative/union2.q.out
@@ -14,4 +14,4 @@ POSTHOOK: query: create table if not exists union2_t2(s string, c string, v stri
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@union2_t2
-FAILED: SemanticException 8:47 Schema of both sides of union should match: Column v is of type array<string> on first table and type double on second table. Error encountered near token 'union2_t2'
+FAILED: SemanticException 8:47 Schema of both sides of setop should match: Column v is of type array<string> on first table and type double on second table. Error encountered near token 'union2_t2'

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientnegative/wrong_column_type.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/wrong_column_type.q.out b/ql/src/test/results/clientnegative/wrong_column_type.q.out
index 6ff90ea..a3b28c9 100644
--- a/ql/src/test/results/clientnegative/wrong_column_type.q.out
+++ b/ql/src/test/results/clientnegative/wrong_column_type.q.out
@@ -6,4 +6,4 @@ POSTHOOK: query: CREATE TABLE dest1(a float)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dest1
-FAILED: NoMatchingMethodException No matching method for class org.apache.hadoop.hive.ql.udf.UDFToFloat with (array<decimal(1,0)>). Possible choices: _FUNC_(bigint)  _FUNC_(boolean)  _FUNC_(decimal(38,18))  _FUNC_(double)  _FUNC_(int)  _FUNC_(smallint)  _FUNC_(string)  _FUNC_(timestamp)  _FUNC_(tinyint)  _FUNC_(void)  
+FAILED: NoMatchingMethodException No matching method for class org.apache.hadoop.hive.ql.udf.UDFToFloat with (array<int>). Possible choices: _FUNC_(bigint)  _FUNC_(boolean)  _FUNC_(decimal(38,18))  _FUNC_(double)  _FUNC_(int)  _FUNC_(smallint)  _FUNC_(string)  _FUNC_(timestamp)  _FUNC_(tinyint)  _FUNC_(void)  

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out
index d603957..25ea4cf 100644
--- a/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out
+++ b/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out
@@ -173,7 +173,7 @@ POSTHOOK: Input: default@cbo_t1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@v4
 POSTHOOK: Lineage: v4.c_int SIMPLE [(cbo_t1)cbo_t1.FieldSchema(name:c_int, type:int, comment:null), ]
-POSTHOOK: Lineage: v4.key SIMPLE [(cbo_t1)cbo_t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: v4.key SIMPLE []
 PREHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false),
 q2 as ( select c_int,c_boolean from v1  where value = '1')
 select sum(c_int) from (select c_int from q1) a

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out b/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
index 74bd60b..483b7f9 100644
--- a/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
+++ b/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
@@ -101,6 +101,7 @@ srcbucket	default	MANAGED_TABLE
 srcbucket2	default	MANAGED_TABLE
 srcpart	default	MANAGED_TABLE
 tables	default	EXTERNAL_TABLE
+Warning: Shuffle Join MERGEJOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain
 select
   t1.name as a, t2.key as b
@@ -130,7 +131,7 @@ STAGE PLANS:
     Tez
 #### A masked pattern was here ####
       Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 5 (SIMPLE_EDGE)
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE), Reducer 5 (CUSTOM_SIMPLE_EDGE)
         Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
         Reducer 5 <- Map 4 (SIMPLE_EDGE)
 #### A masked pattern was here ####
@@ -142,14 +143,12 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: name (type: string)
-                    outputColumnNames: _col1
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Reduce Output Operator
-                      key expressions: 1.0 (type: double)
-                      sort order: +
-                      Map-reduce partition columns: 1.0 (type: double)
+                      sort order: 
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                      value expressions: _col1 (type: string)
+                      value expressions: _col0 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
         Map 4 
@@ -158,18 +157,18 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (key - 1) is not null (type: boolean)
-                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    predicate: ((UDFToDouble(key) - 1.0) = 1.0) (type: boolean)
+                    Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 205 Data size: 17835 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 205 Data size: 17835 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -179,28 +178,24 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 1.0 (type: double)
-                  1 (_col0 - 1) (type: double)
-                outputColumnNames: _col1, _col2
-                Statistics: Num rows: 225 Data size: 19618 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col1 (type: string), _col2 (type: string)
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 225 Data size: 19618 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Statistics: Num rows: 225 Data size: 19618 Basic stats: COMPLETE Column stats: NONE
+                  0 
+                  1 
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 103 Data size: 9064 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 103 Data size: 9064 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 225 Data size: 19618 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 103 Data size: 9064 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 225 Data size: 19618 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 103 Data size: 9064 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -212,12 +207,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 205 Data size: 17835 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
-                  key expressions: (_col0 - 1) (type: double)
-                  sort order: +
-                  Map-reduce partition columns: (_col0 - 1) (type: double)
-                  Statistics: Num rows: 205 Data size: 17835 Basic stats: COMPLETE Column stats: COMPLETE
+                  sort order: 
+                  Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col0 (type: string)
 
   Stage: Stage-0
@@ -226,6 +219,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Shuffle Join MERGEJOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select
   t1.name as a, t2.key as b
 from

http://git-wip-us.apache.org/repos/asf/hive/blob/8a946ccb/ql/src/test/results/clientpositive/position_alias_test_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/position_alias_test_1.q.out b/ql/src/test/results/clientpositive/position_alias_test_1.q.out
index 9053bf1..dc3fb5a 100644
--- a/ql/src/test/results/clientpositive/position_alias_test_1.q.out
+++ b/ql/src/test/results/clientpositive/position_alias_test_1.q.out
@@ -22,6 +22,7 @@ POSTHOOK: query: create table alias_test_03(a INT, b STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@alias_test_03
+Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain
                          select * from
                          alias_test_01 alias01
@@ -47,12 +48,13 @@ POSTHOOK: query: explain
                          on alias01.a = alias03.a
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -63,23 +65,23 @@ STAGE PLANS:
               outputColumnNames: b
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                keys: 2017 (type: int), b (type: string)
+                keys: b (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
-                  key expressions: 2017 (type: int), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: 2017 (type: int), _col1 (type: string)
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
-          keys: 2017 (type: int), KEY._col1 (type: string)
+          keys: KEY._col0 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1
+          outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: 2017 (type: int), _col1 (type: string)
+            expressions: 2017 (type: int), _col0 (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             File Output Operator
@@ -89,56 +91,83 @@ STAGE PLANS:
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-2
+  Stage: Stage-1
     Map Reduce
       Map Operator Tree:
           TableScan
+            alias: alias01
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Select Operator
+              expressions: a (type: int), b (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Reduce Output Operator
+                sort order: 
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                value expressions: _col0 (type: int), _col1 (type: string)
+          TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: int)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: int)
+              sort order: 
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              value expressions: _col1 (type: string)
+              value expressions: _col0 (type: int), _col1 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Outer Join0 to 1
+          filter predicates:
+            0 {(VALUE._col0 = 2017)}
+            1 
+          keys:
+            0 
+            1 
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
           TableScan
-            alias: alias01
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Reduce Output Operator
-              key expressions: a (type: int)
+              key expressions: _col0 (type: int)
               sort order: +
-              Map-reduce partition columns: a (type: int)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              value expressions: b (type: string)
+              Map-reduce partition columns: _col0 (type: int)
+              Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: string)
           TableScan
             alias: alias03
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Reduce Output Operator
-              key expressions: a (type: int)
-              sort order: +
-              Map-reduce partition columns: a (type: int)
+            Select Operator
+              expressions: a (type: int), b (type: string)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              value expressions: b (type: string)
+              Reduce Output Operator
+                key expressions: _col0 (type: int)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: int)
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Left Outer Join0 to 1
-               Left Outer Join0 to 2
           keys:
-            0 a (type: int)
+            0 _col0 (type: int)
             1 _col0 (type: int)
-            2 a (type: int)
-          outputColumnNames: _col0, _col1, _col5, _col6, _col7, _col8
-          Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: int), _col8 (type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-            Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+          Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
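
The simplified Group By (constant key 2017 dropped) and the new cross-product warning above reflect the CBO plan: the constant is folded out of the aggregation keys, and the equi-join against that constant-valued key degenerates into a cross product with a filter predicate. A minimal sketch of a query with this plan shape follows; the archived test query is truncated in the hunks above, so the subquery structure and the table name alias_test_02 are assumptions, while alias_test_01/alias_test_03, the aliases, columns a/b, and the constant 2017 all appear in the diff:

    -- Hedged sketch, not the archived test's exact query: alias_test_02 and
    -- the subquery shape are assumed; only the names above come from the diff.
    explain
    select *
    from alias_test_01 alias01
    left outer join
      (select 2017 as a, b from alias_test_02 group by b) alias02
      -- alias02.a is the constant 2017, so this equi-join degenerates into
      -- a filtered cross product (hence the new Shuffle Join warning)
      on alias01.a = alias02.a
    left outer join alias_test_03 alias03
      on alias01.a = alias03.a;

In the new plan this matches Stage-3 (group by b alone, with 2017 re-added in the Select Operator) and Stage-1 (cross-product join with the filter predicate {(VALUE._col0 = 2017)}).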

