hive-commits mailing list archives

From xu...@apache.org
Subject [44/55] [abbrv] hive git commit: HIVE-10807 : Invalidate basic stats for insert queries if autogather=false (Ashutosh Chauhan via Gopal V)
Date Wed, 28 Oct 2015 12:11:37 GMT
HIVE-10807 : Invalidate basic stats for insert queries if autogather=false (Ashutosh Chauhan via Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/86346fb1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/86346fb1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/86346fb1

Branch: refs/heads/spark
Commit: 86346fb150f0358e40b6435077eccda3e07d17e2
Parents: f9517ef
Author: Ashutosh Chauhan <hashutosh@apache.org>
Authored: Mon Oct 26 17:45:59 2015 -0700
Committer: Ashutosh Chauhan <hashutosh@apache.org>
Committed: Mon Oct 26 17:45:59 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/QueryProperties.java  |  10 --
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  13 +-
 .../hive/ql/optimizer/GenMapRedUtils.java       |   3 +-
 .../hive/ql/optimizer/StatsOptimizer.java       |  38 +++--
 .../hadoop/hive/ql/parse/QBParseInfo.java       |   9 --
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   6 -
 .../test/queries/clientpositive/insert_into1.q  |  10 +-
 .../test/queries/clientpositive/insert_into2.q  |   8 +
 .../clientpositive/bucket_map_join_1.q.out      |   4 -
 .../clientpositive/bucket_map_join_2.q.out      |   4 -
 .../encryption_insert_partition_dynamic.q.out   |   4 -
 .../encryption_join_unencrypted_tbl.q.out       |   4 -
 .../results/clientpositive/insert_into1.q.out   | 151 +++++++++++++++++++
 .../results/clientpositive/insert_into2.q.out   |  69 +++++++++
 .../spark/bucket_map_join_1.q.out               |   8 -
 .../spark/bucket_map_join_2.q.out               |   8 -
 .../clientpositive/spark/insert_into1.q.out     | 116 ++++++++++++++
 .../clientpositive/spark/insert_into2.q.out     |  75 +++++++++
 .../results/clientpositive/spark/stats3.q.out   |   2 -
 ql/src/test/results/clientpositive/stats3.q.out |   2 -
 .../clientpositive/tez/insert_into1.q.out       | 120 +++++++++++++++
 .../clientpositive/tez/insert_into2.q.out       |  75 +++++++++
 22 files changed, 661 insertions(+), 78 deletions(-)
----------------------------------------------------------------------
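The change, in short: when hive.stats.autogather=false, an INSERT now marks the target table's or partition's basic stats as inaccurate (COLUMN_STATS_ACCURATE=false), so StatsOptimizer no longer answers aggregate queries from stale metastore stats. A minimal sketch of the user-visible scenario the new tests cover, assuming a HiveServer2 at localhost:10000, the Hive JDBC driver on the classpath, and the insert_into1 table from insert_into1.q already created:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class AutogatherOffCount {
      public static void main(String[] args) throws Exception {
        // Connection URL and table are assumptions for illustration.
        try (Connection con = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement st = con.createStatement()) {
          st.execute("set hive.compute.query.using.stats=true");
          st.execute("set hive.stats.autogather=false");
          st.execute("insert into table insert_into1 values(1, 'abc')");
          // Before this patch, StatsOptimizer could answer the count from
          // metastore stats the insert never updated, missing the new row;
          // with the fix, the insert marks COLUMN_STATS_ACCURATE=false and
          // the count is computed by scanning the table.
          try (ResultSet rs = st.executeQuery("select count(*) from insert_into1")) {
            while (rs.next()) {
              System.out.println(rs.getLong(1));
            }
          }
        }
      }
    }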


http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
index e8f7fba..3bc9432 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
@@ -39,7 +39,6 @@ public class QueryProperties {
   boolean noScanAnalyzeCommand;
   boolean analyzeRewrite;
   boolean ctas;
-  boolean insertToTable;
   int outerQueryLimit;
 
   boolean hasJoin = false;
@@ -115,14 +114,6 @@ public class QueryProperties {
     this.ctas = ctas;
   }
 
-  public boolean isInsertToTable() {
-    return insertToTable;
-  }
-
-  public void setInsertToTable(boolean insertToTable) {
-    this.insertToTable = insertToTable;
-  }
-
   public int getOuterQueryLimit() {
     return outerQueryLimit;
   }
@@ -276,7 +267,6 @@ public class QueryProperties {
     noScanAnalyzeCommand = false;
     analyzeRewrite = false;
     ctas = false;
-    insertToTable = false;
     outerQueryLimit = -1;
 
     hasJoin = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index c64d8d1..a2dea67 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1464,6 +1464,7 @@ public class Hive {
 
       newTPart = getPartition(tbl, partSpec, true, newPartPath.toString(),
           inheritTableSpecs, newFiles);
+
       // recreate the partition if it existed before
       if (isSkewedStoreAsSubdir) {
         org.apache.hadoop.hive.metastore.api.Partition newCreatedTpart = newTPart.getTPartition();
@@ -1474,12 +1475,18 @@ public class Hive {
         /* Add list bucketing location mappings. */
         skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
         newCreatedTpart.getSd().setSkewedInfo(skewedInfo);
+        if(!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+          newTPart.getParameters().put(StatsSetupConst.COLUMN_STATS_ACCURATE, "false");
+        }
         alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newCreatedTpart));
         newTPart = getPartition(tbl, partSpec, true, newPartPath.toString(), inheritTableSpecs,
             newFiles);
         return new Partition(tbl, newCreatedTpart);
       }
-
+      if(!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+        newTPart.getParameters().put(StatsSetupConst.COLUMN_STATS_ACCURATE, "false");
+        alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newTPart.getTPartition()));
+      }
     } catch (IOException e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException(e);
@@ -1714,6 +1721,10 @@ private void constructOneLBLocationMap(FileStatus fSta,
       } catch (IOException e) {
         throw new HiveException("addFiles: filesystem error in check phase", e);
       }
+    }
+    if(!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+      tbl.getParameters().put(StatsSetupConst.COLUMN_STATS_ACCURATE, "false");
+    }  else {
       tbl.getParameters().put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, "true");
     }
 
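Both load paths above apply the same rule: with autogather off, flip COLUMN_STATS_ACCURATE to false on the target's parameter map before the alter call; with autogather on, record that stats will be produced by the stats task. A self-contained sketch of that rule, with a plain HashMap standing in for the Table/Partition parameter map and string literals standing in for the StatsSetupConst constants:

    import java.util.HashMap;
    import java.util.Map;

    public class StatsInvalidationSketch {
      static final String COLUMN_STATS_ACCURATE = "COLUMN_STATS_ACCURATE";
      static final String STATS_GENERATED_VIA_STATS_TASK = "STATS_GENERATED_VIA_STATS_TASK";

      static void afterLoad(Map<String, String> params, boolean statsAutogather) {
        if (!statsAutogather) {
          // No stats task will run for this insert, so the existing basic
          // stats (numRows, rawDataSize, ...) can no longer be trusted.
          params.put(COLUMN_STATS_ACCURATE, "false");
        } else {
          params.put(STATS_GENERATED_VIA_STATS_TASK, "true");
        }
      }

      public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put(COLUMN_STATS_ACCURATE, "true"); // state before the insert
        afterLoad(params, false);                  // hive.stats.autogather=false
        System.out.println(params.get(COLUMN_STATS_ACCURATE)); // prints: false
      }
    }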

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 109b938..c22c35f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -1500,8 +1500,7 @@ public final class GenMapRedUtils {
    * @return
    */
   public static boolean isInsertInto(ParseContext parseCtx, FileSinkOperator fsOp) {
-    return fsOp.getConf().getTableInfo().getTableName() != null &&
-        parseCtx.getQueryProperties().isInsertToTable();
+    return fsOp.getConf().getTableInfo().getTableName() != null;
   }
 
   /**

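With the insertToTable flag removed (see the QueryProperties and QBParseInfo hunks), isInsertInto reduces to a null check: a FileSinkOperator that targets a table carries a table name in its TableDesc, while one writing to a plain directory does not. A trivial stand-alone sketch of the predicate, with a String standing in for fsOp.getConf().getTableInfo().getTableName():

    public class IsInsertIntoSketch {
      static boolean isInsertInto(String tableNameInSinkDesc) {
        // null table name => the sink writes to a directory, not a table
        return tableNameInSinkDesc != null;
      }

      public static void main(String[] args) {
        System.out.println(isInsertInto("default.insert_into1")); // true
        System.out.println(isInsertInto(null));                   // false
      }
    }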
http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 5a21e6b..aa204c7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -144,17 +144,23 @@ public class StatsOptimizer implements Transform {
     }
 
     enum LongSubType {
-      BIGINT { Object cast(long longValue) { return longValue; } }, 
-      INT { Object cast(long longValue) { return (int)longValue; } },
-      SMALLINT { Object cast(long longValue) { return (short)longValue; } },
-      TINYINT { Object cast(long longValue) { return (byte)longValue; } };
+      BIGINT { @Override
+      Object cast(long longValue) { return longValue; } },
+      INT { @Override
+      Object cast(long longValue) { return (int)longValue; } },
+      SMALLINT { @Override
+      Object cast(long longValue) { return (short)longValue; } },
+      TINYINT { @Override
+      Object cast(long longValue) { return (byte)longValue; } };
 
       abstract Object cast(long longValue);
     }
 
     enum DoubleSubType {
-      DOUBLE { Object cast(double doubleValue) { return doubleValue; } },
-      FLOAT { Object cast(double doubleValue) { return (float) doubleValue; } };
+      DOUBLE { @Override
+      Object cast(double doubleValue) { return doubleValue; } },
+      FLOAT { @Override
+      Object cast(double doubleValue) { return (float) doubleValue; } };
 
       abstract Object cast(double doubleValue);
     }
@@ -221,7 +227,7 @@ public class StatsOptimizer implements Transform {
         // Since we have done an exact match on TS-SEL-GBY-RS-GBY-(SEL)-FS
         // we need not to do any instanceof checks for following.
         GroupByOperator pgbyOp = (GroupByOperator)stack.get(2);
-        if (pgbyOp.getConf().getOutputColumnNames().size() != 
+        if (pgbyOp.getConf().getOutputColumnNames().size() !=
             pgbyOp.getConf().getAggregators().size()) {
           return null;
         }
@@ -260,7 +266,7 @@ public class StatsOptimizer implements Transform {
         FileSinkOperator fsOp = (FileSinkOperator)last;
         if (fsOp.getNumChild() > 0) {
           // looks like a subq plan.
-          return null;  // todo we can collapse this part of tree into single TS 
+          return null;  // todo we can collapse this part of tree into single TS
         }
 
         Table tbl = tsOp.getConf().getTableMetadata();
@@ -296,7 +302,7 @@ public class StatsOptimizer implements Transform {
               return null;
             }
             switch (category) {
-              case LONG: 
+              case LONG:
                 oneRow.add(Long.valueOf(constant) * rowCnt);
                 break;
               case DOUBLE:
@@ -436,7 +442,7 @@ public class StatsOptimizer implements Transform {
               switch (type) {
                 case Integeral: {
                   LongSubType subType = LongSubType.valueOf(name);
-                  
+
                   Long maxVal = null;
                   Collection<List<ColumnStatisticsObj>> result =
                       verifyAndGetPartStats(hive, tbl, colName, parts);
@@ -462,7 +468,7 @@ public class StatsOptimizer implements Transform {
                 }
                 case Double: {
                   DoubleSubType subType = DoubleSubType.valueOf(name);
-                  
+
                   Double maxVal = null;
                   Collection<List<ColumnStatisticsObj>> result =
                       verifyAndGetPartStats(hive, tbl, colName, parts);
@@ -537,7 +543,7 @@ public class StatsOptimizer implements Transform {
               switch(type) {
                 case Integeral: {
                   LongSubType subType = LongSubType.valueOf(name);
-                  
+
                   Long minVal = null;
                   Collection<List<ColumnStatisticsObj>> result =
                       verifyAndGetPartStats(hive, tbl, colName, parts);
@@ -563,7 +569,7 @@ public class StatsOptimizer implements Transform {
                 }
                 case Double: {
                   DoubleSubType subType = DoubleSubType.valueOf(name);
-                  
+
                   Double minVal = null;
                   Collection<List<ColumnStatisticsObj>> result =
                       verifyAndGetPartStats(hive, tbl, colName, parts);
@@ -680,6 +686,9 @@ public class StatsOptimizer implements Transform {
       if (tbl.isPartitioned()) {
         for (Partition part : pctx.getPrunedPartitions(
             tsOp.getConf().getAlias(), tsOp).getPartitions()) {
+          if (!StatsSetupConst.areStatsUptoDate(part.getParameters())) {
+            return null;
+          }
           long partRowCnt = Long.parseLong(part.getParameters().get(StatsSetupConst.ROW_COUNT));
           if (partRowCnt < 1) {
             Log.debug("Partition doesn't have upto date stats " + part.getSpec());
@@ -688,6 +697,9 @@ public class StatsOptimizer implements Transform {
           rowCnt += partRowCnt;
         }
       } else { // unpartitioned table
+        if (!StatsSetupConst.areStatsUptoDate(tbl.getParameters())) {
+          return null;
+        }
         rowCnt = Long.parseLong(tbl.getProperty(StatsSetupConst.ROW_COUNT));
         if (rowCnt < 1) {
           // if rowCnt < 1 than its either empty table or table on which stats are not

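The two new guards are the consumer side of the invalidation: the metastore-only rewrite now requires COLUMN_STATS_ACCURATE to be up to date, for the table or for every pruned partition. A minimal sketch of the gate, with a stand-in for StatsSetupConst.areStatsUptoDate and string keys standing in for the ROW_COUNT and COLUMN_STATS_ACCURATE constants:

    import java.util.HashMap;
    import java.util.Map;

    public class StatsGateSketch {
      // Stand-in for StatsSetupConst.areStatsUptoDate(params).
      static boolean areStatsUptoDate(Map<String, String> params) {
        return "true".equalsIgnoreCase(params.get("COLUMN_STATS_ACCURATE"));
      }

      // Mirrors the new guard: return null (no rewrite) unless stats are accurate.
      static Long rowCountFromStats(Map<String, String> params) {
        if (!areStatsUptoDate(params)) {
          return null; // fall back to scanning the table
        }
        return Long.valueOf(params.get("numRows"));
      }

      public static void main(String[] args) {
        Map<String, String> p = new HashMap<>();
        p.put("numRows", "50");
        p.put("COLUMN_STATS_ACCURATE", "false"); // set by an insert with autogather off
        System.out.println(rowCountFromStats(p)); // null -> optimizer does not short-circuit
      }
    }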
http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
index 14a7e9c..9072d7f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
@@ -65,7 +65,6 @@ public class QBParseInfo {
   private final HashSet<String> insertIntoTables;
 
   private boolean isAnalyzeCommand; // used for the analyze command (statistics)
-  private boolean isInsertToTable;  // used for insert overwrite command (statistics)
   private boolean isNoScanAnalyzeCommand; // used for the analyze command (statistics) (noscan)
   private boolean isPartialScanAnalyzeCommand; // used for the analyze command (statistics)
                                                // (partialscan)
@@ -550,14 +549,6 @@ public class QBParseInfo {
     return isAnalyzeCommand;
   }
 
-  public void setIsInsertToTable(boolean isInsertToTable) {
-    this.isInsertToTable = isInsertToTable;
-  }
-
-  public boolean isInsertToTable() {
-    return isInsertToTable;
-  }
-
   public void addTableSpec(String tName, TableSpec tSpec) {
     tableSpecs.put(tName, tSpec);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index f47428c..8927800 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -1735,8 +1735,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             qb.getMetaData().setDestForAlias(name, ts.partHandle);
           }
           if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
-            // Set that variable to automatically collect stats during the MapReduce job
-            qb.getParseInfo().setIsInsertToTable(true);
             // Add the table spec for the destination table.
             qb.getParseInfo().addTableSpec(ts.tableName.toLowerCase(), ts);
           }
@@ -1773,8 +1771,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
               }
               if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
                 TableSpec ts = new TableSpec(db, conf, this.ast);
-                // Set that variable to automatically collect stats during the MapReduce job
-                qb.getParseInfo().setIsInsertToTable(true);
                 // Add the table spec for the destination table.
                 qb.getParseInfo().addTableSpec(ts.tableName.toLowerCase(), ts);
               }
@@ -6328,7 +6324,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // verify that our destination is empty before proceeding
       if (dest_tab.isImmutable() &&
           qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),dest_tab.getTableName())){
-        qb.getParseInfo().isInsertToTable();
         try {
           FileSystem fs = partPath.getFileSystem(conf);
           if (! MetaStoreUtils.isDirEmpty(fs,partPath)){
@@ -12208,7 +12203,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       queryProperties.setNoScanAnalyzeCommand(qb.getParseInfo().isNoScanAnalyzeCommand());
       queryProperties.setAnalyzeRewrite(qb.isAnalyzeRewrite());
       queryProperties.setCTAS(qb.getTableDesc() != null);
-      queryProperties.setInsertToTable(qb.getParseInfo().isInsertToTable());
       queryProperties.setHasOuterOrderBy(!qb.getParseInfo().getIsSubQ() &&
               !qb.getParseInfo().getDestToOrderBy().isEmpty());
       queryProperties.setOuterQueryLimit(qb.getParseInfo().getOuterQueryLimit());

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/queries/clientpositive/insert_into1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_into1.q b/ql/src/test/queries/clientpositive/insert_into1.q
index af82e3a..f00d06e 100644
--- a/ql/src/test/queries/clientpositive/insert_into1.q
+++ b/ql/src/test/queries/clientpositive/insert_into1.q
@@ -43,6 +43,14 @@ insert into insert_into1 select 2, 'b';
 
 select * from insert_into1;
 
-DROP TABLE insert_into1;
+set hive.stats.autogather=false;
+explain
+insert into table insert_into1 values(1, 'abc');
+insert into table insert_into1 values(1, 'abc');
+explain
+SELECT COUNT(*) FROM insert_into1;
+select count(*) from insert_into1;
 
+DROP TABLE insert_into1;
+set hive.stats.autogather=true;
 set hive.compute.query.using.stats=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/queries/clientpositive/insert_into2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_into2.q b/ql/src/test/queries/clientpositive/insert_into2.q
index 7183c75..d127c04 100644
--- a/ql/src/test/queries/clientpositive/insert_into2.q
+++ b/ql/src/test/queries/clientpositive/insert_into2.q
@@ -41,7 +41,15 @@ explain
 SELECT COUNT(*) FROM insert_into2 WHERE ds='2';
 SELECT COUNT(*) FROM insert_into2 WHERE ds='2';
 
+set hive.stats.autogather=false;
+
+insert into table insert_into2 partition (ds='2') values(1, 'abc');
+explain
+SELECT COUNT(*) FROM insert_into2 where ds='2';
+select count(*) from insert_into2 where ds='2';
+
 
 DROP TABLE insert_into2;
 
+set hive.stats.autogather=true;
 set hive.compute.query.using.stats=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_1.q.out b/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
index ce0df01..c7a8a20 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
@@ -182,8 +182,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.table1
               numFiles 1
-              numRows 0
-              rawDataSize 0
               serialization.ddl struct table1 { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -204,8 +202,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.table1
                 numFiles 1
-                numRows 0
-                rawDataSize 0
                 serialization.ddl struct table1 { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/bucket_map_join_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_2.q.out b/ql/src/test/results/clientpositive/bucket_map_join_2.q.out
index 791e606..3c3793f 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_2.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_2.q.out
@@ -182,8 +182,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.table1
               numFiles 1
-              numRows 0
-              rawDataSize 0
               serialization.ddl struct table1 { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -204,8 +202,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.table1
                 numFiles 1
-                numRows 0
-                rawDataSize 0
                 serialization.ddl struct table1 { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out
index 3ed1fdb..13fae42 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out
@@ -309,8 +309,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.src
               numFiles 1
-              numRows 0
-              rawDataSize 0
               serialization.ddl struct src { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -329,8 +327,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.src
                 numFiles 1
-                numRows 0
-                rawDataSize 0
                 serialization.ddl struct src { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
index 7997fcb..5dd927d 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
@@ -673,8 +673,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.src
               numFiles 1
-              numRows 0
-              rawDataSize 0
               serialization.ddl struct src { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -693,8 +691,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.src
                 numFiles 1
-                numRows 0
-                rawDataSize 0
                 serialization.ddl struct src { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into1.q.out b/ql/src/test/results/clientpositive/insert_into1.q.out
index cf627a6..7f3112c 100644
--- a/ql/src/test/results/clientpositive/insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/insert_into1.q.out
@@ -539,6 +539,157 @@ POSTHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
 1	a
 2	b
+PREHOOK: query: explain
+insert into table insert_into1 values(1, 'abc')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert into table insert_into1 values(1, 'abc')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4
+  Stage-3
+  Stage-0 depends on stages: Stage-3, Stage-2, Stage-5
+  Stage-2
+  Stage-4
+  Stage-5 depends on stages: Stage-4
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: values__tmp__table__1
+            Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.insert_into1
+
+  Stage: Stage-6
+    Conditional Operator
+
+  Stage: Stage-3
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into1
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.insert_into1
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.insert_into1
+
+  Stage: Stage-5
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert into table insert_into1 values(1, 'abc')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__2
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: insert into table insert_into1 values(1, 'abc')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__2
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: insert_into1
+            Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE
+            Select Operator
+              Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE
+              Group By Operator
+                aggregations: count()
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+3
 PREHOOK: query: DROP TABLE insert_into1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into1

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/insert_into2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into2.q.out b/ql/src/test/results/clientpositive/insert_into2.q.out
index cc7e135..737e576 100644
--- a/ql/src/test/results/clientpositive/insert_into2.q.out
+++ b/ql/src/test/results/clientpositive/insert_into2.q.out
@@ -394,6 +394,75 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into2
 #### A masked pattern was here ####
 50
+PREHOOK: query: insert into table insert_into2 partition (ds='2') values(1, 'abc')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: query: insert into table insert_into2 partition (ds='2') values(1, 'abc')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 where ds='2'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 where ds='2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: insert_into2
+            Statistics: Num rows: 50 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              Statistics: Num rows: 50 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count()
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into2 where ds='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+PREHOOK: Input: default@insert_into2@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into2 where ds='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+POSTHOOK: Input: default@insert_into2@ds=2
+#### A masked pattern was here ####
+51
 PREHOOK: query: DROP TABLE insert_into2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into2

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out
index d3f433d..c1b13aa 100644
--- a/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out
@@ -149,8 +149,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.table2
                     numFiles 1
-                    numRows 0
-                    rawDataSize 0
                     serialization.ddl struct table2 { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -171,8 +169,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.table2
                       numFiles 1
-                      numRows 0
-                      rawDataSize 0
                       serialization.ddl struct table2 { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -242,8 +238,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.table1
                     numFiles 1
-                    numRows 0
-                    rawDataSize 0
                     serialization.ddl struct table1 { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -264,8 +258,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.table1
                       numFiles 1
-                      numRows 0
-                      rawDataSize 0
                       serialization.ddl struct table1 { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out
index 3d850db..580e098 100644
--- a/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out
@@ -149,8 +149,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.table2
                     numFiles 1
-                    numRows 0
-                    rawDataSize 0
                     serialization.ddl struct table2 { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -171,8 +169,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.table2
                       numFiles 1
-                      numRows 0
-                      rawDataSize 0
                       serialization.ddl struct table2 { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -242,8 +238,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.table1
                     numFiles 1
-                    numRows 0
-                    rawDataSize 0
                     serialization.ddl struct table1 { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -264,8 +258,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.table1
                       numFiles 1
-                      numRows 0
-                      rawDataSize 0
                       serialization.ddl struct table1 { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/spark/insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/insert_into1.q.out b/ql/src/test/results/clientpositive/spark/insert_into1.q.out
index 38134a1..00e71ba 100644
--- a/ql/src/test/results/clientpositive/spark/insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/spark/insert_into1.q.out
@@ -475,6 +475,122 @@ POSTHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
 1	a
 2	b
+PREHOOK: query: explain
+insert into table insert_into1 values(1, 'abc')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert into table insert_into1 values(1, 'abc')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: values__tmp__table__1
+                  Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.insert_into1
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into1
+
+PREHOOK: query: insert into table insert_into1 values(1, 'abc')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__2
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: insert into table insert_into1 values(1, 'abc')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__2
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: insert_into1
+                  Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: bigint)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+3
 PREHOOK: query: DROP TABLE insert_into1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into1

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/spark/insert_into2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/insert_into2.q.out b/ql/src/test/results/clientpositive/spark/insert_into2.q.out
index 578fae2..26bf1e6 100644
--- a/ql/src/test/results/clientpositive/spark/insert_into2.q.out
+++ b/ql/src/test/results/clientpositive/spark/insert_into2.q.out
@@ -412,6 +412,81 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into2
 #### A masked pattern was here ####
 50
+PREHOOK: query: insert into table insert_into2 partition (ds='2') values(1, 'abc')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: query: insert into table insert_into2 partition (ds='2') values(1, 'abc')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 where ds='2'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 where ds='2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: insert_into2
+                  Statistics: Num rows: 50 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    Statistics: Num rows: 50 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into2 where ds='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+PREHOOK: Input: default@insert_into2@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into2 where ds='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+POSTHOOK: Input: default@insert_into2@ds=2
+#### A masked pattern was here ####
+51
 PREHOOK: query: DROP TABLE insert_into2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into2

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/spark/stats3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats3.q.out b/ql/src/test/results/clientpositive/spark/stats3.q.out
index 2afb76e..cbd66e5 100644
--- a/ql/src/test/results/clientpositive/spark/stats3.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats3.q.out
@@ -88,8 +88,6 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
-	numRows             	0                   
-	rawDataSize         	0                   
 	totalSize           	11                  
 #### A masked pattern was here ####
 	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/stats3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats3.q.out b/ql/src/test/results/clientpositive/stats3.q.out
index 2afb76e..cbd66e5 100644
--- a/ql/src/test/results/clientpositive/stats3.q.out
+++ b/ql/src/test/results/clientpositive/stats3.q.out
@@ -88,8 +88,6 @@ Table Type:         	MANAGED_TABLE
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
-	numRows             	0                   
-	rawDataSize         	0                   
 	totalSize           	11                  
 #### A masked pattern was here ####
 	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/tez/insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/insert_into1.q.out b/ql/src/test/results/clientpositive/tez/insert_into1.q.out
index b24b407..0e82691 100644
--- a/ql/src/test/results/clientpositive/tez/insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_into1.q.out
@@ -495,6 +495,126 @@ POSTHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
 1	a
 2	b
+PREHOOK: query: explain
+insert into table insert_into1 values(1, 'abc')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert into table insert_into1 values(1, 'abc')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: values__tmp__table__1
+                  Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.insert_into1
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into1
+
+PREHOOK: query: insert into table insert_into1 values(1, 'abc')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__2
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: insert into table insert_into1 values(1, 'abc')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__2
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: insert_into1
+                  Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: -1 Data size: 14 Basic stats: PARTIAL Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: bigint)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+3
 PREHOOK: query: DROP TABLE insert_into1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into1

http://git-wip-us.apache.org/repos/asf/hive/blob/86346fb1/ql/src/test/results/clientpositive/tez/insert_into2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/insert_into2.q.out b/ql/src/test/results/clientpositive/tez/insert_into2.q.out
index 2c7ec63..b7668ff 100644
--- a/ql/src/test/results/clientpositive/tez/insert_into2.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_into2.q.out
@@ -424,6 +424,81 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into2
 #### A masked pattern was here ####
 50
+PREHOOK: query: insert into table insert_into2 partition (ds='2') values(1, 'abc')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: query: insert into table insert_into2 partition (ds='2') values(1, 'abc')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 where ds='2'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 where ds='2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: insert_into2
+                  Statistics: Num rows: 50 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    Statistics: Num rows: 50 Data size: 530 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into2 where ds='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+PREHOOK: Input: default@insert_into2@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into2 where ds='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+POSTHOOK: Input: default@insert_into2@ds=2
+#### A masked pattern was here ####
+51
 PREHOOK: query: DROP TABLE insert_into2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into2

