hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hashut...@apache.org
Subject hive git commit: HIVE-17965 : Remove HIVELIMITTABLESCANPARTITION support (Zoltan Haindrich via Ashutosh Chauhan)
Date Mon, 06 Nov 2017 15:53:30 GMT
Repository: hive
Updated Branches:
  refs/heads/master 071db96e9 -> 31f547fe2


HIVE-17965 : Remove HIVELIMITTABLESCANPARTITION support (Zoltan Haindrich via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <hashutosh@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/31f547fe
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/31f547fe
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/31f547fe

Branch: refs/heads/master
Commit: 31f547fe2f48929c924d64ac0f8f9c3c1e6fa2ad
Parents: 071db96
Author: Zoltan Haindrich <kirk@rxd.hu>
Authored: Mon Nov 6 07:53:00 2017 -0800
Committer: Ashutosh Chauhan <hashutosh@apache.org>
Committed: Mon Nov 6 07:53:00 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   5 -
 .../test/resources/testconfiguration.properties |   1 -
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |   2 -
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  62 +-
 .../queries/clientnegative/limit_partition.q    |   8 -
 .../clientnegative/limit_partition_stats.q      |  18 -
 .../limit_partition_metadataonly.q              |  11 -
 .../clientnegative/limit_partition.q.out        |  63 --
 .../clientnegative/limit_partition_stats.q.out  |   8 -
 .../limit_partition_metadataonly.q.out          | 598 ------------------
 .../spark/limit_partition_metadataonly.q.out    | 602 -------------------
 11 files changed, 10 insertions(+), 1368 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/31f547fe/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 15ab625..10b364a 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1457,11 +1457,6 @@ public class HiveConf extends Configuration {
     HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", 0.1f, new RatioValidator(),
         "The fraction of available memory to be used for buffering rows in Reducesink operator
for limit pushdown optimization."),
 
-    @Deprecated
-    HIVELIMITTABLESCANPARTITION("hive.limit.query.max.table.partition", -1,
-        "This controls how many partitions can be scanned for each partitioned table.\n" +
-        "The default value \"-1\" means no limit. (DEPRECATED: Please use " + ConfVars.METASTORE_LIMIT_PARTITION_REQUEST + " in the metastore instead.)"),
-
     HIVECONVERTJOINMAXENTRIESHASHTABLE("hive.auto.convert.join.hashtable.max.entries", 21000000L,
         "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take
affect. \n" +
         "However, if it is on, and the predicted number of entries in hashtable for a given
join \n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/31f547fe/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 42c17f4..9b0bace 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1122,7 +1122,6 @@ spark.query.files=add_part_multiple.q, \
   lateral_view_explode2.q, \
   leftsemijoin.q, \
   leftsemijoin_mr.q, \
-  limit_partition_metadataonly.q, \
   limit_pushdown.q, \
   limit_pushdown2.q, \
   list_bucket_dml_2.q, \

http://git-wip-us.apache.org/repos/asf/hive/blob/31f547fe/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 4b6bae1..186d580 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -476,8 +476,6 @@ public enum ErrorMsg {
   DYNAMIC_PARTITIONS_TOO_MANY_PER_NODE_ERROR(20004, "Fatal error occurred when node " +
       "tried to create too many dynamic partitions. The maximum number of dynamic partitions
" +
       "is controlled by hive.exec.max.dynamic.partitions and hive.exec.max.dynamic.partitions.pernode.
"),
-  PARTITION_SCAN_LIMIT_EXCEEDED(20005, "Number of partitions scanned (={0}) on table {1} exceeds limit" +
-      " (={2}). This is controlled by hive.limit.query.max.table.partition.", true),
   /**
    * {1} is the transaction id;
    * use {@link org.apache.hadoop.hive.common.JavaUtils#txnIdToString(long)} to format

http://git-wip-us.apache.org/repos/asf/hive/blob/31f547fe/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 549b38d..7a7460e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -87,7 +87,6 @@ import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
-import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.FunctionInfo;
@@ -7003,7 +7002,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             Utilities.FILE_OP_LOGGER.trace("Setting query directory " + queryTmpdir
                   + " from " + dest_path + " (" + isMmTable + ")");
           }
-        } catch (Exception e) { 
+        } catch (Exception e) {
           throw new SemanticException("Error creating temporary folder on: "
               + dest_path, e);
         }
@@ -7412,7 +7411,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab,
       Map<String, String> partSpec, String dest) throws SemanticException {
     List<FieldSchema> parts = dest_tab.getPartitionKeys();
-    if (parts == null || parts.isEmpty()) return null; // table is not partitioned
+    if (parts == null || parts.isEmpty()) {
+      return null; // table is not partitioned
+    }
     if (partSpec == null || partSpec.size() == 0) { // user did NOT specify partition
       throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest),
           ErrorMsg.NEED_PARTITION_ERROR.getMsg()));
@@ -11685,7 +11686,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // set ColumnAccessInfo for view column authorization
       setColumnAccessInfo(pCtx.getColumnAccessInfo());
     }
-    FetchTask origFetchTask = pCtx.getFetchTask();
     if (LOG.isDebugEnabled()) {
       LOG.debug("After logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
     }
@@ -11718,12 +11718,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       putAccessedColumnsToReadEntity(inputs, columnAccessInfo);
     }
 
-    // 11. if desired check we're not going over partition scan limits
-    if (!ctx.isExplainSkipExecution()) {
-      enforceScanLimits(pCtx, origFetchTask);
-    }
-
-    return;
   }
 
   private void putAccessedColumnsToReadEntity(HashSet<ReadEntity> inputs, ColumnAccessInfo
columnAccessInfo) {
@@ -11751,45 +11745,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
   }
 
-  private void enforceScanLimits(ParseContext pCtx, FetchTask fTask)
-      throws SemanticException {
-    int scanLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITTABLESCANPARTITION);
-
-    if (scanLimit > -1) {
-      // a scan limit on the number of partitions has been set by the user
-      if (fTask != null) {
-        // having a fetch task at this point means that we're not going to
-        // launch a job on the cluster
-        if (!fTask.getWork().isNotPartitioned() && fTask.getWork().getLimit() == -1
-            && scanLimit < fTask.getWork().getPartDir().size()) {
-          throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, ""
-              + fTask.getWork().getPartDir().size(), ""
-              + fTask.getWork().getTblDesc().getTableName(), "" + scanLimit);
-        }
-      } else {
-        // At this point we've run the partition pruner for all top ops. Let's
-        // check whether any of them break the limit
-        for (Operator<?> topOp : topOps.values()) {
-          if (topOp instanceof TableScanOperator) {
-            TableScanOperator tsOp = (TableScanOperator) topOp;
-            if (tsOp.getConf().getIsMetadataOnly()) {
-              continue;
-            }
-            PrunedPartitionList parts = pCtx.getPrunedPartitions(tsOp);
-            if (!parts.getSourceTable().isPartitioned()) {
-              continue;
-            }
-            if (parts.getPartitions().size() > scanLimit) {
-              throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, ""
-                  + parts.getPartitions().size(), "" + parts.getSourceTable().getTableName(), ""
-                  + scanLimit);
-            }
-          }
-        }
-      }
-    }
-  }
-
   @Override
   public List<FieldSchema> getResultSchema() {
     return resultSchema;
@@ -13744,9 +13699,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     // Don't know the characteristics of non-native tables,
     // and don't have a rational way to guess, so assume the most
     // conservative case.
-    if (isNonNativeTable) return WriteEntity.WriteType.INSERT_OVERWRITE;
-    else return ((ltd.getLoadFileType() == LoadFileType.REPLACE_ALL)
-                         ? WriteEntity.WriteType.INSERT_OVERWRITE : getWriteType(dest));
+    if (isNonNativeTable) {
+      return WriteEntity.WriteType.INSERT_OVERWRITE;
+    } else {
+      return ((ltd.getLoadFileType() == LoadFileType.REPLACE_ALL)
+                           ? WriteEntity.WriteType.INSERT_OVERWRITE : getWriteType(dest));
+    }
   }
 
   private WriteEntity.WriteType getWriteType(String dest) {

http://git-wip-us.apache.org/repos/asf/hive/blob/31f547fe/ql/src/test/queries/clientnegative/limit_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/limit_partition.q b/ql/src/test/queries/clientnegative/limit_partition.q
deleted file mode 100644
index cc79acb..0000000
--- a/ql/src/test/queries/clientnegative/limit_partition.q
+++ /dev/null
@@ -1,8 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.limit.query.max.table.partition=1;
-
-explain select * from srcpart limit 1;
-select * from srcpart limit 1;
-
-explain select * from srcpart;
-select * from srcpart;

http://git-wip-us.apache.org/repos/asf/hive/blob/31f547fe/ql/src/test/queries/clientnegative/limit_partition_stats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/limit_partition_stats.q b/ql/src/test/queries/clientnegative/limit_partition_stats.q
deleted file mode 100644
index 0afd4a9..0000000
--- a/ql/src/test/queries/clientnegative/limit_partition_stats.q
+++ /dev/null
@@ -1,18 +0,0 @@
-set hive.exec.dynamic.partition=true;
-set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.stats.autogather=true;
-set hive.compute.query.using.stats=true;
-
-create table part (c int) partitioned by (d string);
-insert into table part partition (d)
-select hr,ds from srcpart;
-
-set hive.limit.query.max.table.partition=1;
-
-explain select count(*) from part;
-select count(*) from part;
-
-set hive.compute.query.using.stats=false;
-
-explain select count(*) from part;
-select count(*) from part;

http://git-wip-us.apache.org/repos/asf/hive/blob/31f547fe/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q b/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q
deleted file mode 100644
index 3e0a749..0000000
--- a/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q
+++ /dev/null
@@ -1,11 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.limit.query.max.table.partition=1;
-set hive.optimize.metadataonly=true;
-
--- SORT_QUERY_RESULTS
-
-explain select ds from srcpart where hr=11 and ds='2008-04-08';
-select ds from srcpart where hr=11 and ds='2008-04-08';
-
-explain select distinct hr from srcpart;
-select distinct hr from srcpart;

http://git-wip-us.apache.org/repos/asf/hive/blob/31f547fe/ql/src/test/results/clientnegative/limit_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/limit_partition.q.out b/ql/src/test/results/clientnegative/limit_partition.q.out
deleted file mode 100644
index e434deb..0000000
--- a/ql/src/test/results/clientnegative/limit_partition.q.out
+++ /dev/null
@@ -1,63 +0,0 @@
-PREHOOK: query: explain select * from srcpart limit 1
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from srcpart limit 1
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 1
-      Processor Tree:
-        TableScan
-          alias: srcpart
-          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats:
NONE
-          Select Operator
-            expressions: key (type: string), value (type: string), ds (type: string), hr
(type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats:
NONE
-            Limit
-              Number of rows: 1
-              Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from srcpart limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-238	val_238	2008-04-08	11
-PREHOOK: query: explain select * from srcpart
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from srcpart
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        TableScan
-          alias: srcpart
-          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats:
NONE
-          Select Operator
-            expressions: key (type: string), value (type: string), ds (type: string), hr
(type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats:
NONE
-            ListSink
-
-FAILED: SemanticException Number of partitions scanned (=4) on table default.srcpart exceeds
limit (=1). This is controlled by hive.limit.query.max.table.partition.

http://git-wip-us.apache.org/repos/asf/hive/blob/31f547fe/ql/src/test/results/clientnegative/limit_partition_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/limit_partition_stats.q.out b/ql/src/test/results/clientnegative/limit_partition_stats.q.out
deleted file mode 100644
index 83e78a0..0000000
--- a/ql/src/test/results/clientnegative/limit_partition_stats.q.out
+++ /dev/null
@@ -1,8 +0,0 @@
-PREHOOK: query: create table part (c int) partitioned by (d string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-FAILED: Hive Internal Error: java.lang.RuntimeException(Cannot overwrite read-only table:
part)
-java.lang.RuntimeException: Cannot overwrite read-only table: part
-#### A masked pattern was here ####
-

http://git-wip-us.apache.org/repos/asf/hive/blob/31f547fe/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out b/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
deleted file mode 100644
index 40ac1e6..0000000
--- a/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
+++ /dev/null
@@ -1,598 +0,0 @@
-PREHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        TableScan
-          alias: srcpart
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: '2008-04-08' (type: string)
-            outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats:
NONE
-            ListSink
-
-PREHOOK: query: select ds from srcpart where hr=11 and ds='2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select ds from srcpart where hr=11 and ds='2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-PREHOOK: query: explain select distinct hr from srcpart
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select distinct hr from srcpart
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats:
NONE
-            Select Operator
-              expressions: hr (type: string)
-              outputColumnNames: hr
-              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats:
NONE
-              Group By Operator
-                keys: hr (type: string)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column
stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column
stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats:
NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats:
NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select distinct hr from srcpart
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select distinct hr from srcpart
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-11
-12

http://git-wip-us.apache.org/repos/asf/hive/blob/31f547fe/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out b/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out
deleted file mode 100644
index 005026d..0000000
--- a/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out
+++ /dev/null
@@ -1,602 +0,0 @@
-PREHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        TableScan
-          alias: srcpart
-          Select Operator
-            expressions: '2008-04-08' (type: string)
-            outputColumnNames: _col0
-            ListSink
-
-PREHOOK: query: select ds from srcpart where hr=11 and ds='2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select ds from srcpart where hr=11 and ds='2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-2008-04-08
-PREHOOK: query: explain select distinct hr from srcpart
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select distinct hr from srcpart
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (GROUP, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column
stats: NONE
-                  Select Operator
-                    expressions: hr (type: string)
-                    outputColumnNames: hr
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column
stats: NONE
-                    Group By Operator
-                      keys: hr (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column
stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE
Column stats: NONE
-        Reducer 2 
-            Reduce Operator Tree:
-              Group By Operator
-                keys: KEY._col0 (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column
stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column
stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select distinct hr from srcpart
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select distinct hr from srcpart
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-11
-12


Mime
View raw message