From: hashutosh@apache.org
To: commits@hive.apache.org
Date: Mon, 18 Aug 2014 00:44:45 -0000
Subject: svn commit: r1618532 - in /hive/branches/cbo/ql/src: java/org/apache/hadoop/hive/ql/optimizer/optiq/ java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/ java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ java/org/apache/hadoop/hive/ql/optimiz...
Message-Id: <20140818004446.54458238896F@eris.apache.org>

Author: hashutosh
Date: Mon Aug 18 00:44:45 2014
New Revision: 1618532

URL: http://svn.apache.org/r1618532
Log:
HIVE-7756 : CBO : Disallow partitions recomputation between optiq and hive planning (Ashutosh Chauhan via Harish Butani)

Modified:
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java
    hive/branches/cbo/ql/src/test/queries/clientpositive/cbo_correctness.q
    hive/branches/cbo/ql/src/test/results/clientpositive/cbo_correctness.q.out
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java?rev=1618532&r1=1618531&r2=1618532&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java Mon Aug 18 00:44:45 2014
@@ -13,9 +13,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
-import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.optiq.translator.ExprNodeConverter;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
@@ -98,12 +96,15 @@ public class RelOptHiveTable extends Rel
   @Override
   public double getRowCount() {
     if (m_rowCount == -1) {
-      if (m_hiveTblMetadata.isPartitioned()) {
+      if (null == partitionList) {
+        // we are here either unpartitioned table or partitioned table with no predicates
         computePartitionList(m_hiveConf, null);
-        List<Long> rowCounts = StatsUtils.getBasicStatForPartitions(
-            m_hiveTblMetadata, partitionList.getNotDeniedPartns(),
-            StatsSetupConst.ROW_COUNT);
-        m_rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts);
+      }
+      if (m_hiveTblMetadata.isPartitioned()) {
+        List<Long> rowCounts = StatsUtils.getBasicStatForPartitions(
+            m_hiveTblMetadata, partitionList.getNotDeniedPartns(),
+            StatsSetupConst.ROW_COUNT);
+        m_rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts);
       } else {
         m_rowCount = StatsUtils.getNumRows(m_hiveTblMetadata);
@@ -132,15 +133,9 @@ public class RelOptHiveTable extends Rel
   }
 
   public void computePartitionList(HiveConf conf, RexNode pruneNode) {
-    partitionList = null;
-
-    if (!m_hiveTblMetadata.isPartitioned()) {
-      // no partitions for unpartitioned tables.
-      return;
-    }
 
     try {
-      if (pruneNode == null || InputFinder.bits(pruneNode).length() == 0 ) {
+      if (!m_hiveTblMetadata.isPartitioned() || pruneNode == null || InputFinder.bits(pruneNode).length() == 0 ) {
        // there is no predicate on partitioning column, we need all partitions in this case.
        partitionList = PartitionPruner.prune(m_hiveTblMetadata, null, conf, getName(), partitionCache);
        return;
@@ -187,12 +182,11 @@ public class RelOptHiveTable extends Rel
 
     if (null == partitionList) {
       // We could be here either because its an unpartitioned table or because
-      // there are no pruning predicates on a partitioned table. If its latter,
-      // we need to fetch all partitions, so do that now.
+      // there are no pruning predicates on a partitioned table.
       computePartitionList(m_hiveConf, null);
     }
 
-    if (partitionList == null) {
+    if (!m_hiveTblMetadata.isPartitioned()) {
       // 2.1 Handle the case for unpartitioned table.
       hiveColStats = StatsUtils.getTableColumnStats(m_hiveTblMetadata,
           m_hiveNonPartitionCols, nonPartColNamesThatRqrStats);
@@ -290,14 +284,11 @@ public class RelOptHiveTable extends Rel
   /*
    * use to check if a set of columns are all partition columns.
    * true only if:
-   * - there is a prunedPartList in place
    * - all columns in BitSet are partition
    * columns.
    */
  public boolean containsPartitionColumnsOnly(BitSet cols) {
-    if (partitionList == null) {
-      return false;
-    }
+
    for (int i = cols.nextSetBit(0); i >= 0; i++, i = cols.nextSetBit(i + 1)) {
      if (!m_hivePartitionColsMap.containsKey(i)) {
        return false;
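The reworked getRowCount() above separates "fetch the partition list" from "use it": the list is computed once, with a null pruning predicate, only when it is still missing, and the partitioned/unpartitioned branches then consume the cached result. A minimal, self-contained sketch of that lazy-compute-then-branch shape (class and member names here are illustrative stand-ins, not Hive's API):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    // Illustrative stand-in for RelOptHiveTable's lazy row-count logic; not Hive's API.
    class TableStatsSketch {
      private final boolean partitioned;
      private List<Long> partitionRowCounts; // stand-in for the cached partitionList
      private double rowCount = -1;

      TableStatsSketch(boolean partitioned) {
        this.partitioned = partitioned;
      }

      double getRowCount() {
        if (rowCount == -1) {
          if (partitionRowCounts == null) {
            // Unpartitioned table, or partitioned table with no predicate:
            // compute the (unpruned) partition list exactly once and cache it.
            computePartitionList();
          }
          if (partitioned) {
            // Sum per-partition counts, ignoring unknown (negative) stats.
            rowCount = partitionRowCounts.stream()
                .filter(c -> c >= 0)
                .mapToLong(Long::longValue)
                .sum();
          } else {
            rowCount = 100; // stand-in for StatsUtils.getNumRows(table)
          }
        }
        return rowCount;
      }

      private void computePartitionList() {
        // Stand-in for PartitionPruner.prune(...) with a null pruning predicate.
        partitionRowCounts = partitioned ? Arrays.asList(40L, -1L, 60L) : Collections.emptyList();
      }
    }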
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java?rev=1618532&r1=1618531&r2=1618532&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java Mon Aug 18 00:44:45 2014
@@ -231,9 +231,7 @@ public abstract class HivePushFilterPast
   }
 
   private void removeAlwaysTruePredicates(List<RexNode> predicates) {
-    if (predicates.size() < 2) {
-      return;
-    }
+
     ListIterator<RexNode> iter = predicates.listIterator();
     while (iter.hasNext()) {
       RexNode exp = iter.next();
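Dropping the size() < 2 guard means removeAlwaysTruePredicates now also clears a singleton list containing one always-true predicate. The underlying ListIterator removal pattern, sketched with plain booleans standing in for Optiq RexNodes:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.ListIterator;

    class PredicateListSketch {
      // Remove every always-true entry, even when the list has a single element.
      static void removeAlwaysTrue(List<Boolean> predicates) {
        ListIterator<Boolean> iter = predicates.listIterator();
        while (iter.hasNext()) {
          if (Boolean.TRUE.equals(iter.next())) {
            iter.remove(); // structural removal is safe through the iterator
          }
        }
      }

      public static void main(String[] args) {
        List<Boolean> preds = new ArrayList<>(Arrays.asList(true));
        removeAlwaysTrue(preds);
        System.out.println(preds); // [] -- the lone TRUE is dropped, no early return
      }
    }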
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java?rev=1618532&r1=1618531&r2=1618532&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java Mon Aug 18 00:44:45 2014
@@ -275,17 +275,6 @@ public class RexNodeConverter {
     return optiqLiteral;
   }
 
-  public static RexNode getAlwaysTruePredicate(RelOptCluster cluster) {
-    RelDataType dt = cluster.getTypeFactory().createSqlType(SqlTypeName.BOOLEAN);
-    SqlOperator optiqOp = SqlFunctionConverter.getOptiqOperator(new GenericUDFOPEqual(),
-        ImmutableList.<RelDataType> of(dt), dt);
-    List<RexNode> childRexNodeLst = new LinkedList<RexNode>();
-    childRexNodeLst.add(cluster.getRexBuilder().makeLiteral(true));
-    childRexNodeLst.add(cluster.getRexBuilder().makeLiteral(true));
-
-    return cluster.getRexBuilder().makeCall(optiqOp, childRexNodeLst);
-  }
-
   public static RexNode convert(RelOptCluster cluster, ExprNodeDesc joinCondnExprNode,
       List<RelNode> inputRels, LinkedHashMap<RelNode, RowResolver> relToHiveRR,
       Map<RelNode, ImmutableMap<String, Integer>> relToHiveColNameOptiqPosMap, boolean flattenExpr)
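The deleted helper manufactured an always-true join condition as the call "true = true"; the SemanticAnalyzer hunk further down replaces its call site with a single boolean literal from the RexBuilder. A toy model of the difference (Expr, Lit, and Call are hypothetical types invented for this sketch, not Optiq's):

    // Toy expression model; Optiq/Calcite's real RexBuilder.makeLiteral(true)
    // returns a single literal node, as in newAlwaysTrue() below.
    interface Expr {}
    record Lit(boolean value) implements Expr {}
    record Call(String op, Expr left, Expr right) implements Expr {}

    class AlwaysTrueSketch {
      // Old shape: a redundant equality over two TRUE literals ("true = true").
      static Expr oldAlwaysTrue() {
        return new Call("=", new Lit(true), new Lit(true));
      }

      // New shape: just the literal -- cheaper to build, trivial for rules to simplify.
      static Expr newAlwaysTrue() {
        return new Lit(true);
      }
    }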
+ LOG.debug("Filter " + oldFilter + " was null after compacting"); + return getAllPartsFromCacheOrServer(tab, key, true, prunedPartitionsMap); } - PrunedPartitionList ret = prunedPartitionsMap.get(key); - if (ret != null) { - return ret; + LOG.debug("Filter w/ compacting: " + compactExpr.getExprString() + + "; filter w/o compacting: " + oldFilter); + + key = key + compactExpr.getExprString(); + PrunedPartitionList ppList = prunedPartitionsMap.get(key); + if (ppList != null) { + return ppList; } - ret = getPartitionsFromServer(tab, prunerExpr, conf, alias); - prunedPartitionsMap.put(key, ret); - return ret; + ppList = getPartitionsFromServer(tab, compactExpr, conf, alias, partColsUsedInFilter, oldFilter.equals(compactExpr.getExprString())); + prunedPartitionsMap.put(key, ppList); + return ppList; + } + + private static PrunedPartitionList getAllPartsFromCacheOrServer(Table tab, String key, boolean unknownPartitions, + Map partsCache) throws SemanticException { + PrunedPartitionList ppList = partsCache.get(key); + if (ppList != null) { + return ppList; + } + Set parts; + try { + parts = getAllPartitions(tab); + } catch (HiveException e) { + throw new SemanticException(e); + } + ppList = new PrunedPartitionList(tab, parts, null, unknownPartitions); + partsCache.put(key, ppList); + return ppList; } - + private static ExprNodeDesc removeTruePredciates(ExprNodeDesc e) { if (e instanceof ExprNodeConstantDesc) { ExprNodeConstantDesc eC = (ExprNodeConstantDesc) e; @@ -281,40 +324,8 @@ public class PartitionPruner implements } private static PrunedPartitionList getPartitionsFromServer(Table tab, - ExprNodeDesc prunerExpr, HiveConf conf, String alias) throws SemanticException { + final ExprNodeGenericFuncDesc compactExpr, HiveConf conf, String alias, Set partColsUsedInFilter, boolean isPruningByExactFilter) throws SemanticException { try { - if (!tab.isPartitioned()) { - // If the table is not partitioned, return everything. - return new PrunedPartitionList(tab, getAllPartitions(tab), null, false); - } - LOG.debug("tabname = " + tab.getTableName() + " is partitioned"); - - if ("strict".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE)) - && !hasColumnExpr(prunerExpr)) { - // If the "strict" mode is on, we have to provide partition pruner for each table. - throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE - .getMsg("for Alias \"" + alias + "\" Table \"" + tab.getTableName() + "\"")); - } - - if (prunerExpr == null) { - // Non-strict mode, and there is no predicates at all - get everything. - return new PrunedPartitionList(tab, getAllPartitions(tab), null, false); - } - - Set referred = new LinkedHashSet(); - // Replace virtual columns with nulls. See javadoc for details. - prunerExpr = removeNonPartCols(prunerExpr, extractPartColNames(tab), referred); - // Remove all parts that are not partition columns. See javadoc for details. - ExprNodeGenericFuncDesc compactExpr = (ExprNodeGenericFuncDesc)compactExpr(prunerExpr.clone()); - String oldFilter = prunerExpr.getExprString(); - if (compactExpr == null) { - // Non-strict mode, and all the predicates are on non-partition columns - get everything. - LOG.debug("Filter " + oldFilter + " was null after compacting"); - return new PrunedPartitionList(tab, getAllPartitions(tab), null, true); - } - - LOG.debug("Filter w/ compacting: " + compactExpr.getExprString() - + "; filter w/o compacting: " + oldFilter); // Finally, check the filter for non-built-in UDFs. 
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java?rev=1618532&r1=1618531&r2=1618532&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java Mon Aug 18 00:44:45 2014
@@ -194,7 +194,7 @@ public abstract class BaseSemanticAnalyz
   }
 
   public abstract void analyzeInternal(ASTNode ast) throws SemanticException;
 
-  public void init() {
+  public void init(boolean clearPartsCache) {
     //no-op
   }
 
@@ -204,7 +204,7 @@ public abstract class BaseSemanticAnalyz
   public void analyze(ASTNode ast, Context ctx) throws SemanticException {
     initCtx(ctx);
-    init();
+    init(true);
     analyzeInternal(ast);
   }
 
@@ -231,7 +231,7 @@ public abstract class BaseSemanticAnalyz
     this.fetchTask = fetchTask;
   }
 
-  protected void reset() {
+  protected void reset(boolean clearPartsCache) {
     rootTasks = new ArrayList<Task<? extends Serializable>>();
   }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java?rev=1618532&r1=1618531&r2=1618532&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java Mon Aug 18 00:44:45 2014
@@ -427,7 +427,7 @@ public class ColumnStatsSemanticAnalyzer
     QBParseInfo qbp;
 
     // initialize QB
-    init();
+    init(true);
 
     // Setup the necessary metadata if originating from analyze rewrite
     if (isRewritten) {
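The init()/reset() signature change threads one flag from every analyzer entry point down to the single cache that must survive CBO replanning: ordinary analysis passes true and clears everything, while the internal re-plan pass (in the SemanticAnalyzer diff below) passes false so prunedPartitions is reused rather than recomputed. A compact sketch of the pattern (class and method names are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    class AnalyzerSketch {
      private final Map<String, Object> prunedPartitions = new HashMap<>();

      // Entry point for a fresh query: start from an empty cache.
      void analyze() {
        init(true);
        // ... phase 1, metadata resolution, plan generation ...
      }

      // Internal CBO re-plan pass: keep the partition lists from the first pass.
      void replanAfterCbo() {
        init(false);
      }

      void init(boolean clearPartsCache) {
        reset(clearPartsCache);
      }

      private void reset(boolean clearPartsCache) {
        if (clearPartsCache) {
          prunedPartitions.clear();
        }
        // ... all other per-query state is cleared unconditionally ...
      }
    }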
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1618532&r1=1618531&r2=1618532&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Mon Aug 18 00:44:45 2014
@@ -396,8 +396,11 @@ public class SemanticAnalyzer extends Ba
   }
 
   @Override
-  protected void reset() {
-    super.reset();
+  protected void reset(boolean clearPartsCache) {
+    super.reset(true);
+    if(clearPartsCache) {
+      prunedPartitions.clear();
+    }
     loadTableWork.clear();
     loadFileWork.clear();
     topOps.clear();
@@ -411,7 +414,6 @@ public class SemanticAnalyzer extends Ba
     smbMapJoinContext.clear();
     opParseCtx.clear();
     groupOpToInputTables.clear();
-    prunedPartitions.clear();
     disableJoinMerge = false;
     aliasToCTEs.clear();
     topToTable.clear();
@@ -9491,9 +9493,9 @@ public class SemanticAnalyzer extends Ba
   }
 
   @Override
-  public void init() {
+  public void init(boolean clearPartsCache) {
     // clear most members
-    reset();
+    reset(clearPartsCache);
 
     // init QB
     qb = new QB(null, null, false);
@@ -9577,12 +9579,13 @@ public class SemanticAnalyzer extends Ba
         ASTNode newAST = new OptiqBasedPlanner().getOptimizedAST(prunedPartitions);
 
         // 2. Regen OP plan from optimized AST
-        init();
+        init(false);
         ctx_1 = initPhase1Ctx();
         if (!doPhase1(newAST, qb, ctx_1)) {
           throw new RuntimeException(
               "Couldn't do phase1 on CBO optimized query plan");
         }
+        prunedPartitions = ImmutableMap.copyOf(prunedPartitions);
         getMetaData(qb);
 
         disableJoinMerge = true;
@@ -9606,7 +9609,8 @@ public class SemanticAnalyzer extends Ba
         runCBO = false;
         disableJoinMerge = false;
         if (reAnalyzeAST) {
-          init();
+          init(true);
+          prunedPartitions.clear();
           analyzeInternal(ast);
           return;
         }
@@ -11762,7 +11766,7 @@ public class SemanticAnalyzer extends Ba
     else
       return (ltd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE :
          WriteEntity.WriteType.INSERT);
   }
-
+
   /**** Temporary Place Holder For Optiq plan Gen, Optimizer ****/
 
   /*
@@ -11902,10 +11906,10 @@ public class SemanticAnalyzer extends Ba
 
       return basePlan;
     }
-
+
     private RelNode hepPlan(RelNode basePlan, RelMetadataProvider mdProvider,
         RelOptRule...rules) {
-
+
       HepProgramBuilder programBuilder = new HepProgramBuilder();
       for(RelOptRule rule : rules) {
         programBuilder.addRuleInstance(rule);
@@ -11959,7 +11963,7 @@ public class SemanticAnalyzer extends Ba
         optiqJoinCond = RexNodeConverter.convert(m_cluster, joinCondnExprNode, inputRels,
            m_relToHiveRR, m_relToHiveColNameOptiqPosMap, false);
       } else {
-        optiqJoinCond = RexNodeConverter.getAlwaysTruePredicate(m_cluster);
+        optiqJoinCond = m_cluster.getRexBuilder().makeLiteral(true);
       }
 
       // 3. Validate that join condition is legal (i.e no function refering to
@@ -12002,7 +12006,7 @@ public class SemanticAnalyzer extends Ba
 
     /**
      * Generate Join Logical Plan Relnode by walking through the join AST.
-     *
+     *
      * @param qb
      * @param aliasToRel
      *          Alias(Table/Relation alias) to RelNode; only read and not
@@ -12392,7 +12396,7 @@ public class SemanticAnalyzer extends Ba
 
     /**
      * Generate GB plan.
-     *
+     *
      * @param qb
      * @param srcRel
      * @return TODO: 1. Grouping Sets (roll up..)
@@ -12720,8 +12724,8 @@ public class SemanticAnalyzer extends Ba
 
       return rwb;
     }
-
-
+
+
     Pair<Integer, RexNode> genWindowingProj(QB qb, ASTNode windowProjAst, int wndSpecASTIndx,
        int wndProjPos, RelNode srcRel) throws SemanticException {
       RexNode w = null;
@@ -12801,7 +12805,7 @@ public class SemanticAnalyzer extends Ba
     /**
      * NOTE: there can only be one select caluse since we don't handle multi
      * destination insert.
-     *
+     *
      * @throws SemanticException
      */
     private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel)
@@ -13004,7 +13008,7 @@ public class SemanticAnalyzer extends Ba
       for (ExprNodeDesc colExpr : col_list) {
        optiqColLst.add(rexNodeConv.convert(colExpr));
      }
-
+
      // 9. Add windowing Proj Names
      for (Pair<Integer, RexNode> wndPair : windowingRexNodes) {
        optiqColLst.add(wndPair.getFirst(), wndPair.getSecond());
@@ -13197,5 +13201,5 @@ public class SemanticAnalyzer extends Ba
       return tabAliases;
     }
   }
-
+
 }
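Right after the optimized AST is re-analyzed, the patch freezes the cache with Guava's ImmutableMap.copyOf, so any later attempt to recompute a partition list between optiq and hive planning fails fast instead of silently diverging. The same fail-fast guard can be sketched with the JDK alone (unmodifiableMap wraps rather than copies, unlike Guava, but the write-rejection behavior is the point here; the keys are made up):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    class FrozenCacheSketch {
      public static void main(String[] args) {
        Map<String, String> prunedPartitions = new HashMap<>();
        prunedPartitions.put("default.t1;(dt = '2014')", "partition list");

        // Freeze the cache once the CBO pass has produced the optimized AST.
        prunedPartitions = Collections.unmodifiableMap(prunedPartitions);

        try {
          prunedPartitions.put("default.t1;(dt = '15')", "recomputed"); // disallowed
        } catch (UnsupportedOperationException e) {
          System.out.println("partition recomputation after CBO planning rejected");
        }
      }
    }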
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java?rev=1618532&r1=1618531&r2=1618532&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java Mon Aug 18 00:44:45 2014
@@ -25,6 +25,7 @@ import org.apache.hadoop.hive.serde.serd
 import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -93,7 +94,7 @@ public class ExprNodeConstantDesc extend
       return "null";
     }
 
-    if (typeInfo.getTypeName().equals(serdeConstants.STRING_TYPE_NAME)) {
+    if (typeInfo.getTypeName().equals(serdeConstants.STRING_TYPE_NAME) || typeInfo instanceof BaseCharTypeInfo) {
       return "'" + value.toString() + "'";
     } else if (typeInfo.getTypeName().equals(serdeConstants.BINARY_TYPE_NAME)) {
       byte[] bytes = (byte[]) value;
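The one-line ExprNodeConstantDesc change makes char/varchar constants render quoted, the same as plain strings, when an expression is serialized back to text; that string form matters because it now feeds the partition-pruning cache key above. A simplified sketch of the dispatch (the boolean flag stands in for Hive's actual typeInfo instanceof BaseCharTypeInfo check):

    class ConstantRenderSketch {
      // Simplified take on getExprString(): quote string-like constants, pass others through.
      static String render(Object value, boolean isStringOrCharFamily) {
        if (value == null) {
          return "null";
        }
        if (isStringOrCharFamily) {
          return "'" + value + "'"; // char/varchar now quoted exactly like string
        }
        return value.toString();
      }

      public static void main(String[] args) {
        System.out.println(render("2014", true));  // '2014'
        System.out.println(render(42, false));     // 42
      }
    }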
Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/cbo_correctness.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/cbo_correctness.q?rev=1618532&r1=1618531&r2=1618532&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/cbo_correctness.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/cbo_correctness.q Mon Aug 18 00:44:45 2014
@@ -175,7 +175,7 @@ select x from (select count(c_int) over(
 select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1;
 select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1;
 
-create view v1 as select c_int, value, c_boolean from t1;
+create view v1 as select c_int, value, c_boolean, dt from t1;
 create view v2 as select c_int, value from t2;
 
 select value from v1 where c_boolean=false;
@@ -205,8 +205,8 @@ with q1 as ( select c_int from q2 where
 q2 as ( select c_int,c_boolean from v1 where value = '1')
 select sum(c_int) from (select c_int from q1) a;
 
-with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int),
-q2 as ( select c_int,c_boolean from v1 where value = '1')
+with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'),
+q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14')
 select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int;

Modified: hive/branches/cbo/ql/src/test/results/clientpositive/cbo_correctness.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/cbo_correctness.q.out?rev=1618532&r1=1618531&r2=1618532&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/cbo_correctness.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/cbo_correctness.q.out Mon Aug 18 00:44:45 2014
@@ -15789,10 +15789,10 @@ POSTHOOK: Input: default@t1@dt=2014
 1.0	1	1.0	1	2	1.0	2.0	1.0	2.0	1	2
 NULL	NULL	NULL	NULL	0	NULL	0.0	NULL	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	0	NULL	0.0	NULL	NULL	NULL	NULL
-PREHOOK: query: create view v1 as select c_int, value, c_boolean from t1
+PREHOOK: query: create view v1 as select c_int, value, c_boolean, dt from t1
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@t1
-POSTHOOK: query: create view v1 as select c_int, value, c_boolean from t1
+POSTHOOK: query: create view v1 as select c_int, value, c_boolean, dt from t1
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@v1
@@ -15963,8 +15963,8 @@ POSTHOOK: Input: default@t1@dt=2014
 POSTHOOK: Input: default@v1
 #### A masked pattern was here ####
 2
-PREHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int),
-q2 as ( select c_int,c_boolean from v1 where value = '1')
+PREHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'),
+q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14')
 select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Input: default@t1@dt=2014
 PREHOOK: Input: default@v1
 PREHOOK: Input: default@v4
 #### A masked pattern was here ####
-POSTHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int),
-q2 as ( select c_int,c_boolean from v1 where value = '1')
+POSTHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'),
+q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14')
 select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1