hive-commits mailing list archives

From hashut...@apache.org
Subject [10/10] hive git commit: HIVE-15544 : Support scalar subqueries (Vineet Garg via Ashutosh Chauhan)
Date Sat, 21 Jan 2017 03:51:52 GMT
HIVE-15544 : Support scalar subqueries (Vineet Garg via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <hashutosh@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d9343f6d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d9343f6d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d9343f6d

Branch: refs/heads/master
Commit: d9343f6d644696367615f5fd464dec2a6053ace7
Parents: 0e78add
Author: Vineet Garg <vgarg@hortonworks.com>
Authored: Wed Jan 4 20:35:00 2017 -0800
Committer: Ashutosh Chauhan <hashutosh@apache.org>
Committed: Fri Jan 20 19:49:44 2017 -0800
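
For context, the shape of query this commit enables is a scalar subquery used as an
ordinary expression in a WHERE or HAVING predicate. A minimal illustration, loosely
based on the perf/query1.q test added in this change (customer_total_return is the CTE
defined in that test; only the correlated comparison matters here):

  select c_customer_id
  from customer_total_return ctr1, store, customer
  where ctr1.ctr_total_return > (select avg(ctr_total_return) * 1.2
                                 from customer_total_return ctr2
                                 where ctr1.ctr_store_sk = ctr2.ctr_store_sk)
    and s_store_sk = ctr1.ctr_store_sk
    and ctr1.ctr_customer_sk = c_customer_sk;

The correlated scalar subquery on the right-hand side is planned through Calcite and
rewritten by HiveSubQueryRemoveRule, as the changes below show.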

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |    1 +
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |    1 +
 .../CalciteSubquerySemanticException.java       |   52 +
 .../optimizer/calcite/HivePlannerContext.java   |   12 +-
 .../calcite/rules/HiveRelDecorrelator.java      |    5 +-
 .../calcite/rules/HiveSubQueryRemoveRule.java   |   93 +-
 .../calcite/translator/RexNodeConverter.java    |   19 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   83 +-
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |    8 +-
 .../apache/hadoop/hive/ql/parse/QBSubQuery.java |  100 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |    5 +-
 .../hive/ql/parse/TypeCheckProcFactory.java     |   29 +-
 .../hive/ql/plan/ExprNodeSubQueryDesc.java      |    4 +-
 .../ql/udf/generic/GenericUDFSQCountCheck.java  |   87 +
 .../calcite/TestCBORuleFiredOnlyOnce.java       |    2 +-
 .../subquery_exists_implicit_gby.q              |    2 +-
 .../clientnegative/subquery_in_implicit_gby.q   |    1 +
 .../subquery_notexists_implicit_gby.q           |    2 +-
 .../subquery_scalar_implicit_gby.q              |    1 +
 .../subquery_scalar_multi_columns.q             |    2 +
 .../clientnegative/subquery_scalar_multi_rows.q |    2 +
 .../clientnegative/subquery_with_or_cond.q      |    2 +-
 .../queries/clientpositive/cbo_subq_not_in.q    |    9 -
 .../test/queries/clientpositive/perf/query1.q   |   24 +
 .../test/queries/clientpositive/perf/query23.q  |   13 +-
 .../test/queries/clientpositive/perf/query30.q  |   30 +
 .../test/queries/clientpositive/perf/query6.q   |   25 +
 .../test/queries/clientpositive/perf/query81.q  |   30 +
 .../queries/clientpositive/subquery_exists.q    |    9 +
 .../test/queries/clientpositive/subquery_in.q   |   32 +-
 .../queries/clientpositive/subquery_in_having.q |   12 +-
 .../queries/clientpositive/subquery_notin.q     |   37 +-
 .../queries/clientpositive/subquery_scalar.q    |  177 +
 .../clientpositive/subquery_unqualcolumnrefs.q  |   29 -
 .../subquery_exists_implicit_gby.q.out          |    2 +-
 .../subquery_in_implicit_gby.q.out              |    1 +
 .../subquery_multiple_cols_in_select.q.out      |    2 +-
 .../subquery_notexists_implicit_gby.q.out       |    2 +-
 .../subquery_scalar_implicit_gby.q.out          |    1 +
 .../subquery_scalar_multi_columns.q.out         |    1 +
 .../subquery_scalar_multi_rows.q.out            |    7 +
 .../subquery_windowing_corr.q.out               |    2 +-
 .../clientnegative/subquery_with_or_cond.q.out  |    2 +-
 .../clientpositive/llap/cbo_subq_not_in.q.out   |   38 -
 .../clientpositive/llap/subquery_exists.q.out   |  144 +
 .../clientpositive/llap/subquery_in.q.out       |  500 +-
 .../clientpositive/llap/subquery_multi.q.out    |   43 +-
 .../clientpositive/llap/subquery_notin.q.out    |  955 +--
 .../clientpositive/llap/subquery_scalar.q.out   | 7319 ++++++++++++++++++
 .../results/clientpositive/perf/query1.q.out    |  249 +
 .../results/clientpositive/perf/query16.q.out   |  346 +-
 .../results/clientpositive/perf/query23.q.out   |  622 +-
 .../results/clientpositive/perf/query30.q.out   |  308 +
 .../results/clientpositive/perf/query6.q.out    |  332 +
 .../results/clientpositive/perf/query69.q.out   |  640 +-
 .../results/clientpositive/perf/query81.q.out   |  308 +
 .../results/clientpositive/show_functions.q.out |    1 +
 .../clientpositive/spark/cbo_subq_not_in.q.out  |   38 -
 .../clientpositive/spark/subquery_exists.q.out  |  137 +
 .../clientpositive/spark/subquery_in.q.out      |  464 +-
 .../clientpositive/subquery_exists.q.out        |  139 +
 .../clientpositive/subquery_in_having.q.out     | 1066 +--
 .../subquery_unqualcolumnrefs.q.out             |  767 --
 63 files changed, 11647 insertions(+), 3729 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index be5a747..0a52782 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -647,6 +647,7 @@ minillaplocal.query.files=acid_globallimit.q,\
   cluster.q,\
   subquery_in.q,\
   subquery_multi.q,\
+  subquery_scalar.q,\
   stats11.q,\
   orc_create.q,\
   orc_split_elimination.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 6f01da0..4fce1ac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -213,6 +213,7 @@ public final class FunctionRegistry {
     system.registerGenericUDF("ceiling", GenericUDFCeil.class);
     system.registerUDF("rand", UDFRand.class, false);
     system.registerGenericUDF("abs", GenericUDFAbs.class);
+    system.registerGenericUDF("sq_count_check", GenericUDFSQCountCheck.class);
     system.registerGenericUDF("pmod", GenericUDFPosMod.class);
 
     system.registerUDF("ln", UDFLn.class, false);
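
The sq_count_check function registered above is internal, but once registered it is
visible like any other UDF; for example (assuming a Hive session built with this change):

  show functions;
  describe function sq_count_check;

Its role is explained with GenericUDFSQCountCheck further down.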

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSubquerySemanticException.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSubquerySemanticException.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSubquerySemanticException.java
new file mode 100644
index 0000000..416a384
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/CalciteSubquerySemanticException.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite;
+
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Semantic exception raised for subquery-related errors during Calcite planning.
+ */
+
+public class CalciteSubquerySemanticException extends SemanticException {
+
+  private static final long serialVersionUID = 1L;
+
+  public CalciteSubquerySemanticException() {
+    super();
+  }
+
+  public CalciteSubquerySemanticException(String message) {
+    super(message);
+  }
+
+  public CalciteSubquerySemanticException(Throwable cause) {
+    super(cause);
+  }
+
+  public CalciteSubquerySemanticException(String message, Throwable cause) {
+    super(message, cause);
+  }
+
+  public CalciteSubquerySemanticException(ErrorMsg errorMsg, String... msgArgs) {
+    super(errorMsg, msgArgs);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
index 8beb0dd..9a65de3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
@@ -18,17 +18,24 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite;
 
 import org.apache.calcite.plan.Context;
+import org.apache.calcite.rel.RelNode;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry;
+import java.util.Set;
 
 
 public class HivePlannerContext implements Context {
   private HiveAlgorithmsConf algoConfig;
   private HiveRulesRegistry registry;
+  private Set<RelNode> corrScalarRexSQWithAgg;
 
-  public HivePlannerContext(HiveAlgorithmsConf algoConfig, HiveRulesRegistry registry) {
+  public HivePlannerContext(HiveAlgorithmsConf algoConfig, HiveRulesRegistry registry, Set<RelNode> corrScalarRexSQWithAgg) {
     this.algoConfig = algoConfig;
     this.registry = registry;
+    // Tracks whether a subquery is correlated and contains an aggregate.
+    // This is computed in CalcitePlanner during planning and is later needed by the subquery remove rule,
+    // hence it is passed along via HivePlannerContext.
+    this.corrScalarRexSQWithAgg = corrScalarRexSQWithAgg;
   }
 
   public <T> T unwrap(Class<T> clazz) {
@@ -38,6 +45,9 @@ public class HivePlannerContext implements Context {
     if (clazz.isInstance(registry)) {
       return clazz.cast(registry);
     }
+    if(clazz.isInstance(corrScalarRexSQWithAgg)) {
+      return clazz.cast(corrScalarRexSQWithAgg);
+    }
     return null;
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java
index a373cdd..5ab36db 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java
@@ -231,7 +231,10 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
             .addRuleInstance(new AdjustProjectForCountAggregateRule(true))
             .addRuleInstance(FilterJoinRule.FILTER_ON_JOIN)
             .addRuleInstance(FilterProjectTransposeRule.INSTANCE)
-            .addRuleInstance(FilterCorrelateRule.INSTANCE)
+            // FilterCorrelateRule mistakenly pushes a FILTER consisting of correlated vars
+            // from on top of LogicalCorrelate into the left input for correlated scalar queries,
+            // which causes an exception during decorrelation. It has been disabled for now.
+            //.addRuleInstance(FilterCorrelateRule.INSTANCE)
             .build();
 
     HepPlanner planner = createPlanner(program);

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
index f1e8ebd..564ef7a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
@@ -22,22 +22,25 @@ import org.apache.calcite.plan.RelOptRuleCall;
 import org.apache.calcite.plan.RelOptRuleOperand;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Aggregate;
 import org.apache.calcite.rel.core.CorrelationId;
 import org.apache.calcite.rel.core.Filter;
 import org.apache.calcite.rel.core.JoinRelType;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rex.LogicVisitor;
 import org.apache.calcite.rex.RexInputRef;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexShuttle;
 import org.apache.calcite.rex.RexSubQuery;
 import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.sql.SqlFunctionCategory;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.sql.SqlFunction;
 import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.type.InferTypes;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.tools.RelBuilderFactory;
-import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.Pair;
 
 import com.google.common.collect.ImmutableList;
@@ -88,9 +91,11 @@ public abstract class HiveSubQueryRemoveRule extends RelOptRule{
                     final int fieldCount = builder.peek().getRowType().getFieldCount();
 
                     assert(filter instanceof HiveFilter);
+                    Set<RelNode> corrScalarQueries = filter.getCluster().getPlanner().getContext().unwrap(Set.class);
+                    boolean isCorrScalarQuery = corrScalarQueries.contains(e.rel);
 
                     final RexNode target = apply(e, ((HiveFilter)filter).getVariablesSet(e), logic,
-                            builder, 1, fieldCount);
+                            builder, 1, fieldCount, isCorrScalarQuery);
                     final RexShuttle shuttle = new ReplaceSubQueryShuttle(e, target);
                     builder.filter(shuttle.apply(filter.getCondition()));
                     builder.project(fields(builder, filter.getRowType().getFieldCount()));
@@ -104,21 +109,85 @@ public abstract class HiveSubQueryRemoveRule extends RelOptRule{
         super(operand, relBuilderFactory, description);
     }
 
+    // Given a subquery, check which aggregate function it uses;
+    // return true for COUNT, since COUNT produces 0 on an empty result set.
+    private boolean isAggZeroOnEmpty(RexSubQuery e) {
+        //as this is corr scalar subquery with agg we expect one aggregate
+        assert(e.getKind() == SqlKind.SCALAR_QUERY);
+        assert(e.rel.getInputs().size() == 1);
+        Aggregate relAgg = (Aggregate)e.rel.getInput(0);
+        assert( relAgg.getAggCallList().size() == 1); //should only have one aggregate
+        if( relAgg.getAggCallList().get(0).getAggregation().getKind() == SqlKind.COUNT ) {
+            return true;
+        }
+        return false;
+    }
+    private SqlTypeName getAggTypeForScalarSub(RexSubQuery e) {
+        assert(e.getKind() == SqlKind.SCALAR_QUERY);
+        assert(e.rel.getInputs().size() == 1);
+        Aggregate relAgg = (Aggregate)e.rel.getInput(0);
+        assert( relAgg.getAggCallList().size() == 1); //should only have one aggregate
+        return relAgg.getAggCallList().get(0).getType().getSqlTypeName();
+    }
+
     protected RexNode apply(RexSubQuery e, Set<CorrelationId> variablesSet,
                             RelOptUtil.Logic logic,
-                            HiveSubQRemoveRelBuilder builder, int inputCount, int offset) {
+                            HiveSubQRemoveRelBuilder builder, int inputCount, int offset,
+                            boolean isCorrScalarAgg) {
         switch (e.getKind()) {
             case SCALAR_QUERY:
+                if(isCorrScalarAgg) {
+                    // Transformation:
+                    // LEFT JOIN the outer query to the inner query on the correlated predicate, preserving all rows from the outer (left) side.
+                    builder.push(e.rel);
+                    final List<RexNode> parentQueryFields = new ArrayList<>();
+                    parentQueryFields.addAll(builder.fields());
+
+                    // The rel id is appended since there could be multiple scalar subqueries
+                    // and the FILTER is created using the field name.
+                    String indicator = "alwaysTrue" + e.rel.getId();
+                    parentQueryFields.add(builder.alias(builder.literal(true), indicator));
+                    builder.project(parentQueryFields);
+                    builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
+
+                    final ImmutableList.Builder<RexNode> operands = ImmutableList.builder();
+                    RexNode literal;
+                    if(isAggZeroOnEmpty(e)) {
+                        literal = builder.literal(0);
+                    }
+                    else {
+                        literal = e.rel.getCluster().getRexBuilder().makeNullLiteral(getAggTypeForScalarSub(e));
+                    }
+                    operands.add((builder.isNull(builder.field(indicator))), literal);
+                    operands.add(field(builder, 1, builder.fields().size()-2));
+                    return builder.call(SqlStdOperatorTable.CASE, operands.build());
+                }
+
+                // Transformation: LEFT JOIN for correlated predicates and INNER JOIN otherwise,
+                // but first do a count on the inner side to make sure it generates at most 1 row.
                 builder.push(e.rel);
-                final RelMetadataQuery mq = RelMetadataQuery.instance();
-                final Boolean unique = mq.areColumnsUnique(builder.peek(),
-                        ImmutableBitSet.of());
-                if (unique == null || !unique) {
-                    builder.aggregate(builder.groupKey(),
-                            builder.aggregateCall(SqlStdOperatorTable.SINGLE_VALUE, false, null,
-                                    null, builder.field(0)));
+                // returns single row/column
+                builder.aggregate(builder.groupKey(),
+                        builder.count(false, "cnt"));
+
+                SqlFunction countCheck = new SqlFunction("sq_count_check", SqlKind.OTHER_FUNCTION, ReturnTypes.BIGINT,
+                        InferTypes.RETURN_TYPE, OperandTypes.NUMERIC, SqlFunctionCategory.USER_DEFINED_FUNCTION);
+
+                // we create FILTER (sq_count_check(count()) <= 1) instead of PROJECT because RelFieldTrimmer
+                //  ends up getting rid of Project since it is not used further up the tree
+                builder.filter(builder.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
+                        builder.call(countCheck, builder.field("cnt")),
+                        builder.literal(1)));
+
+                if( !variablesSet.isEmpty())
+                {
+                    builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
                 }
+                else
+                    builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
+                builder.push(e.rel);
                 builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
+                offset++;
                 return field(builder, inputCount, offset);
 
             case IN:
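
The two branches added above correspond to two user-visible shapes of scalar subquery.
The queries below are illustrative only, using the part table exercised by the new
subquery_scalar tests; the actual rewritten plans are recorded in subquery_scalar.q.out.

  -- not flagged as correlated-with-aggregate (e.g. uncorrelated): the single-row
  -- guarantee is enforced by joining against a count() of the inner query,
  -- guarded by the filter sq_count_check(cnt) <= 1
  select p_name from part where p_size > (select max(p_size) from part);

  -- correlated with an aggregate: rewritten to a LEFT JOIN on the correlated predicate,
  -- with a CASE over an "alwaysTrue" indicator column so that unmatched outer rows see
  -- NULL (or 0 when the aggregate is COUNT) instead of being dropped
  select p_name from part
  where p_size > (select avg(pp.p_size) from part pp where pp.p_type = part.p_type);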

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index 8d2e535..a05b89c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException.UnsupportedFeature;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSubquerySemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveExtractDate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFloorDate;
 import org.apache.hadoop.hive.ql.parse.ParseUtils;
@@ -178,13 +179,13 @@ public class RexNodeConverter {
   }
 
   private RexNode convert(final ExprNodeSubQueryDesc subQueryDesc) throws  SemanticException {
-    if(subQueryDesc.getType() == ExprNodeSubQueryDesc.SubqueryType.IN) {
+    if(subQueryDesc.getType() == ExprNodeSubQueryDesc.SubqueryType.IN ) {
      /*
       * Check.5.h :: For In and Not In the SubQuery must implicitly or
       * explicitly only contain one select item.
       */
       if(subQueryDesc.getRexSubQuery().getRowType().getFieldCount() > 1) {
-        throw new SemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
+        throw new CalciteSubquerySemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
                 "SubQuery can contain only 1 item in Select List."));
       }
       //create RexNode for LHS
@@ -199,9 +200,19 @@ public class RexNodeConverter {
       RexNode subQueryNode = RexSubQuery.exists(subQueryDesc.getRexSubQuery());
       return subQueryNode;
     }
+    else if( subQueryDesc.getType() == ExprNodeSubQueryDesc.SubqueryType.SCALAR){
+      if(subQueryDesc.getRexSubQuery().getRowType().getFieldCount() > 1) {
+        throw new CalciteSubquerySemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
+                "SubQuery can contain only 1 item in Select List."));
+      }
+      //create RexSubQuery node
+      RexNode rexSubQuery = RexSubQuery.scalar(subQueryDesc.getRexSubQuery());
+      return rexSubQuery;
+    }
+
     else {
-      throw new SemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
-              "Currently only IN and EXISTS type of subqueries are supported"));
+      throw new CalciteSubquerySemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
+              "Invalid subquery: " + subQueryDesc.getType()));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 9f1b9d5..fd99ba6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -115,6 +115,7 @@ import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.ImmutableIntList;
 import org.apache.calcite.util.Pair;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -135,16 +136,8 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.calcite.*;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException.UnsupportedFeature;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveDefaultRelMetadataProvider;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HivePlannerContext;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexExecutorImpl;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl;
-import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveVolcanoPlanner;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
@@ -423,13 +416,19 @@ public class CalcitePlanner extends SemanticAnalyzer {
               this.ctx.setCboInfo("Plan not optimized by CBO.");
             }
           }
-          if (!conf.getBoolVar(ConfVars.HIVE_IN_TEST) || isMissingStats
-              || e instanceof CalciteSemanticException) {
-            reAnalyzeAST = true;
+          if( e instanceof CalciteSubquerySemanticException) {
+            // The non-CBO path retries the subqueries and throws a completely different exception/error
+            // that eclipses the original error message,
+            // so avoid re-running subqueries on the non-CBO path.
+            throw new SemanticException(e);
+          }
+          else if (!conf.getBoolVar(ConfVars.HIVE_IN_TEST) || isMissingStats
+              || e instanceof CalciteSemanticException ) {
+              reAnalyzeAST = true;
           } else if (e instanceof SemanticException) {
             // although, its likely to be a valid exception, we will retry
             // with cbo off anyway.
-            reAnalyzeAST = true;
+              reAnalyzeAST = true;
           } else if (e instanceof RuntimeException) {
             throw (RuntimeException) e;
           } else {
@@ -1199,6 +1198,10 @@ public class CalcitePlanner extends SemanticAnalyzer {
     // this will be used in RexNodeConverter to create cor var
     private int subqueryId;
 
+    // Tracks whether a subquery is correlated and contains an aggregate,
+    // since that case is handled specially when it is rewritten in SubqueryRemoveRule.
+    Set<RelNode> corrScalarRexSQWithAgg = new HashSet<RelNode>();
+
     // TODO: Do we need to keep track of RR, ColNameToPosMap for every op or
     // just last one.
     LinkedHashMap<RelNode, RowResolver>                   relToHiveRR                   = new LinkedHashMap<RelNode, RowResolver>();
@@ -1225,7 +1228,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
               conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
       HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
       HiveRulesRegistry registry = new HiveRulesRegistry();
-      HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry);
+      HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry, corrScalarRexSQWithAgg);
       RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext);
       final RexBuilder rexBuilder = cluster.getRexBuilder();
       final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder);
@@ -2260,8 +2263,32 @@ public class CalcitePlanner extends SemanticAnalyzer {
       return filterRel;
     }
 
+    private boolean topLevelConjunctCheck(ASTNode searchCond, ObjectPair<Boolean, Integer> subqInfo) {
+      if( searchCond.getType() == HiveParser.KW_OR) {
+        subqInfo.setFirst(Boolean.TRUE);
+        if(subqInfo.getSecond() > 1) {
+          return false;
+        }
+      }
+      if( searchCond.getType() == HiveParser.TOK_SUBQUERY_EXPR) {
+        subqInfo.setSecond(subqInfo.getSecond() + 1);
+        if(subqInfo.getSecond()> 1 && subqInfo.getFirst()) {
+          return false;
+        }
+        return true;
+      }
+      for(int i=0; i<searchCond.getChildCount(); i++){
+          boolean validSubQuery = topLevelConjunctCheck((ASTNode)searchCond.getChild(i), subqInfo);
+          if(!validSubQuery) {
+            return false;
+          }
+      }
+      return true;
+    }
+
     private void subqueryRestrictionCheck(QB qb, ASTNode searchCond, RelNode srcRel,
-                                         boolean forHavingClause, Map<String, RelNode> aliasToRel ) throws SemanticException {
+                                         boolean forHavingClause, Map<String, RelNode> aliasToRel,
+                                          Set<ASTNode> corrScalarQueries) throws SemanticException {
         List<ASTNode> subQueriesInOriginalTree = SubQueryUtils.findSubQueries(searchCond);
 
         ASTNode clonedSearchCond = (ASTNode) SubQueryUtils.adaptor.dupTree(searchCond);
@@ -2275,8 +2302,19 @@ public class CalcitePlanner extends SemanticAnalyzer {
           ASTNode originalSubQueryAST = subQueriesInOriginalTree.get(i);
 
           ASTNode subQueryAST = subQueries.get(i);
+          //SubQueryUtils.rewriteParentQueryWhere(clonedSearchCond, subQueryAST);
+          Boolean orInSubquery = new Boolean(false);
+          Integer subqueryCount = new Integer(0);
+          ObjectPair<Boolean, Integer> subqInfo = new ObjectPair<Boolean, Integer>(false, 0);
+          if(!topLevelConjunctCheck(clonedSearchCond, subqInfo)){
+          /*
+           *  Restriction.7.h :: SubQuery predicates can appear only as top level conjuncts.
+           */
+
+            throw new CalciteSubquerySemanticException(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg(
+                    subQueryAST, "Only SubQuery expressions that are top level conjuncts are allowed"));
 
-          SubQueryUtils.rewriteParentQueryWhere(clonedSearchCond, subQueryAST);
+          }
 
           QBSubQuery subQuery = SubQueryUtils.buildSubQuery(qb.getId(), sqIdx, subQueryAST,
                   originalSubQueryAST, ctx);
@@ -2290,15 +2328,19 @@ public class CalcitePlanner extends SemanticAnalyzer {
             aliasToRel.put(havingInputAlias, srcRel);
           }
 
-          subQuery.subqueryRestrictionsCheck(inputRR, forHavingClause, havingInputAlias);
+          boolean isCorrScalarWithAgg = subQuery.subqueryRestrictionsCheck(inputRR, forHavingClause, havingInputAlias);
+          if(isCorrScalarWithAgg) {
+            corrScalarQueries.add(originalSubQueryAST);
+          }
       }
     }
     private boolean genSubQueryRelNode(QB qb, ASTNode node, RelNode srcRel, boolean forHavingClause,
                                        Map<ASTNode, RelNode> subQueryToRelNode,
                                        Map<String, RelNode> aliasToRel) throws SemanticException {
 
+        Set<ASTNode> corrScalarQueriesWithAgg = new HashSet<ASTNode>();
         //disallow subqueries which HIVE doesn't currently support
-        subqueryRestrictionCheck(qb, node, srcRel, forHavingClause, aliasToRel);
+        subqueryRestrictionCheck(qb, node, srcRel, forHavingClause, aliasToRel, corrScalarQueriesWithAgg);
         Deque<ASTNode> stack = new ArrayDeque<ASTNode>();
         stack.push(node);
 
@@ -2326,6 +2368,11 @@ public class CalcitePlanner extends SemanticAnalyzer {
               RelNode subQueryRelNode = genLogicalPlan(qbSQ, false,  relToHiveColNameCalcitePosMap.get(srcRel),
                       relToHiveRR.get(srcRel));
               subQueryToRelNode.put(next, subQueryRelNode);
+              // Keep track of subqueries which are scalar, correlated and contain an aggregate
+              // expression. These will later be handled specially in the subquery remove rule.
+              if(corrScalarQueriesWithAgg.contains(next)) {
+                  corrScalarRexSQWithAgg.add(subQueryRelNode);
+              }
               isSubQuery = true;
               break;
             default:
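
topLevelConjunctCheck above implements Restriction.7.h: subquery predicates must appear
as top-level conjuncts of the WHERE/HAVING condition. In particular, once an OR is seen,
a condition with more than one subquery expression is rejected. Illustrative examples
over the src table (the rejected form matches the updated subquery_with_or_cond.q
negative test further down):

  -- accepted: the subquery predicate is a top-level conjunct
  select count(*) from src
  where src.key in (select key from src s1 where s1.key > '9') and src.value is not null;

  -- rejected: subquery expressions combined under OR
  select count(*) from src
  where src.key in (select key from src s1 where s1.key > '9')
     or src.value is not null or exists(select key from src);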

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index cd9adfc..b90210c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -397,12 +397,13 @@ atomExpression
     | floorExpression
     | caseExpression
     | whenExpression
+    | (LPAREN KW_SELECT)=> (subQueryExpression)
+        -> ^(TOK_SUBQUERY_EXPR TOK_SUBQUERY_OP subQueryExpression)
     | (functionName LPAREN) => function
     | tableOrColumn
     | LPAREN! expression RPAREN!
     ;
 
-
 precedenceFieldExpression
     :
     atomExpression ((LSQUARE^ expression RSQUARE!) | (DOT^ identifier))*
@@ -531,7 +532,7 @@ precedenceEqualExpressionSingle
        -> ^(KW_NOT ^(precedenceEqualNegatableOperator $precedenceEqualExpressionSingle $notExpr))
     | (precedenceEqualOperator equalExpr=precedenceBitwiseOrExpression)
        -> ^(precedenceEqualOperator $precedenceEqualExpressionSingle $equalExpr)
-    | (KW_NOT KW_IN LPAREN KW_SELECT)=>  (KW_NOT KW_IN subQueryExpression) 
+    | (KW_NOT KW_IN LPAREN KW_SELECT)=>  (KW_NOT KW_IN subQueryExpression)
        -> ^(KW_NOT ^(TOK_SUBQUERY_EXPR ^(TOK_SUBQUERY_OP KW_IN) subQueryExpression $precedenceEqualExpressionSingle))
     | (KW_NOT KW_IN expressions)
        -> ^(KW_NOT ^(TOK_FUNCTION KW_IN $precedenceEqualExpressionSingle expressions))
@@ -544,7 +545,8 @@ precedenceEqualExpressionSingle
     | ( KW_BETWEEN (min=precedenceBitwiseOrExpression) KW_AND (max=precedenceBitwiseOrExpression) )
        -> ^(TOK_FUNCTION Identifier["between"] KW_FALSE $left $min $max)
     )*
-    | (KW_EXISTS LPAREN KW_SELECT)=> (KW_EXISTS subQueryExpression) -> ^(TOK_SUBQUERY_EXPR ^(TOK_SUBQUERY_OP KW_EXISTS) subQueryExpression)
+    | (KW_EXISTS LPAREN KW_SELECT)=> (KW_EXISTS subQueryExpression) 
+	-> ^(TOK_SUBQUERY_EXPR ^(TOK_SUBQUERY_OP KW_EXISTS) subQueryExpression)
     ;
 
 expressions

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java
index 24381b9..7ca722a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSubquerySemanticException;
 import org.apache.hadoop.hive.ql.parse.SubQueryDiagnostic.QBSubQueryRewrite;
 import org.apache.hadoop.hive.ql.parse.SubQueryUtils.ISubQueryJoinInfo;
 import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory.DefaultExprProcessor;
@@ -43,9 +44,14 @@ public class QBSubQuery implements ISubQueryJoinInfo {
     EXISTS,
     NOT_EXISTS,
     IN,
-    NOT_IN;
+    NOT_IN,
+    SCALAR;
 
     public static SubQueryType get(ASTNode opNode) throws SemanticException {
+      if(opNode == null) {
+        return SCALAR;
+      }
+
       switch(opNode.getType()) {
       case HiveParser.KW_EXISTS:
         return EXISTS;
@@ -503,7 +509,14 @@ public class QBSubQuery implements ISubQueryJoinInfo {
         originalSQASTOrigin.getUsageNode());
   }
 
-  void subqueryRestrictionsCheck(RowResolver parentQueryRR,
+  /**
+   * @param parentQueryRR
+   * @param forHavingClause
+   * @param outerQueryAlias
+   * @return true if it is correlated scalar subquery with an aggregate
+   * @throws SemanticException
+   */
+  boolean subqueryRestrictionsCheck(RowResolver parentQueryRR,
                                  boolean forHavingClause,
                                  String outerQueryAlias)
           throws SemanticException {
@@ -540,42 +553,12 @@ public class QBSubQuery implements ISubQueryJoinInfo {
       hasAggreateExprs = hasAggreateExprs | ( r == 1 );
     }
 
-    /*
-     * Restriction.13.m :: In the case of an implied Group By on a
-     * correlated SubQuery, the SubQuery always returns 1 row.
-     * An exists on a SubQuery with an implied GBy will always return true.
-     * Whereas Algebraically transforming to a Join may not return true. See
-     * Specification doc for details.
-     * Similarly a not exists on a SubQuery with a implied GBY will always return false.
-     */
-    boolean noImplicityGby = true;
-    if ( insertClause.getChild(1).getChildCount() > 3 &&
-            insertClause.getChild(1).getChild(3).getType() == HiveParser.TOK_GROUPBY ) {
-      if((ASTNode) insertClause.getChild(1).getChild(3) != null){
-        noImplicityGby = false;
-      }
-    }
-    if ( operator.getType() == SubQueryType.EXISTS  &&
-            hasAggreateExprs &&
-            noImplicityGby) {
-      throw new SemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
-              subQueryAST,
-              "An Exists predicate on SubQuery with implicit Aggregation(no Group By clause) " +
-                      "cannot be rewritten. (predicate will always return true)."));
-    }
-    if ( operator.getType() == SubQueryType.NOT_EXISTS  &&
-            hasAggreateExprs &&
-            noImplicityGby) {
-      throw new SemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
-              subQueryAST,
-              "A Not Exists predicate on SubQuery with implicit Aggregation(no Group By clause) " +
-                      "cannot be rewritten. (predicate will always return false)."));
-    }
+
 
     ASTNode whereClause = SubQueryUtils.subQueryWhere(insertClause);
 
     if ( whereClause == null ) {
-      return;
+      return false;
     }
     ASTNode searchCond = (ASTNode) whereClause.getChild(0);
     List<ASTNode> conjuncts = new ArrayList<ASTNode>();
@@ -583,14 +566,23 @@ public class QBSubQuery implements ISubQueryJoinInfo {
 
     ConjunctAnalyzer conjunctAnalyzer = new ConjunctAnalyzer(parentQueryRR,
             forHavingClause, outerQueryAlias);
-    ASTNode sqNewSearchCond = null;
 
     boolean hasCorrelation = false;
+    boolean hasNonEquiJoinPred = false;
     for(ASTNode conjunctAST : conjuncts) {
       Conjunct conjunct = conjunctAnalyzer.analyzeConjunct(conjunctAST);
       if(conjunct.isCorrelated()){
        hasCorrelation = true;
-       break;
+      }
+      if ( conjunct.eitherSideRefersBoth() && conjunctAST.getType() != HiveParser.EQUAL) {
+        hasNonEquiJoinPred = true;
+      }
+    }
+    boolean noImplicityGby = true;
+    if ( insertClause.getChild(1).getChildCount() > 3 &&
+            insertClause.getChild(1).getChild(3).getType() == HiveParser.TOK_GROUPBY ) {
+      if((ASTNode) insertClause.getChild(1).getChild(3) != null){
+        noImplicityGby = false;
       }
     }
 
@@ -598,9 +590,43 @@ public class QBSubQuery implements ISubQueryJoinInfo {
      * Restriction.14.h :: Correlated Sub Queries cannot contain Windowing clauses.
      */
     if (  hasWindowing && hasCorrelation) {
-      throw new SemanticException(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg(
+      throw new CalciteSubquerySemanticException(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg(
               subQueryAST, "Correlated Sub Queries cannot contain Windowing clauses."));
     }
+
+    /*
+     * Restriction.13.m :: In the case of an implied Group By on a
+     * correlated SubQuery, the SubQuery always returns 1 row.
+     * An exists on a SubQuery with an implied GBy will always return true.
+     * Whereas Algebraically transforming to a Join may not return true. See
+     * Specification doc for details.
+     * Similarly a not exists on a SubQuery with a implied GBY will always return false.
+     */
+      if (hasAggreateExprs &&
+              noImplicityGby ) {
+
+        if( hasCorrelation && (operator.getType() == SubQueryType.EXISTS
+                || operator.getType() == SubQueryType.NOT_EXISTS
+                || operator.getType() == SubQueryType.IN
+                || operator.getType() == SubQueryType.NOT_IN)) {
+          throw new CalciteSubquerySemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
+                subQueryAST,
+                "A predicate on EXISTS/NOT EXISTS/IN/NOT IN SubQuery with implicit Aggregation(no Group By clause) " +
+                        "cannot be rewritten."));
+        }
+        else if(operator.getType() == SubQueryType.SCALAR && hasNonEquiJoinPred) {
+          // throw an error if the correlation predicate is not an equality
+            throw new CalciteSubquerySemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
+                    subQueryAST,
+                    "Scalar subqueries with aggregate cannot have non-equi join predicate"));
+        }
+        else if(operator.getType() == SubQueryType.SCALAR && hasCorrelation) {
+            return true;
+        }
+
+      }
+
+    return false;
   }
 
   void validateAndRewriteAST(RowResolver outerQueryRR,
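
The reworked restriction logic above distinguishes several cases when the subquery has an
aggregate but no GROUP BY clause (an implicit group by). Illustrative queries over the
part table; the first two match negative tests added in this change:

  -- rejected (Restriction.13.m): correlated IN over an aggregate with no GROUP BY
  select * from part
  where p_partkey in (select count(*) from part pp where pp.p_type = part.p_type);

  -- rejected: correlated scalar subquery with an aggregate and a non-equi correlation predicate
  select * from part
  where p_size <> (select count(p_size) from part pp where part.p_type <> pp.p_type);

  -- accepted, and flagged as correlated-scalar-with-aggregate so that
  -- HiveSubQueryRemoveRule can apply its special LEFT JOIN rewrite
  select * from part
  where p_size > (select avg(pp.p_size) from part pp where pp.p_type = part.p_type);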

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index f275f6a..0b4b6e1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -9032,7 +9032,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     if (node.getPostJoinFilters().size() != 0) {
       // Safety check: if we are merging join operators and there are post-filtering
       // conditions, they cannot be outer joins
-      assert node.getNoOuterJoin() && target.getNoOuterJoin();
+      assert node.getNoOuterJoin() ;
+      if( target.getPostJoinFilters().size() != 0) {
+        assert target.getNoOuterJoin() ;
+      }
       for (ASTNode exprPostFilter : node.getPostJoinFilters()) {
         target.addPostJoinFilter(exprPostFilter);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index 6c30efd..4aff56b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hive.ql.lib.Rule;
 import org.apache.hadoop.hive.ql.lib.RuleRegExp;
 import org.apache.hadoop.hive.ql.lib.ExpressionWalker;
 import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -1414,10 +1415,11 @@ public class TypeCheckProcFactory {
 
       ASTNode subqueryOp = (ASTNode) expr.getChild(0);
 
-      boolean isIN = (subqueryOp.getChild(0).getType() == HiveParser.KW_IN
+      boolean isIN = (subqueryOp.getChildCount() > 0) && (subqueryOp.getChild(0).getType() == HiveParser.KW_IN
               || subqueryOp.getChild(0).getType() == HiveParser.TOK_SUBQUERY_OP_NOTIN);
-      boolean isEXISTS = (subqueryOp.getChild(0).getType() == HiveParser.KW_EXISTS
+      boolean isEXISTS = (subqueryOp.getChildCount() > 0) && (subqueryOp.getChild(0).getType() == HiveParser.KW_EXISTS
               || subqueryOp.getChild(0).getType() == HiveParser.TOK_SUBQUERY_OP_NOTEXISTS);
+      boolean isScalar = subqueryOp.getChildCount() == 0 ;
 
       // subqueryToRelNode might be null if subquery expression anywhere other than
       //  as expected in filter (where/having). We should throw an appropriate error
@@ -1430,25 +1432,38 @@ public class TypeCheckProcFactory {
                                 "Where and Having Clause predicates"));
       }
 
+      RelNode subqueryRel = subqueryToRelNode.get(expr);
+
       //For now because subquery is only supported in filter
       // we will create subquery expression of boolean type
       if(isEXISTS) {
-        return new ExprNodeSubQueryDesc(TypeInfoFactory.booleanTypeInfo, subqueryToRelNode.get(expr),
+        return new ExprNodeSubQueryDesc(TypeInfoFactory.booleanTypeInfo, subqueryRel,
                 ExprNodeSubQueryDesc.SubqueryType.EXISTS);
       }
-      if(isIN) {
+      else if(isIN) {
         assert(nodeOutputs[2] != null);
         ExprNodeDesc lhs = (ExprNodeDesc)nodeOutputs[2];
-        return new ExprNodeSubQueryDesc(TypeInfoFactory.booleanTypeInfo, subqueryToRelNode.get(expr),
+        return new ExprNodeSubQueryDesc(TypeInfoFactory.booleanTypeInfo, subqueryRel,
                 ExprNodeSubQueryDesc.SubqueryType.IN, lhs);
       }
+      else if(isScalar){
+        // only single subquery expr is supported
+        if(subqueryRel.getRowType().getFieldCount() != 1) {
+            throw new SemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
+                    "More than one column expression in subquery"));
+        }
+        // figure out subquery expression column's type
+        TypeInfo subExprType = TypeConverter.convert(subqueryRel.getRowType().getFieldList().get(0).getType());
+        return new ExprNodeSubQueryDesc(subExprType, subqueryRel,
+                ExprNodeSubQueryDesc.SubqueryType.SCALAR);
+      }
 
       /*
        * Restriction.1.h :: SubQueries only supported in the SQL Where Clause.
        */
       ctx.setError(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg(sqNode,
-          "Currently only IN & EXISTS SubQuery expressions are allowed"),
-          sqNode);
+              "Currently only IN & EXISTS SubQuery expressions are allowed"),
+              sqNode);
       return null;
     }
   }
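
With the branch added above, a scalar subquery expression is no longer limited to the
boolean type used for IN/EXISTS: its type is taken from the subquery's single select item
via TypeConverter. For example (illustrative, assuming the usual part schema where p_size
is an int):

  -- the subquery expression takes the type of avg(p_size) (double), and the
  -- comparison against the int column p_size follows the normal numeric
  -- type coercion rules
  select p_name from part where p_size > (select avg(p_size) from part);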

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeSubQueryDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeSubQueryDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeSubQueryDesc.java
index aec331b..462e730 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeSubQueryDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeSubQueryDesc.java
@@ -35,11 +35,9 @@ public class ExprNodeSubQueryDesc extends ExprNodeDesc implements Serializable {
   public static enum SubqueryType{
     IN,
     EXISTS,
+    SCALAR
   };
 
-  public static final int IN=1;
-  public static final int EXISTS=2;
-
   /**
    * RexNode corresponding to subquery.
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSQCountCheck.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSQCountCheck.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSQCountCheck.java
new file mode 100644
index 0000000..53e6231
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSQCountCheck.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncAbsDecimalToDecimal;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncAbsDoubleToDouble;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncAbsLongToLong;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.LongWritable;
+
+/**
+ * GenericUDFSQCountCheck.
+ *
+ */
+@Description(name = "sq_count_check",
+    value = "_FUNC_(x) - Internal check on scalar subquery expression to make sure atmost one row is returned",
+    extended = "For internal use only")
+@VectorizedExpressions({FuncAbsLongToLong.class, FuncAbsDoubleToDouble.class, FuncAbsDecimalToDecimal.class})
+public class GenericUDFSQCountCheck extends GenericUDF {
+  private final LongWritable resultLong = new LongWritable();
+  private transient Converter[] converters = new Converter[1];
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    if (arguments.length != 1) {
+      throw new UDFArgumentLengthException(
+          "Invalid scalar subquery expression. Subquery count check expected one argument but received: " + arguments.length);
+    }
+
+    converters[0] = ObjectInspectorConverters.getConverter(arguments[0],
+            PrimitiveObjectInspectorFactory.writableLongObjectInspector);
+
+    ObjectInspector outputOI = null;
+    outputOI = PrimitiveObjectInspectorFactory.writableLongObjectInspector;
+    return outputOI;
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+    Object valObject = arguments[0].get();
+    assert(valObject != null);
+    Long val = getLongValue(arguments, 0, converters);
+    assert(val >= 0);
+    if(val > 1) {
+      throw new UDFArgumentException(
+              " Scalar subquery expression returns more than one row.");
+    }
+
+    resultLong.set(val);
+    return resultLong;
+  }
+
+  @Override
+  protected String getFuncName() {
+    return "sq_count_check";
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    return getStandardDisplayString(getFuncName(), children);
+  }
+
+}
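
sq_count_check is the runtime guard wired in by HiveSubQueryRemoveRule: it is fed the row
count of the scalar subquery and raises an error stating that the scalar subquery
expression returns more than one row whenever that count exceeds one. A query that
violates the single-row assumption, matching the subquery_scalar_multi_rows.q negative
test below:

  -- the inner query returns one row per row of part, so the check fails
  select p_name from part where p_size > (select p_size from part);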

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
index 44e157b..7229cc7 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
@@ -61,7 +61,7 @@ public class TestCBORuleFiredOnlyOnce {
 
     // Create rules registry to not trigger a rule more than once
     HiveRulesRegistry registry = new HiveRulesRegistry();
-    HivePlannerContext context = new HivePlannerContext(null, registry);
+    HivePlannerContext context = new HivePlannerContext(null, registry, null);
     HepPlanner planner = new HepPlanner(programBuilder.build(), context);
 
     // Cluster

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientnegative/subquery_exists_implicit_gby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/subquery_exists_implicit_gby.q b/ql/src/test/queries/clientnegative/subquery_exists_implicit_gby.q
index 9013df6..497f3c2 100644
--- a/ql/src/test/queries/clientnegative/subquery_exists_implicit_gby.q
+++ b/ql/src/test/queries/clientnegative/subquery_exists_implicit_gby.q
@@ -1,6 +1,6 @@
 
 
-select * 
+select *
 from src b 
 where exists 
   (select count(*) 

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientnegative/subquery_in_implicit_gby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/subquery_in_implicit_gby.q b/ql/src/test/queries/clientnegative/subquery_in_implicit_gby.q
new file mode 100644
index 0000000..338747e
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/subquery_in_implicit_gby.q
@@ -0,0 +1 @@
+explain select * from part where p_partkey IN (select count(*) from part pp where pp.p_type = part.p_type);

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientnegative/subquery_notexists_implicit_gby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/subquery_notexists_implicit_gby.q b/ql/src/test/queries/clientnegative/subquery_notexists_implicit_gby.q
index 852b295..57c72d2 100644
--- a/ql/src/test/queries/clientnegative/subquery_notexists_implicit_gby.q
+++ b/ql/src/test/queries/clientnegative/subquery_notexists_implicit_gby.q
@@ -1,6 +1,6 @@
 
 
-select * 
+select *
 from src b 
 where not exists 
   (select sum(1)

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientnegative/subquery_scalar_implicit_gby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/subquery_scalar_implicit_gby.q b/ql/src/test/queries/clientnegative/subquery_scalar_implicit_gby.q
new file mode 100644
index 0000000..318468f
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/subquery_scalar_implicit_gby.q
@@ -0,0 +1 @@
+select * from part where p_size <> (select count(p_size) from part pp where part.p_type <> pp.p_type);

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientnegative/subquery_scalar_multi_columns.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/subquery_scalar_multi_columns.q b/ql/src/test/queries/clientnegative/subquery_scalar_multi_columns.q
new file mode 100644
index 0000000..1f6ad1a
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/subquery_scalar_multi_columns.q
@@ -0,0 +1,2 @@
+set hive.mapred.mode=nonstrict;
+select p_name from part where p_size > (select p_size, p_type from part);

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientnegative/subquery_scalar_multi_rows.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/subquery_scalar_multi_rows.q b/ql/src/test/queries/clientnegative/subquery_scalar_multi_rows.q
new file mode 100644
index 0000000..14b68b9
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/subquery_scalar_multi_rows.q
@@ -0,0 +1,2 @@
+set hive.mapred.mode=nonstrict;
+select p_name from part where p_size > (select p_size from part);

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientnegative/subquery_with_or_cond.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/subquery_with_or_cond.q b/ql/src/test/queries/clientnegative/subquery_with_or_cond.q
index c2c3221..24ab9ba 100644
--- a/ql/src/test/queries/clientnegative/subquery_with_or_cond.q
+++ b/ql/src/test/queries/clientnegative/subquery_with_or_cond.q
@@ -1,5 +1,5 @@
 
 select count(*) 
 from src 
-where src.key in (select key from src s1 where s1.key > '9') or src.value is not null
+where src.key in (select key from src s1 where s1.key > '9') or src.value is not null or exists(select key from src);
 ;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/cbo_subq_not_in.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_subq_not_in.q b/ql/src/test/queries/clientpositive/cbo_subq_not_in.q
index 8e78225..e8e3dc2 100644
--- a/ql/src/test/queries/clientpositive/cbo_subq_not_in.q
+++ b/ql/src/test/queries/clientpositive/cbo_subq_not_in.q
@@ -35,15 +35,6 @@ part where part.p_size not in
   ) order by p_name
 ;
 
--- agg, corr
-select p_mfgr, p_name, p_size 
-from part b where b.p_size not in 
-  (select min(p_size) 
-  from (select p_mfgr, p_size from part) a 
-  where p_size < 10 and b.p_mfgr = a.p_mfgr
-  ) order by  p_name
-;
-
 -- non agg, non corr, Group By in Parent Query
 select li.l_partkey, count(*) 
 from lineitem li 

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/perf/query1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/perf/query1.q b/ql/src/test/queries/clientpositive/perf/query1.q
new file mode 100644
index 0000000..d40f66a
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/perf/query1.q
@@ -0,0 +1,24 @@
+set hive.mapred.mode=nonstrict;
+
+explain with customer_total_return as
+(select sr_customer_sk as ctr_customer_sk
+,sr_store_sk as ctr_store_sk
+,sum(SR_FEE) as ctr_total_return
+from store_returns
+,date_dim
+where sr_returned_date_sk = d_date_sk
+and d_year =2000
+group by sr_customer_sk
+,sr_store_sk)
+ select  c_customer_id
+from customer_total_return ctr1
+,store
+,customer
+where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2
+from customer_total_return ctr2
+where ctr1.ctr_store_sk = ctr2.ctr_store_sk)
+and s_store_sk = ctr1.ctr_store_sk
+and s_state = 'NM'
+and ctr1.ctr_customer_sk = c_customer_sk
+order by c_customer_id
+limit 100;

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/perf/query23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/perf/query23.q b/ql/src/test/queries/clientpositive/perf/query23.q
index e8ebd86..631d5a0 100644
--- a/ql/src/test/queries/clientpositive/perf/query23.q
+++ b/ql/src/test/queries/clientpositive/perf/query23.q
@@ -26,23 +26,26 @@ explain with frequent_ss_items as
       ,customer
   where ss_customer_sk = c_customer_sk
   group by c_customer_sk
-  having sum(ss_quantity*ss_sales_price) > (95/100.0))
+  having sum(ss_quantity*ss_sales_price) > (95/100.0) * (select
+  *
+from
+ max_store_sales))
   select  sum(sales)
- from (select cs_quantity*cs_list_price sales
+ from ((select cs_quantity*cs_list_price sales
        from catalog_sales
            ,date_dim 
        where d_year = 1999 
          and d_moy = 1 
          and cs_sold_date_sk = d_date_sk 
          and cs_item_sk in (select item_sk from frequent_ss_items)
-         and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer)
+         and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer))
       union all
-      select ws_quantity*ws_list_price sales
+      (select ws_quantity*ws_list_price sales
        from web_sales 
            ,date_dim 
        where d_year = 1999 
          and d_moy = 1 
          and ws_sold_date_sk = d_date_sk 
          and ws_item_sk in (select item_sk from frequent_ss_items)
-         and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer)) y
+         and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer))) y
  limit 100;

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/perf/query30.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/perf/query30.q b/ql/src/test/queries/clientpositive/perf/query30.q
new file mode 100644
index 0000000..25c9e07
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/perf/query30.q
@@ -0,0 +1,30 @@
+set hive.mapred.mode=nonstrict;
+
+explain with customer_total_return as
+ (select wr_returning_customer_sk as ctr_customer_sk
+        ,ca_state as ctr_state, 
+ 	sum(wr_return_amt) as ctr_total_return
+ from web_returns
+     ,date_dim
+     ,customer_address
+ where wr_returned_date_sk = d_date_sk 
+   and d_year =2002
+   and wr_returning_addr_sk = ca_address_sk 
+ group by wr_returning_customer_sk
+         ,ca_state)
+  select  c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag
+       ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login,c_email_address
+       ,c_last_review_date,ctr_total_return
+ from customer_total_return ctr1
+     ,customer_address
+     ,customer
+ where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2
+ 			  from customer_total_return ctr2 
+                  	  where ctr1.ctr_state = ctr2.ctr_state)
+       and ca_address_sk = c_current_addr_sk
+       and ca_state = 'IL'
+       and ctr1.ctr_customer_sk = c_customer_sk
+ order by c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag
+                  ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login,c_email_address
+                  ,c_last_review_date,ctr_total_return
+limit 100;

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/perf/query6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/perf/query6.q b/ql/src/test/queries/clientpositive/perf/query6.q
new file mode 100644
index 0000000..8e4a7d3
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/perf/query6.q
@@ -0,0 +1,25 @@
+set hive.mapred.mode=nonstrict;
+
+explain select  a.ca_state state, count(*) cnt
+ from customer_address a
+     ,customer c
+     ,store_sales s
+     ,date_dim d
+     ,item i
+ where       a.ca_address_sk = c.c_current_addr_sk
+ 	and c.c_customer_sk = s.ss_customer_sk
+ 	and s.ss_sold_date_sk = d.d_date_sk
+ 	and s.ss_item_sk = i.i_item_sk
+ 	and d.d_month_seq = 
+ 	     (select distinct (d_month_seq)
+ 	      from date_dim
+               where d_year = 2000
+ 	        and d_moy = 2 )
+ 	and i.i_current_price > 1.2 * 
+             (select avg(j.i_current_price) 
+ 	     from item j 
+ 	     where j.i_category = i.i_category)
+ group by a.ca_state
+ having count(*) >= 10
+ order by cnt 
+ limit 100;

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/perf/query81.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/perf/query81.q b/ql/src/test/queries/clientpositive/perf/query81.q
new file mode 100644
index 0000000..77de348
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/perf/query81.q
@@ -0,0 +1,30 @@
+set hive.mapred.mode=nonstrict;
+
+explain with customer_total_return as
+ (select cr_returning_customer_sk as ctr_customer_sk
+        ,ca_state as ctr_state, 
+ 	sum(cr_return_amt_inc_tax) as ctr_total_return
+ from catalog_returns
+     ,date_dim
+     ,customer_address
+ where cr_returned_date_sk = d_date_sk 
+   and d_year =1998
+   and cr_returning_addr_sk = ca_address_sk 
+ group by cr_returning_customer_sk
+         ,ca_state )
+  select  c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name
+                   ,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset
+                  ,ca_location_type,ctr_total_return
+ from customer_total_return ctr1
+     ,customer_address
+     ,customer
+ where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2
+ 			  from customer_total_return ctr2 
+                  	  where ctr1.ctr_state = ctr2.ctr_state)
+       and ca_address_sk = c_current_addr_sk
+       and ca_state = 'IL'
+       and ctr1.ctr_customer_sk = c_customer_sk
+ order by c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name
+                   ,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset
+                  ,ca_location_type,ctr_total_return
+ limit 100;

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/subquery_exists.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/subquery_exists.q b/ql/src/test/queries/clientpositive/subquery_exists.q
index cac8e1b..0dfab7a 100644
--- a/ql/src/test/queries/clientpositive/subquery_exists.q
+++ b/ql/src/test/queries/clientpositive/subquery_exists.q
@@ -76,3 +76,12 @@ where exists
   where a.value > 'val_9'
   );
 
+-- uncorr, aggregate in sub which produces result irrespective of zero rows
+create table t(i int);
+insert into t values(1);
+insert into t values(0);
+
+explain select * from t where exists (select count(*) from src where 1=2);
+select * from t where exists (select count(*) from src where 1=2);
+
+drop table t;

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/subquery_in.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/subquery_in.q b/ql/src/test/queries/clientpositive/subquery_in.q
index fe0c9c8..7293c77 100644
--- a/ql/src/test/queries/clientpositive/subquery_in.q
+++ b/ql/src/test/queries/clientpositive/subquery_in.q
@@ -55,24 +55,6 @@ part where part.p_size in
 	)
 ;
 
--- agg, corr
-explain
-select p_mfgr, p_name, p_size 
-from part b where b.p_size in 
-	(select min(p_size) 
-	 from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a 
-	 where r <= 2 and b.p_mfgr = a.p_mfgr
-	)
-;
-
-select p_mfgr, p_name, p_size 
-from part b where b.p_size in 
-	(select min(p_size) 
-	 from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a 
-	 where r <= 2 and b.p_mfgr = a.p_mfgr
-	)
-;
-
 -- distinct, corr
 explain 
 select * 
@@ -127,8 +109,8 @@ select sum(l_extendedprice) from lineitem, part where p_partkey = l_partkey and
 
 
 --where has multiple conjunction
-explain select * from part where p_brand <> 'Brand#14' AND p_size IN (select min(p_size) from part p where p.p_type = part.p_type group by p_type) AND p_size <> 340;
-select * from part where p_brand <> 'Brand#14' AND p_size IN (select min(p_size) from part p where p.p_type = part.p_type group by p_type) AND p_size <> 340;
+explain select * from part where p_brand <> 'Brand#14' AND p_size IN (select (p_size) from part p where p.p_type = part.p_type group by p_size) AND p_size <> 340;
+select * from part where p_brand <> 'Brand#14' AND p_size IN (select (p_size) from part p where p.p_type = part.p_type group by p_size) AND p_size <> 340;
 
 --lhs contains non-simple expression
 explain select * from part  where (p_size-1) IN (select min(p_size) from part group by p_type);
@@ -206,8 +188,18 @@ explain select p_partkey from
 select p_partkey from 
 	(select p_size, p_partkey from part where p_name in (select p.p_name from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size)) subq;
 
+create table t(i int);
+insert into t values(1);
+insert into t values(0);
 
 create table tempty(i int);
+
+-- uncorr sub with aggregate which produces result irrespective of zero rows
+explain select * from t where i IN (select count(*) from tempty);
+select * from t where i IN (select count(*) from tempty);
+
+drop table t;
+
 create table tnull(i int);
 insert into tnull values(NULL) , (NULL);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/subquery_in_having.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/subquery_in_having.q b/ql/src/test/queries/clientpositive/subquery_in_having.q
index 40b7e32..0bf8657 100644
--- a/ql/src/test/queries/clientpositive/subquery_in_having.q
+++ b/ql/src/test/queries/clientpositive/subquery_in_having.q
@@ -35,14 +35,6 @@ group by key
 having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key )
 ;
 
--- non agg, corr
-explain
- select key, value, count(*) 
-from src b
-group by key, value
-having count(*) in (select count(*) from src s1 where s1.key > '9'  and s1.value = b.value group by s1.key )
-;
-
 set hive.optimize.correlation=false;
 
 -- agg, non corr
@@ -125,13 +117,13 @@ select key, value, count(*)
 from src b
 where b.key in (select key from src where src.value = b.value)
 group by key, value
-having count(*) in (select count(*) from src s1 where s1.key > '9' and s1.value = b.value group by s1.key )
+having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key )
 ;
 select key, value, count(*)
 from src b
 where b.key in (select key from src where src.value = b.value)
 group by key, value
-having count(*) in (select count(*) from src s1 where s1.key > '9' and s1.value = b.value group by s1.key )
+having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key )
 ;
 
 -- non agg, non corr, windowing

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/subquery_notin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/subquery_notin.q b/ql/src/test/queries/clientpositive/subquery_notin.q
index f9b5405..4181336 100644
--- a/ql/src/test/queries/clientpositive/subquery_notin.q
+++ b/ql/src/test/queries/clientpositive/subquery_notin.q
@@ -57,25 +57,6 @@ part where part.p_size not in
 order by p_name, p_size
 ;
 
--- agg, corr
-explain
-select p_mfgr, p_name, p_size 
-from part b where b.p_size not in 
-  (select min(p_size) 
-  from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a 
-  where r <= 2 and b.p_mfgr = a.p_mfgr
-  )
-;
-
-select p_mfgr, p_name, p_size 
-from part b where b.p_size not in 
-  (select min(p_size) 
-  from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a 
-  where r <= 2 and b.p_mfgr = a.p_mfgr
-  )
-order by p_mfgr, p_size
-;
-
 -- non agg, non corr, Group By in Parent Query
 select li.l_partkey, count(*)
 from lineitem li
@@ -106,8 +87,8 @@ select *
 from T1_v where T1_v.key not in (select T2_v.key from T2_v);
 
 --where has multiple conjunction
-explain select * from part where p_brand <> 'Brand#14' AND p_size NOT IN (select min(p_size) from part p where p.p_type = part.p_type group by p_type) AND p_size <> 340;
-select * from part where p_brand <> 'Brand#14' AND p_size NOT IN (select min(p_size) from part p where p.p_type = part.p_type group by p_type) AND p_size <> 340;
+explain select * from part where p_brand <> 'Brand#14' AND p_size NOT IN (select (p_size*p_size) from part p where p.p_type = part.p_type ) AND p_size <> 340;
+select * from part where p_brand <> 'Brand#14' AND p_size NOT IN (select (p_size*p_size) from part p where p.p_type = part.p_type ) AND p_size <> 340;
 
 --lhs contains non-simple expression
 explain select * from part  where (p_size-1) NOT IN (select min(p_size) from part group by p_type);
@@ -132,20 +113,20 @@ explain select * from part where p_name NOT IN (select p_name from part p where
 select * from part where p_name NOT IN (select p_name from part p where p.p_size = part.p_size AND part.p_partkey= p.p_partkey );
 
 -- correlated var refers to outer table alias
-explain select p_name from (select p_name, p_type, p_brand as brand from part) fpart where fpart.p_type NOT IN (select p_type from part where part.p_brand = fpart.brand);
-select p_name from (select p_name, p_type, p_brand as brand from part) fpart where fpart.p_type NOT IN (select p_type from part where part.p_brand = fpart.brand);
+explain select p_name from (select p_name, p_type, p_brand as brand from part) fpart where fpart.p_type NOT IN (select p_type+2 from part where part.p_brand = fpart.brand);
+select p_name from (select p_name, p_type, p_brand as brand from part) fpart where fpart.p_type NOT IN (select p_type+2 from part where part.p_brand = fpart.brand);
  
 -- correlated var refers to outer table alias which is an expression 
 explain select p_name from (select p_name, p_type, p_size+1 as size from part) fpart where fpart.p_type NOT IN (select p_type from part where (part.p_size+1) = fpart.size);
-select p_name from (select p_name, p_type, p_size+1 as size from part) fpart where fpart.p_type NOT IN (select p_type from part where (part.p_size+1) = fpart.size);
+select p_name from (select p_name, p_type, p_size+1 as size from part) fpart where fpart.p_type NOT IN (select p_type from part where (part.p_size+1) = fpart.size+1);
 
 -- where plus having
-explain select key, count(*) from src where value NOT IN (select value from src) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key );
-select key, count(*) from src where value NOT IN (select value from src) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key );
+explain select key, count(*) from src where value NOT IN (select key from src) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key );
+select key, count(*) from src where value NOT IN (select key from src) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key );
 
 -- where with having, correlated
-explain select key, count(*) from src where value NOT IN (select value from src sc where sc.key = src.key ) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key );
-select key, count(*) from src where value NOT IN (select value from src sc where sc.key = src.key ) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key );
+explain select key, count(*) from src where value NOT IN (select concat('v', value) from src sc where sc.key = src.key ) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key );
+select key, count(*) from src where value NOT IN (select concat('v', value) from src sc where sc.key = src.key ) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key );
 
 -- subquery with order by
 explain select * from part  where (p_size-1) NOT IN (select min(p_size) from part group by p_type) order by p_brand;

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/subquery_scalar.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/subquery_scalar.q b/ql/src/test/queries/clientpositive/subquery_scalar.q
new file mode 100644
index 0000000..97d34c3
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/subquery_scalar.q
@@ -0,0 +1,177 @@
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+create table tnull(i int, c char(2));
+insert into tnull values(NULL, NULL), (NULL, NULL);
+
+create table tempty(c char(2));
+
+CREATE TABLE part_null(
+    p_partkey INT,
+    p_name STRING,
+    p_mfgr STRING,
+    p_brand STRING,
+    p_type STRING,
+    p_size INT,
+    p_container STRING,
+    p_retailprice DOUBLE,
+    p_comment STRING
+)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY ","
+;
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny_nulls.txt' overwrite into table part_null;
+
+insert into part_null values(78487,NULL,'Manufacturer#6','Brand#52','LARGE BRUSHED BRASS', 23, 'MED BAG',1464.48,'hely blith');
+
+
+-- non corr, simple less than  
+explain select * from part where p_size > (select avg(p_size) from part_null);
+select * from part where p_size > (select avg(p_size) from part_null);
+
+-- non corr, empty
+select * from part where p_size > (select * from tempty);
+explain select * from part where p_size > (select * from tempty);
+
+-- non corr, null comparison
+explain select * from part where p_name = (select p_name from part_null where p_name is null);
+select * from part where p_name = (select p_name from part_null where p_name is null);
+
+-- non corr, is null 
+explain select * from part where (select i from tnull limit 1) is null;
+select * from part where (select i from tnull limit 1) is null;
+
+-- non corr, is not null
+explain select * from part where (select max(p_name) from part_null) is not null;
+select * from part where (select max(p_name) from part_null) is not null;
+
+-- non corr, between
+explain select * from part where p_size between (select min(p_size) from part) and (select avg(p_size) from part);
+select * from part where p_size between (select min(p_size) from part) and (select avg(p_size) from part);
+
+-- non corr, windowing
+explain select p_mfgr, p_name, p_size from part 
+where part.p_size > 
+  (select first_value(p_size) over(partition by p_mfgr order by p_size) as fv from part order by fv limit 1);
+select p_mfgr, p_name, p_size from part 
+where part.p_size > 
+  (select first_value(p_size) over(partition by p_mfgr order by p_size) as fv from part order by fv limit 1);
+
+
+-- lhs contain complex expressions
+explain select * from part where (p_partkey*p_size) <> (select min(p_partkey) from part);
+select * from part where (p_partkey*p_size) <> (select min(p_partkey) from part);
+
+-- corr, lhs contain complex expressions
+explain select count(*) as c from part as e where p_size + 100 < (select max(p_partkey) from part where p_name = e.p_name);
+select count(*) as c from part as e where p_size + 100 < (select max(p_partkey) from part where p_name = e.p_name);
+
+
+-- corr, equi-join predicate
+explain select * from part where p_size > (select avg(p_size) from part_null where part_null.p_type = part.p_type);
+select * from part where p_size > (select avg(p_size) from part_null where part_null.p_type = part.p_type);
+
+-- mix of corr and uncorr
+explain select * from part where p_size BETWEEN (select min(p_size) from part_null where part_null.p_type = part.p_type) AND (select max(p_size) from part_null);
+select * from part where p_size BETWEEN (select min(p_size) from part_null where part_null.p_type = part.p_type) AND (select max(p_size) from part_null);
+
+-- mix of corr and uncorr
+explain select * from part where p_size >= (select min(p_size) from part_null where part_null.p_type = part.p_type) AND p_retailprice <= (select max(p_retailprice) from part_null);
+select * from part where p_size >= (select min(p_size) from part_null where part_null.p_type = part.p_type) AND p_retailprice <= (select max(p_retailprice) from part_null);
+
+-- mix of scalar and IN corr 
+explain select * from part where p_brand <> (select min(p_brand) from part ) AND p_size IN (select (p_size) from part p where p.p_type = part.p_type ) AND p_size <> 340;
+select * from part where p_brand <> (select min(p_brand) from part ) AND p_size IN (select (p_size) from part p where p.p_type = part.p_type ) AND p_size <> 340;
+
+-- multiple corr var with scalar query
+explain select * from part where p_size <> (select count(p_name) from part p where p.p_size = part.p_size AND part.p_partkey= p.p_partkey );
+select * from part where p_size <> (select count(p_name) from part p where p.p_size = part.p_size AND part.p_partkey= p.p_partkey );
+
+-- where + having
+explain select key, count(*) from src where value <> (select max(value) from src) group by key having count(*) > (select count(*) from src s1 where s1.key = '90' group by s1.key );
+select key, count(*) from src where value <> (select max(value) from src) group by key having count(*) > (select count(*) from src s1 where s1.key = '90' group by s1.key );
+
+-- scalar subquery with INTERSECT
+explain select * from part where p_size > (select count(p_name) from part INTERSECT select count(p_brand) from part);
+select * from part where p_size > (select count(p_name) from part INTERSECT select count(p_brand) from part);
+
+-- join in subquery
+explain select p_partkey from part where p_name like (select max(p.p_name) from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size);
+select p_partkey from part where p_name like (select max(p.p_name) from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size);
+
+-- mix of NOT IN and scalar
+explain select * from part_null where p_name NOT LIKE (select min(p_name) from part_null) AND p_brand NOT IN (select p_name from part);
+select * from part_null where p_name NOT LIKE (select min(p_name) from part_null) AND p_brand NOT IN (select p_name from part);
+
+-- mix of NOT IN and corr scalar
+explain select * from part_null where p_brand NOT IN (select p_name from part) AND p_name NOT LIKE (select min(p_name) from part_null pp where part_null.p_type = pp.p_type);
+select * from part_null where p_brand NOT IN (select p_name from part) AND p_name NOT LIKE (select min(p_name) from part_null pp where part_null.p_type = pp.p_type);
+
+-- non corr, with join in parent query
+explain select p.p_partkey, li.l_suppkey 
+from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
+where li.l_linenumber = 1 and
+li.l_orderkey <> (select min(l_orderkey) from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
+;
+select p.p_partkey, li.l_suppkey 
+from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
+where li.l_linenumber = 1 and
+li.l_orderkey <> (select min(l_orderkey) from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
+;
+
+-- corr, with join in outer query
+explain select p.p_partkey, li.l_suppkey 
+from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
+where li.l_linenumber = 1 and
+ li.l_orderkey <> (select min(l_orderkey) from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber);
+select p.p_partkey, li.l_suppkey 
+from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
+where li.l_linenumber = 1 and
+li.l_orderkey <> (select min(l_orderkey) from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber);
+
+-- corr, aggregate in outer
+explain select sum(l_extendedprice) from lineitem, part where p_partkey = l_partkey and l_quantity > (select avg(l_quantity) from lineitem where l_partkey = p_partkey);
+select sum(l_extendedprice) from lineitem, part where p_partkey = l_partkey and l_quantity > (select avg(l_quantity) from lineitem where l_partkey = p_partkey);
+
+-- nested with scalar
+explain select * from part_null where p_name IN (select p_name from part where part.p_type = part_null.p_type AND p_brand NOT LIKE (select min(p_brand) from part pp where part.p_type = pp.p_type));
+select * from part_null where p_name IN (select p_name from part where part.p_type = part_null.p_type AND p_brand NOT LIKE (select min(p_brand) from part pp where part.p_type = pp.p_type));
+
+drop table tnull;
+drop table part_null;
+drop table tempty;
+
+
+create table EMPS(EMPNO int,NAME string,DEPTNO int,GENDER string,CITY string,EMPID int,AGE int,SLACKER boolean,MANAGER boolean,JOINEDAT date);
+
+insert into EMPS values (100,'Fred',10,NULL,NULL,30,25,true,false,'1996-08-03');
+insert into EMPS values (110,'Eric',20,'M','San Francisco',3,80,NULL,false,'2001-01-01') ;
+insert into EMPS values (110,'John',40,'M','Vancouver',2,NULL,false,true,'2002-05-03');
+insert into EMPS values (120,'Wilma',20,'F',NULL,1,5,NULL,true,'2005-09-07');
+insert into EMPS values (130,'Alice',40,'F','Vancouver',2,NULL,false,true,'2007-01-01');
+
+create table DEPTS(deptno int, name string);
+insert into DEPTS values( 10,'Sales');
+insert into DEPTS values( 20,'Marketing');
+insert into DEPTS values( 30,'Accounts');
+
+-- corr, scalar, with count aggregate
+explain select * from emps where deptno <> (select count(deptno) from depts where depts.name = emps.name);
+select * from emps where deptno <> (select count(deptno) from depts where depts.name = emps.name);
+
+explain select * from emps where name > (select min(name) from depts where depts.deptno=emps.deptno);
+select * from emps where name > (select min(name) from depts where depts.deptno=emps.deptno);
+
+-- corr, scalar multiple subq with count aggregate
+explain select * from emps where deptno <> (select count(deptno) from depts where depts.name = emps.name) and empno > (select count(name) from depts where depts.deptno = emps.deptno);
+select * from emps where deptno <> (select count(deptno) from depts where depts.name = emps.name) and empno > (select count(name) from depts where depts.deptno = emps.deptno);
+
+-- mix of corr, uncorr with aggregate
+explain select * from emps where deptno <> (select sum(deptno) from depts where depts.name = emps.name) and empno > (select count(name) from depts);
+select * from emps where deptno <> (select count(deptno) from depts where depts.name = emps.name) and empno > (select count(name) from depts);
+
+drop table DEPTS;
+drop table EMPS;
+
+
+

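(A side note on the count-aggregate cases in the new subquery_scalar.q above: COUNT evaluates to 0, not NULL, on empty input, so a correlated scalar subquery built on COUNT still produces a value for outer rows that have no match. A minimal sketch mirroring one of the EMPS/DEPTS queries in the test file:)

-- illustrative only; uses the EMPS/DEPTS tables created in the test above.
-- For an emps row whose deptno has no match in depts, the subquery sees zero
-- rows, count(name) evaluates to 0, and the comparison is still well-defined.
select * from emps
where empno > (select count(name) from depts where depts.deptno = emps.deptno);
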
http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q b/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q
index bdfa648..dad6f23 100644
--- a/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q
+++ b/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q
@@ -18,26 +18,6 @@ explain select * from src11 where src11.key1 in (select key from src where src11
 
 explain select * from src a where a.key in (select key from src where a.value = value and key > '9');
 
--- agg, corr
-explain
-select p_mfgr, p_name, p_size 
-from part b where b.p_size in 
-  (select min(p2_size) 
-    from (select p2_mfgr, p2_size, rank() over(partition by p2_mfgr order by p2_size) as r from part2) a 
-    where r <= 2 and b.p_mfgr = p2_mfgr
-  )
-;
-
-
-explain
-select p_mfgr, p_name, p_size 
-from part b where b.p_size in 
-  (select min(p_size) 
-   from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a 
-   where r <= 2 and b.p_mfgr = p_mfgr
-  )
-;
-
 -- distinct, corr
 explain 
 select * 
@@ -49,15 +29,6 @@ where b.key in
         )
 ;
 
--- non agg, corr, having
-explain
- select key, value, count(*) 
-from src b
-group by key, value
-having count(*) in (select count(*) from src where src.key > '9'  and src.value = b.value group by key )
-;
-
--- non agg, corr
 explain
 select p_mfgr, b.p_name, p_size 
 from part b 

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/results/clientnegative/subquery_exists_implicit_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/subquery_exists_implicit_gby.q.out b/ql/src/test/results/clientnegative/subquery_exists_implicit_gby.q.out
index f7251e3..bab6138 100644
--- a/ql/src/test/results/clientnegative/subquery_exists_implicit_gby.q.out
+++ b/ql/src/test/results/clientnegative/subquery_exists_implicit_gby.q.out
@@ -1 +1 @@
-FAILED: SemanticException [Error 10250]: Line 7:7 Invalid SubQuery expression 'key': An Exists predicate on SubQuery with implicit Aggregation(no Group By clause) cannot be rewritten. (predicate will always return true).
+FAILED: SemanticException [Error 10250]: org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSubquerySemanticException: Line 7:7 Invalid SubQuery expression ''val_9'': A predicate on EXISTS/NOT EXISTS/IN/NOT IN SubQuery with implicit Aggregation(no Group By clause) cannot be rewritten.

http://git-wip-us.apache.org/repos/asf/hive/blob/d9343f6d/ql/src/test/results/clientnegative/subquery_in_implicit_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/subquery_in_implicit_gby.q.out b/ql/src/test/results/clientnegative/subquery_in_implicit_gby.q.out
new file mode 100644
index 0000000..a882fbc
--- /dev/null
+++ b/ql/src/test/results/clientnegative/subquery_in_implicit_gby.q.out
@@ -0,0 +1 @@
+FAILED: SemanticException [Error 10250]: org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSubquerySemanticException: Line 1:68 Invalid SubQuery expression 'p_type': A predicate on EXISTS/NOT EXISTS/IN/NOT IN SubQuery with implicit Aggregation(no Group By clause) cannot be rewritten.

