hive-commits mailing list archives

From: sze...@apache.org
Subject: svn commit: r1649740 [4/10] - in /hive/branches/spark: ./ beeline/src/java/org/apache/hive/beeline/ common/src/java/org/apache/hadoop/hive/conf/ contrib/src/test/results/clientpositive/ data/files/ itests/util/src/main/java/org/apache/hadoop/hive/ql/ho...
Date: Tue, 06 Jan 2015 06:58:44 GMT
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java Tue Jan  6 06:58:42 2015
@@ -273,8 +273,12 @@ public final class SemanticAnalyzerFacto
       case HiveParser.TOK_DELETE_FROM:
         return new UpdateDeleteSemanticAnalyzer(conf);
 
-      default:
-        return new SemanticAnalyzer(conf);
+      default: {
+        SemanticAnalyzer semAnalyzer = HiveConf
+            .getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_ENABLED) ? new CalcitePlanner(conf)
+            : new SemanticAnalyzer(conf);
+        return semAnalyzer;
+      }
       }
     }
   }
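
The default branch above now keys planner selection off HiveConf.ConfVars.HIVE_CBO_ENABLED (hive.cbo.enable), returning the Calcite-based planner only when the flag is on; the hunk assigns it to a SemanticAnalyzer variable, so CalcitePlanner is a subtype of the base analyzer. A minimal sketch of that flag-gated factory pattern in plain Java, using hypothetical names rather than the real Hive classes:

interface Planner { String name(); }

class BasicPlanner implements Planner {
  public String name() { return "basic"; }
}

// Stand-in for the Calcite planner, which subclasses the basic analyzer.
class CostBasedPlanner extends BasicPlanner {
  @Override public String name() { return "cost-based"; }
}

public class PlannerFactory {
  // Mirrors the hunk: hand back the subclass only when the flag is set,
  // so every caller stays typed against the base planner.
  static Planner forConfig(java.util.Properties conf) {
    boolean cbo = Boolean.parseBoolean(conf.getProperty("cbo.enabled", "true"));
    return cbo ? new CostBasedPlanner() : new BasicPlanner();
  }

  public static void main(String[] args) {
    java.util.Properties conf = new java.util.Properties();
    conf.setProperty("cbo.enabled", "false");
    System.out.println(forConfig(conf).name()); // prints "basic"
  }
}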

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java Tue Jan  6 06:58:42 2015
@@ -141,7 +141,8 @@ public class TableAccessAnalyzer {
         return null;
       }
 
-      Map<String, List<String>> tableToKeysMap = new HashMap<String, List<String>>();
+      // Must be deterministic order map for consistent q-test output across Java versions
+      Map<String, List<String>> tableToKeysMap = new LinkedHashMap<String, List<String>>();
       Table tbl = pGraphContext.getTopToTable().get(tso);
       tableToKeysMap.put(tbl.getCompleteName(), keyColNames);
       tableAccessCtx.addOperatorTableAccess(op, tableToKeysMap);
@@ -165,7 +166,8 @@ public class TableAccessAnalyzer {
         Object... nodeOutputs) {
       JoinOperator op = (JoinOperator)nd;
       TableAccessCtx tableAccessCtx = (TableAccessCtx)procCtx;
-      Map<String, List<String>> tableToKeysMap = new HashMap<String, List<String>>();
+      // Must be deterministic order map for consistent q-test output across Java versions
+      Map<String, List<String>> tableToKeysMap = new LinkedHashMap<String, List<String>>();
 
       List<Operator<? extends OperatorDesc>> parentOps = op.getParentOperators();
 

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessInfo.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessInfo.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessInfo.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessInfo.java Tue Jan  6 06:58:42 2015
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -33,8 +33,9 @@ public class TableAccessInfo {
     Map<String, List<String>>> operatorToTableAccessMap;
 
   public TableAccessInfo() {
+    // Must be deterministic order map for consistent q-test output across Java versions
     operatorToTableAccessMap =
-      new HashMap<Operator<? extends OperatorDesc>, Map<String, List<String>>>();
+      new LinkedHashMap<Operator<? extends OperatorDesc>, Map<String, List<String>>>();
   }
 
   public void add(Operator<? extends OperatorDesc> op,
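
This hunk, the TableAccessAnalyzer one above, and several later in this commit swap HashMap/HashSet for their Linked variants purely for deterministic iteration: HashMap's iteration order depends on the JVM's hashing internals, which changed between Java 7 and Java 8 and was perturbing q-test output, while LinkedHashMap always iterates in insertion order. A self-contained illustration:

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class IterationOrderDemo {
  public static void main(String[] args) {
    Map<String, Integer> hash = new HashMap<String, Integer>();
    Map<String, Integer> linked = new LinkedHashMap<String, Integer>();
    for (String k : new String[] {"default@t1", "default@t2", "default@t3"}) {
      hash.put(k, k.length());
      linked.put(k, k.length());
    }
    // Order is unspecified and can differ across JVM versions:
    System.out.println(hash.keySet());
    // Always prints [default@t1, default@t2, default@t3]:
    System.out.println(linked.keySet());
  }
}

The same order dependence is visible in the column_access_stats.q.out diff further down, where default@t1/default@t2 swap places once the map preserves insertion order.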

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java Tue Jan  6 06:58:42 2015
@@ -62,8 +62,10 @@ import org.apache.hadoop.hive.ql.udf.gen
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
 import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
@@ -128,9 +130,7 @@ public class TypeCheckProcFactory {
     // If the current subExpression is pre-calculated, as in Group-By etc.
     ColumnInfo colInfo = input.getExpression(expr);
     if (colInfo != null) {
-      desc = new ExprNodeColumnDesc(colInfo.getType(), colInfo
-          .getInternalName(), colInfo.getTabAlias(), colInfo
-          .getIsVirtualCol());
+      desc = new ExprNodeColumnDesc(colInfo);
       ASTNode source = input.getExpressionSource(expr);
       if (source != null) {
         ctx.getUnparseTranslator().addCopyTranslation(expr, source);
@@ -513,9 +513,7 @@ public class TypeCheckProcFactory {
             return null;
           }
           // It's a column.
-          return new ExprNodeColumnDesc(colInfo.getType(), colInfo
-              .getInternalName(), colInfo.getTabAlias(), colInfo
-              .getIsVirtualCol());
+          return toExprNodeDesc(colInfo);
         } else {
           // It's a table alias.
           // We will process that later in DOT.
@@ -547,11 +545,7 @@ public class TypeCheckProcFactory {
           }
         } else {
           // It's a column.
-          ExprNodeColumnDesc exprNodColDesc = new ExprNodeColumnDesc(colInfo.getType(), colInfo
-              .getInternalName(), colInfo.getTabAlias(), colInfo
-              .getIsVirtualCol());
-          exprNodColDesc.setSkewedCol(colInfo.isSkewedCol());
-          return exprNodColDesc;
+          return toExprNodeDesc(colInfo);
         }
       }
 
@@ -559,6 +553,20 @@ public class TypeCheckProcFactory {
 
   }
 
+  private static ExprNodeDesc toExprNodeDesc(ColumnInfo colInfo) {
+    ObjectInspector inspector = colInfo.getObjectInspector();
+    if (inspector instanceof ConstantObjectInspector && 
+        inspector instanceof PrimitiveObjectInspector) {
+      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) inspector;
+      Object constant = ((ConstantObjectInspector) inspector).getWritableConstantValue();
+      return new ExprNodeConstantDesc(colInfo.getType(), poi.getPrimitiveJavaObject(constant));
+    }
+    // non-constant or non-primitive constants
+    ExprNodeColumnDesc column = new ExprNodeColumnDesc(colInfo);
+    column.setSkewedCol(colInfo.isSkewedCol());
+    return column;
+  }
+
   /**
    * Factory method to get ColumnExprProcessor.
    *
@@ -979,7 +987,7 @@ public class TypeCheckProcFactory {
       return false;
     }
 
-    protected ExprNodeColumnDesc processQualifiedColRef(TypeCheckCtx ctx, ASTNode expr,
+    protected ExprNodeDesc processQualifiedColRef(TypeCheckCtx ctx, ASTNode expr,
         Object... nodeOutputs) throws SemanticException {
       RowResolver input = ctx.getInputRR();
       String tableAlias = BaseSemanticAnalyzer.unescapeIdentifier(expr.getChild(0).getChild(0)
@@ -993,8 +1001,7 @@ public class TypeCheckProcFactory {
         ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr.getChild(1)), expr);
         return null;
       }
-      return new ExprNodeColumnDesc(colInfo.getType(), colInfo.getInternalName(),
-          colInfo.getTabAlias(), colInfo.getIsVirtualCol());
+      return toExprNodeDesc(colInfo);
     }
 
     @Override
@@ -1080,16 +1087,14 @@ public class TypeCheckProcFactory {
           for (Map.Entry<String, ColumnInfo> colMap : columns.entrySet()) {
             ColumnInfo colInfo = colMap.getValue();
             if (!colInfo.getIsVirtualCol()) {
-              columnList.addColumn(new ExprNodeColumnDesc(colInfo.getType(),
-                  colInfo.getInternalName(), colInfo.getTabAlias(), false));
+              columnList.addColumn(toExprNodeDesc(colInfo));
             }
           }
         } else {
           // all columns (select *, for example)
           for (ColumnInfo colInfo : input.getColumnInfos()) {
             if (!colInfo.getIsVirtualCol()) {
-              columnList.addColumn(new ExprNodeColumnDesc(colInfo.getType(),
-                  colInfo.getInternalName(), colInfo.getTabAlias(), false));
+              columnList.addColumn(toExprNodeDesc(colInfo));
             }
           }
         }
@@ -1127,7 +1132,7 @@ public class TypeCheckProcFactory {
           expr.getChildCount() - childrenBegin);
       for (int ci = childrenBegin; ci < expr.getChildCount(); ci++) {
         if (nodeOutputs[ci] instanceof ExprNodeColumnListDesc) {
-          children.addAll(((ExprNodeColumnListDesc)nodeOutputs[ci]).getChildren());
+          children.addAll(((ExprNodeColumnListDesc) nodeOutputs[ci]).getChildren());
         } else {
           children.add((ExprNodeDesc) nodeOutputs[ci]);
         }
@@ -1142,8 +1147,7 @@ public class TypeCheckProcFactory {
         RowResolver input = ctx.getInputRR();
         for (ColumnInfo colInfo : input.getColumnInfos()) {
           if (!colInfo.getIsVirtualCol()) {
-            children.add(new ExprNodeColumnDesc(colInfo.getType(),
-                colInfo.getInternalName(), colInfo.getTabAlias(), false));
+            children.add(toExprNodeDesc(colInfo));
           }
         }
       }
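
The new toExprNodeDesc helper folds a column whose ObjectInspector is both constant and primitive into an ExprNodeConstantDesc; this is what turns the projected literal plus count(_col0) into a plain count(1) in the q.out diffs later in this message. A plain-Java sketch of the double-instanceof guard, with hypothetical stand-in types rather than the real inspectors:

interface Inspector {}
interface ConstantInspector extends Inspector { Object constantValue(); }
interface PrimitiveInspector extends Inspector {}

public class FoldDemo {
  static String describe(Inspector oi) {
    // Fold only when the inspector is BOTH constant and primitive;
    // non-constant columns and complex-typed constants stay column refs.
    if (oi instanceof ConstantInspector && oi instanceof PrimitiveInspector) {
      return "constant " + ((ConstantInspector) oi).constantValue();
    }
    return "column reference";
  }

  public static void main(String[] args) {
    class One implements ConstantInspector, PrimitiveInspector {
      public Object constantValue() { return 1; }
    }
    System.out.println(describe(new One()));          // constant 1
    System.out.println(describe(new Inspector() {})); // column reference
  }
}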

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java Tue Jan  6 06:58:42 2015
@@ -19,7 +19,8 @@ package org.apache.hadoop.hive.ql.parse;
 
 import java.io.IOException;
 import java.util.HashMap;
-import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -158,7 +159,8 @@ public class UpdateDeleteSemanticAnalyze
     rewrittenQueryStr.append(" select ROW__ID");
     Map<Integer, ASTNode> setColExprs = null;
     Map<String, ASTNode> setCols = null;
-    Set<String> setRCols = new HashSet<String>();
+    // Must be deterministic order set for consistent q-test output across Java versions
+    Set<String> setRCols = new LinkedHashSet<String>();
     if (updating()) {
       // An update needs to select all of the columns, as we rewrite the entire row.  Also,
       // we need to figure out which columns we are going to replace.  We won't write the set
@@ -171,7 +173,8 @@ public class UpdateDeleteSemanticAnalyze
 
       // Get the children of the set clause, each of which should be a column assignment
       List<? extends Node> assignments = setClause.getChildren();
-      setCols = new HashMap<String, ASTNode>(assignments.size());
+      // Must be deterministic order map for consistent q-test output across Java versions
+      setCols = new LinkedHashMap<String, ASTNode>(assignments.size());
       setColExprs = new HashMap<Integer, ASTNode>(assignments.size());
       for (Node a : assignments) {
         ASTNode assignment = (ASTNode)a;

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnListDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnListDesc.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnListDesc.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnListDesc.java Tue Jan  6 06:58:42 2015
@@ -30,16 +30,17 @@ import org.apache.hadoop.hive.serde2.typ
  */
 public class ExprNodeColumnListDesc extends ExprNodeDesc {
 
-  List<ExprNodeColumnDesc> columns = new ArrayList<ExprNodeColumnDesc>();
+  // column or constant
+  final List<ExprNodeDesc> columns = new ArrayList<ExprNodeDesc>();
 
-  public void addColumn(ExprNodeColumnDesc column) {
+  public void addColumn(ExprNodeDesc column) {
     columns.add(column);
   }
 
   @Override
   public ExprNodeDesc clone() {
     ExprNodeColumnListDesc clone = new ExprNodeColumnListDesc();
-    clone.columns = new ArrayList<ExprNodeColumnDesc>(columns);
+    clone.columns.addAll(columns);
     return clone;
   }
 
@@ -73,11 +74,7 @@ public class ExprNodeColumnListDesc exte
 
   @Override
   public List<String> getCols() {
-    List<String> cols = new ArrayList<String>();
-    for (ExprNodeColumnDesc column : columns) {
-      cols.add(column.getColumn());
-    }
-    return cols;
+    throw new IllegalStateException();
   }
 
   @Override
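
Because `columns` is now final, clone() above can no longer assign a replacement list and instead copies the contents into the fresh instance's empty list. A minimal plain-Java sketch of that copy-into-final-field pattern (named copy() here to sidestep Object.clone semantics):

import java.util.ArrayList;
import java.util.List;

public class CloneDemo {
  private final List<String> items = new ArrayList<String>();

  public void add(String s) { items.add(s); }

  public CloneDemo copy() {
    CloneDemo clone = new CloneDemo(); // fresh instance owns a fresh final list
    clone.items.addAll(items);         // shallow-copy the contents
    return clone;
  }

  public static void main(String[] args) {
    CloneDemo a = new CloneDemo();
    a.add("_col0");
    CloneDemo b = a.copy();
    b.add("_col1");
    System.out.println(a.items + " / " + b.items); // [_col0] / [_col0, _col1]
  }
}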

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java Tue Jan  6 06:58:42 2015
@@ -20,14 +20,16 @@ package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.ListSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.OperatorFactory;
 import org.apache.hadoop.hive.ql.parse.SplitSample;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 
 /**
  * FetchWork.
@@ -52,7 +54,7 @@ public class FetchWork implements Serial
   private SplitSample splitSample;
 
   private transient List<List<Object>> rowsComputedFromStats;
-  private transient ObjectInspector statRowOI;
+  private transient StructObjectInspector statRowOI;
 
   /**
    * Serialization Null Format for the serde used to fetch data.
@@ -62,12 +64,12 @@ public class FetchWork implements Serial
   public FetchWork() {
   }
 
-  public FetchWork(List<List<Object>> rowsComputedFromStats,ObjectInspector statRowOI) {
+  public FetchWork(List<List<Object>> rowsComputedFromStats, StructObjectInspector statRowOI) {
     this.rowsComputedFromStats = rowsComputedFromStats;
     this.statRowOI = statRowOI;
   }
 
-  public ObjectInspector getStatRowOI() {
+  public StructObjectInspector getStatRowOI() {
     return statRowOI;
   }
 
@@ -99,8 +101,8 @@ public class FetchWork implements Serial
 
   public void initializeForFetch() {
     if (source == null) {
-      sink = new ListSinkOperator();
-      sink.setConf(new ListSinkDesc(serializationNullFormat));
+      ListSinkDesc desc = new ListSinkDesc(serializationNullFormat);
+      sink = (ListSinkOperator) OperatorFactory.get(desc);
       source = sink;
     }
   }
@@ -173,6 +175,11 @@ public class FetchWork implements Serial
     return partDesc;
   }
 
+  public List<Path> getPathLists() {
+    return isPartitioned() ? partDir == null ?
+        null : new ArrayList<Path>(partDir) : Arrays.asList(tblDir);
+  }
+
   /**
    * Get Partition descriptors in sorted (ascending) order of partition directory
    *
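
initializeForFetch above now obtains the sink from OperatorFactory.get(desc) instead of calling new ListSinkOperator() and setConf by hand, keeping construction and descriptor wiring in one place. A plain-Java sketch of the idea, with hypothetical names rather than the actual Hive factory:

public class OperatorFactoryDemo {
  static class Desc {}
  static class Operator {
    Desc conf;
    void setConf(Desc d) { this.conf = d; }
  }
  static class ListSink extends Operator {}

  // Single construction path: no call site can forget setConf().
  static Operator get(Desc desc) {
    Operator op = new ListSink();
    op.setConf(desc);
    return op;
  }

  public static void main(String[] args) {
    Operator sink = get(new Desc());
    System.out.println(sink.conf != null); // true
  }
}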

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java Tue Jan  6 06:58:42 2015
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.serde2.Des
 import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hive.common.util.HiveStringUtils;
 
 /**
@@ -84,8 +85,17 @@ public class TableDesc implements Serial
    * Return a deserializer object corresponding to the tableDesc.
    */
   public Deserializer getDeserializer(Configuration conf) throws Exception {
-    Deserializer de = getDeserializerClass().newInstance();
-    SerDeUtils.initializeSerDe(de, conf, properties, null);
+    return getDeserializer(conf, false);
+  }
+
+  public Deserializer getDeserializer(Configuration conf, boolean ignoreError) throws Exception {
+    Deserializer de = ReflectionUtils.newInstance(
+        getDeserializerClass().asSubclass(Deserializer.class), conf);
+    if (ignoreError) {
+      SerDeUtils.initializeSerDeWithoutErrorCheck(de, conf, properties, null);
+    } else {
+      SerDeUtils.initializeSerDe(de, conf, properties, null);
+    }
     return de;
   }
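
The hunk above replaces Class.newInstance() with Hadoop's ReflectionUtils.newInstance, which besides instantiating the class also injects the Configuration into anything that implements Configurable, so a configuration-aware SerDe is usable immediately. A small runnable sketch, assuming hadoop-common on the classpath:

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class ReflectionUtilsDemo {
  public static class ConfAware implements Configurable {
    private Configuration conf;
    @Override public void setConf(Configuration conf) { this.conf = conf; }
    @Override public Configuration getConf() { return conf; }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    ConfAware o = ReflectionUtils.newInstance(ConfAware.class, conf);
    System.out.println(o.getConf() == conf); // true: conf was injected
  }
}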
 

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java Tue Jan  6 06:58:42 2015
@@ -310,7 +310,8 @@ public class SessionState {
     this.userName = userName;
     isSilent = conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT);
     ls = new LineageState();
-    overriddenConfigurations = new HashMap<String, String>();
+    // Must be deterministic order map for consistent q-test output across Java versions
+    overriddenConfigurations = new LinkedHashMap<String, String>();
     overriddenConfigurations.putAll(HiveConf.getConfSystemProperties());
     // if there isn't already a session name, go ahead and create it.
     if (StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HIVESESSIONID))) {
@@ -1204,7 +1205,8 @@ public class SessionState {
 
   public Map<String, String> getOverriddenConfigurations() {
     if (overriddenConfigurations == null) {
-      overriddenConfigurations = new HashMap<String, String>();
+      // Must be deterministic order map for consistent q-test output across Java versions
+      overriddenConfigurations = new LinkedHashMap<String, String>();
     }
     return overriddenConfigurations;
   }

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java Tue Jan  6 06:58:42 2015
@@ -42,7 +42,7 @@ public class TestIUD {
   @Before
   public void setup() throws SemanticException {
     pd = new ParseDriver();
-    sA = new SemanticAnalyzer(conf);
+    sA = new CalcitePlanner(conf);
   }
 
   ASTNode parse(String query) throws ParseException {

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBJoinTreeApplyPredicate.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBJoinTreeApplyPredicate.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBJoinTreeApplyPredicate.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBJoinTreeApplyPredicate.java Tue Jan  6 06:58:42 2015
@@ -42,7 +42,7 @@ public class TestQBJoinTreeApplyPredicat
 
   @Before
   public void setup() throws SemanticException {
-    sA = new SemanticAnalyzer(conf);
+    sA = new CalcitePlanner(conf);
   }
 
   static ASTNode constructIdentifier(String nm) {

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBSubQuery.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBSubQuery.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBSubQuery.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBSubQuery.java Tue Jan  6 06:58:42 2015
@@ -54,7 +54,7 @@ public class TestQBSubQuery {
   @Before
   public void setup() throws SemanticException {
     pd = new ParseDriver();
-    sA = new SemanticAnalyzer(conf);
+    sA = new CalcitePlanner(conf);
   }
 
   ASTNode parse(String query) throws ParseException {

Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java Tue Jan  6 06:58:42 2015
@@ -29,6 +29,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.Set;
 
 public class TestConditionalResolverCommonJoin {
@@ -52,8 +53,9 @@ public class TestConditionalResolverComm
     task2.setId("alias3");
 
     // joins alias1, alias2, alias3 (alias1 was not eligible for big pos)
+    // Must be deterministic order map for consistent q-test output across Java versions
     HashMap<Task<? extends Serializable>, Set<String>> taskToAliases =
-        new HashMap<Task<? extends Serializable>, Set<String>>();
+        new LinkedHashMap<Task<? extends Serializable>, Set<String>>();
     taskToAliases.put(task1, new HashSet<String>(Arrays.asList("alias2")));
     taskToAliases.put(task2, new HashSet<String>(Arrays.asList("alias3")));
 

Modified: hive/branches/spark/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q Tue Jan  6 06:58:42 2015
@@ -1,3 +1,5 @@
+-- JAVA_VERSION_SPECIFIC_OUTPUT
+
 DROP TABLE Employee_Part;
 
 CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string)

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch.q?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch.q Tue Jan  6 06:58:42 2015
@@ -9,7 +9,6 @@ select * from src limit 10;
 explain select * from srcpart where ds='2008-04-08' AND hr='11' limit 10;
 select * from srcpart where ds='2008-04-08' AND hr='11' limit 10;
 
--- negative, select expression
 explain select key from src limit 10;
 select key from src limit 10;
 
@@ -62,6 +61,16 @@ select * from src TABLESAMPLE (0.25 PERC
 explain select *, BLOCK__OFFSET__INSIDE__FILE from srcpart TABLESAMPLE (0.25 PERCENT);
 select *, BLOCK__OFFSET__INSIDE__FILE from srcpart TABLESAMPLE (0.25 PERCENT);
 
+-- sub query
+explain
+select key, value from (select value key,key value from src where key > 200) a where value < 250 limit 20;
+select key, value from (select value key,key value from src where key > 200) a where value < 250 limit 20;
+
+-- lateral view
+explain
+select key,X from srcpart lateral view explode(array(key,value)) L as x where (ds='2008-04-08' AND hr='11') limit 20;
+select key,X from srcpart lateral view explode(array(key,value)) L as x where (ds='2008-04-08' AND hr='11') limit 20;
+
 -- non deterministic func
 explain select key, value, BLOCK__OFFSET__INSIDE__FILE from srcpart where ds="2008-04-09" AND rand() > 1;
 select key, value, BLOCK__OFFSET__INSIDE__FILE from srcpart where ds="2008-04-09" AND rand() > 1;
@@ -78,8 +87,5 @@ explain create table srcx as select dist
 -- negative, analyze
 explain analyze table src compute statistics;
 
--- negative, subq
-explain select a.* from (select * from src) a;
-
 -- negative, join
 explain select * from src join src src2 on src.key=src2.key;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q Tue Jan  6 06:58:42 2015
@@ -3,6 +3,11 @@ set hive.fetch.task.conversion=more;
 explain select * from srcpart where ds='2008-04-08' AND hr='11' limit 10;
 explain select cast(key as int) * 10, upper(value) from src limit 10;
 
+set hive.fetch.task.conversion.threshold=10000;
+
+explain select * from srcpart where ds='2008-04-08' AND hr='11' limit 10;
+explain select cast(key as int) * 10, upper(value) from src limit 10;
+
 set hive.fetch.task.conversion.threshold=100;
 
 -- from HIVE-7397, limit + partition pruning filter

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/outer_join_ppr.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/outer_join_ppr.q?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/outer_join_ppr.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/outer_join_ppr.q Tue Jan  6 06:58:42 2015
@@ -1,6 +1,7 @@
 set hive.optimize.ppd=true;
 
 -- SORT_QUERY_RESULTS
+-- JAVA_VERSION_SPECIFIC_OUTPUT
 
 EXPLAIN EXTENDED
  FROM 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/parquet_map_null.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/parquet_map_null.q?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/parquet_map_null.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/parquet_map_null.q Tue Jan  6 06:58:42 2015
@@ -1,4 +1,5 @@
 -- This test attempts to write a parquet table from an avro table that contains map null values
+-- JAVA_VERSION_SPECIFIC_OUTPUT
 
 DROP TABLE IF EXISTS avro_table;
 DROP TABLE IF EXISTS parquet_table;
@@ -10,4 +11,4 @@ CREATE TABLE parquet_table STORED AS PAR
 SELECT * FROM parquet_table;
 
 DROP TABLE avro_table;
-DROP TABLE parquet_table;
\ No newline at end of file
+DROP TABLE parquet_table;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/subquery_multiinsert.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/subquery_multiinsert.q?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/subquery_multiinsert.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/subquery_multiinsert.q Tue Jan  6 06:58:42 2015
@@ -1,6 +1,7 @@
 set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecutePrinter,org.apache.hadoop.hive.ql.hooks.PrintCompletedTasksHook;
 
 -- SORT_QUERY_RESULTS
+-- JAVA_VERSION_SPECIFIC_OUTPUT
 
 CREATE TABLE src_4(
   key STRING, 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin_having.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin_having.q?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin_having.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin_having.q Tue Jan  6 06:58:42 2015
@@ -1,4 +1,6 @@
 -- non agg, non corr
+-- JAVA_VERSION_SPECIFIC_OUTPUT
+
 explain
 select key, count(*) 
 from src 
@@ -53,4 +55,4 @@ having b.p_mfgr not in
   group by p_mfgr
   having max(p_retailprice) - min(p_retailprice) > 600
   )
-;
\ No newline at end of file
+;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q Tue Jan  6 06:58:42 2015
@@ -91,3 +91,8 @@ set hive.cbo.enable=false;
 explain 
 select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket;
 select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket;
+
+-- with CBO
+explain 
+select percentile_approx(key, 0.5) from bucket;
+select percentile_approx(key, 0.5) from bucket;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/udtf_explode.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/udtf_explode.q?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/udtf_explode.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/udtf_explode.q Tue Jan  6 06:58:42 2015
@@ -21,8 +21,3 @@ SELECT src.key, myKey, myVal FROM src la
 
 -- HIVE-4295
 SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3;
-
--- cp knob is removed, hardly convincible
--- set hive.optimize.cp=false;
--- SELECT src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3;
--- SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3;
\ No newline at end of file

Modified: hive/branches/spark/ql/src/test/results/clientnegative/unset_table_property.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/unset_table_property.q.out?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/unset_table_property.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/unset_table_property.q.out Tue Jan  6 06:58:42 2015
@@ -29,4 +29,4 @@ totalSize	0
 #### A masked pattern was here ####
 FAILED: SemanticException [Error 10215]: Please use the following syntax if not sure whether the property existed or not:
 ALTER TABLE tableName UNSET TBLPROPERTIES IF EXISTS (key1, key2, ...)
- The following property z does not exist in testtable
+ The following property x does not exist in testtable

Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_select.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_select.q.out?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_select.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_select.q.out Tue Jan  6 06:58:42 2015
@@ -750,11 +750,9 @@ STAGE PLANS:
             alias: alltypes_orc
             Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: 1 (type: int)
-              outputColumnNames: _col0
               Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(1)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE

Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join26.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join26.q.out?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join26.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join26.q.out Tue Jan  6 06:58:42 2015
@@ -70,11 +70,11 @@ STAGE PLANS:
                   outputColumnNames: _col1
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col1 (type: string), 1 (type: int)
-                    outputColumnNames: _col0, _col1
+                    expressions: _col1 (type: string)
+                    outputColumnNames: _col0
                     Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
+                      aggregations: count(1)
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1

Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join27.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join27.q.out?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join27.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join27.q.out Tue Jan  6 06:58:42 2015
@@ -115,21 +115,17 @@ STAGE PLANS:
                       0 _col0 (type: string)
                       1 _col0 (type: string)
                     Statistics: Num rows: 273 Data size: 2908 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: 1 (type: int)
+                    Group By Operator
+                      aggregations: count(1)
+                      mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 273 Data size: 2908 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count(_col0)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        File Output Operator
-                          compressed: false
-                          table:
-                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
           TableScan
             Union
               Statistics: Num rows: 249 Data size: 2644 Basic stats: COMPLETE Column stats: NONE
@@ -140,21 +136,17 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 Statistics: Num rows: 273 Data size: 2908 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: 1 (type: int)
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
                   outputColumnNames: _col0
-                  Statistics: Num rows: 273 Data size: 2908 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    aggregations: count(_col0)
-                    mode: hash
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
       Local Work:
         Map Reduce Local Work
 

Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out Tue Jan  6 06:58:42 2015
@@ -944,8 +944,8 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 RUN: Stage-10:CONDITIONAL
-RUN: Stage-14:MAPREDLOCAL
-RUN: Stage-9:MAPRED
+RUN: Stage-13:MAPREDLOCAL
+RUN: Stage-8:MAPRED
 RUN: Stage-7:CONDITIONAL
 RUN: Stage-12:MAPREDLOCAL
 RUN: Stage-6:MAPRED
@@ -1005,7 +1005,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 RUN: Stage-10:CONDITIONAL
-RUN: Stage-14:MAPREDLOCAL
+RUN: Stage-13:MAPREDLOCAL
 RUN: Stage-3:MAPRED
 RUN: Stage-7:CONDITIONAL
 RUN: Stage-12:MAPREDLOCAL

Modified: hive/branches/spark/ql/src/test/results/clientpositive/bucket_groupby.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/bucket_groupby.q.out?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/bucket_groupby.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/bucket_groupby.q.out Tue Jan  6 06:58:42 2015
@@ -58,11 +58,11 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: key (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
+                aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
@@ -167,11 +167,11 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: key (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
+                aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
@@ -250,11 +250,11 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: length(key) (type: int), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: length(key) (type: int)
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
+                aggregations: count(1)
                 keys: _col0 (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
@@ -324,11 +324,11 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: abs(length(key)) (type: int), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: abs(length(key)) (type: int)
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
+                aggregations: count(1)
                 keys: _col0 (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
@@ -400,12 +400,12 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), 3 (type: int), 1 (type: int)
-              outputColumnNames: _col0, _col1, _col2
+              expressions: key (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col2)
-                keys: _col0 (type: string), _col1 (type: int)
+                aggregations: count(1)
+                keys: _col0 (type: string), 3 (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -483,11 +483,11 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: value (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: value (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
+                aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
@@ -564,11 +564,11 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: key (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
+                aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
@@ -943,12 +943,12 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), 3 (type: int), 1 (type: int)
-              outputColumnNames: _col0, _col1, _col2
+              expressions: key (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col2)
-                keys: _col0 (type: string), _col1 (type: int)
+                aggregations: count(1)
+                keys: _col0 (type: string), 3 (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1055,11 +1055,11 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: key (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
+                aggregations: count(1)
                 bucketGroup: true
                 keys: _col0 (type: string)
                 mode: hash
@@ -1137,11 +1137,11 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: value (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: value (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
+                aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
@@ -1218,11 +1218,11 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), value (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1, _col2
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col2)
+                aggregations: count(1)
                 bucketGroup: true
                 keys: _col0 (type: string), _col1 (type: string)
                 mode: hash
@@ -1352,11 +1352,11 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: key (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
+                aggregations: count(1)
                 bucketGroup: true
                 keys: _col0 (type: string)
                 mode: hash
@@ -1434,11 +1434,11 @@ STAGE PLANS:
             alias: clustergroupby
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: value (type: string), key (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1, _col2
+              expressions: value (type: string), key (type: string)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col2)
+                aggregations: count(1)
                 keys: _col0 (type: string), _col1 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2

Modified: hive/branches/spark/ql/src/test/results/clientpositive/column_access_stats.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/column_access_stats.q.out?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/column_access_stats.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/column_access_stats.q.out Tue Jan  6 06:58:42 2015
@@ -360,10 +360,10 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Input: default@t2
 #### A masked pattern was here ####
-Table:default@t2
+Table:default@t1
 Columns:key,val
 
-Table:default@t1
+Table:default@t2
 Columns:key,val
 
 1	11	1	1
@@ -441,10 +441,10 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Input: default@t2
 #### A masked pattern was here ####
-Table:default@t2
+Table:default@t1
 Columns:key
 
-Table:default@t1
+Table:default@t2
 Columns:key
 
 1
@@ -460,10 +460,10 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Input: default@t2
 #### A masked pattern was here ####
-Table:default@t2
+Table:default@t1
 Columns:key,val
 
-Table:default@t1
+Table:default@t2
 Columns:key,val
 
 PREHOOK: query: -- Map join
@@ -474,10 +474,10 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Input: default@t2
 #### A masked pattern was here ####
-Table:default@t2
+Table:default@t1
 Columns:key,val
 
-Table:default@t1
+Table:default@t2
 Columns:key,val
 
 1	11	1	1
@@ -556,10 +556,10 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Input: default@t2
 #### A masked pattern was here ####
-Table:default@t2
+Table:default@t1
 Columns:key,val
 
-Table:default@t1
+Table:default@t2
 Columns:key,val
 
 PREHOOK: query: EXPLAIN
@@ -653,10 +653,10 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 PREHOOK: Input: default@t2
 #### A masked pattern was here ####
-Table:default@t2
+Table:default@t1
 Columns:key,val
 
-Table:default@t1
+Table:default@t2
 Columns:key,val
 
 PREHOOK: query: -- Join followed by join
@@ -802,10 +802,10 @@ PREHOOK: Input: default@t1
 PREHOOK: Input: default@t2
 PREHOOK: Input: default@t3
 #### A masked pattern was here ####
-Table:default@t2
+Table:default@t1
 Columns:key
 
-Table:default@t1
+Table:default@t2
 Columns:key
 
 Table:default@t3

Modified: hive/branches/spark/ql/src/test/results/clientpositive/combine2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/combine2.q.out?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/combine2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/combine2.q.out Tue Jan  6 06:58:42 2015
@@ -188,11 +188,9 @@ STAGE PLANS:
             Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Select Operator
-              expressions: 1 (type: int)
-              outputColumnNames: _col0
               Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(1)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -656,11 +654,11 @@ STAGE PLANS:
             alias: srcpart
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: ds (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: ds (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
+                aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1

Modified: hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer1.q.out?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer1.q.out Tue Jan  6 06:58:42 2015
@@ -70,11 +70,11 @@ STAGE PLANS:
           outputColumnNames: _col1
           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1
+            expressions: _col1 (type: string)
+            outputColumnNames: _col0
             Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col1)
+              aggregations: count(1)
               keys: _col0 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1
@@ -231,13 +231,13 @@ STAGE PLANS:
             outputColumnNames: _col1
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
-              expressions: _col1 (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: _col1 (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Mux Operator
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Group By Operator
-                  aggregations: count(_col1)
+                  aggregations: count(1)
                   keys: _col0 (type: string)
                   mode: complete
                   outputColumnNames: _col0, _col1
@@ -379,11 +379,11 @@ STAGE PLANS:
                   outputColumnNames: _col1
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col1 (type: string), 1 (type: int)
-                    outputColumnNames: _col0, _col1
+                    expressions: _col1 (type: string)
+                    outputColumnNames: _col0
                     Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
+                      aggregations: count(1)
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -540,22 +540,18 @@ STAGE PLANS:
             1 _col0 (type: string)
           outputColumnNames: _col0
           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), 1 (type: int)
+          Group By Operator
+            aggregations: count(1)
+            keys: _col0 (type: string)
+            mode: hash
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-            Group By Operator
-              aggregations: count(_col1)
-              keys: _col0 (type: string)
-              mode: hash
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -706,33 +702,29 @@ STAGE PLANS:
               1 _col0 (type: string)
             outputColumnNames: _col0
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+            Mux Operator
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              Mux Operator
+              Group By Operator
+                aggregations: count(1)
+                keys: _col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
-                  mode: complete
+                Select Operator
+                  expressions: hash(_col0) (type: int), hash(_col1) (type: int)
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  Select Operator
-                    expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+                  Group By Operator
+                    aggregations: sum(_col0), sum(_col1)
+                    mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    Group By Operator
-                      aggregations: sum(_col0), sum(_col1)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -845,11 +837,11 @@ STAGE PLANS:
           outputColumnNames: _col1
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1
+            expressions: _col1 (type: string)
+            outputColumnNames: _col0
             Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col1)
+              aggregations: count(1)
               keys: _col0 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1
@@ -1000,13 +992,13 @@ STAGE PLANS:
             outputColumnNames: _col1
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
-              expressions: _col1 (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: _col1 (type: string)
+              outputColumnNames: _col0
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Mux Operator
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Group By Operator
-                  aggregations: count(_col1)
+                  aggregations: count(1)
                   keys: _col0 (type: string)
                   mode: complete
                   outputColumnNames: _col0, _col1
@@ -1137,22 +1129,18 @@ STAGE PLANS:
             1 _col0 (type: string)
           outputColumnNames: _col0
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), 1 (type: int)
+          Group By Operator
+            aggregations: count(1)
+            keys: _col0 (type: string)
+            mode: hash
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-            Group By Operator
-              aggregations: count(_col1)
-              keys: _col0 (type: string)
-              mode: hash
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -1291,22 +1279,18 @@ STAGE PLANS:
             1 _col0 (type: string)
           outputColumnNames: _col0
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), 1 (type: int)
+          Group By Operator
+            aggregations: count(1)
+            keys: _col0 (type: string)
+            mode: hash
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-            Group By Operator
-              aggregations: count(_col1)
-              keys: _col0 (type: string)
-              mode: hash
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -1449,11 +1433,11 @@ STAGE PLANS:
           outputColumnNames: _col1, _col2
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col2 (type: string), _col1 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1, _col2
+            expressions: _col2 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1
             Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col2)
+              aggregations: count(1)
               keys: _col0 (type: string), _col1 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
@@ -1583,11 +1567,11 @@ STAGE PLANS:
           outputColumnNames: _col1, _col2
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col2 (type: string), _col1 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1, _col2
+            expressions: _col2 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1
             Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col2)
+              aggregations: count(1)
               keys: _col0 (type: string), _col1 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
@@ -1725,22 +1709,18 @@ STAGE PLANS:
             1 _col0 (type: string)
           outputColumnNames: _col0
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), 1 (type: int)
+          Group By Operator
+            aggregations: count(1)
+            keys: _col0 (type: string)
+            mode: hash
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-            Group By Operator
-              aggregations: count(_col1)
-              keys: _col0 (type: string)
-              mode: hash
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -1880,33 +1860,29 @@ STAGE PLANS:
               1 _col0 (type: string)
             outputColumnNames: _col0
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+            Mux Operator
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              Mux Operator
+              Group By Operator
+                aggregations: count(1)
+                keys: _col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
-                  mode: complete
+                Select Operator
+                  expressions: hash(_col0) (type: int), hash(_col1) (type: int)
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  Select Operator
-                    expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+                  Group By Operator
+                    aggregations: sum(_col0), sum(_col1)
+                    mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    Group By Operator
-                      aggregations: sum(_col0), sum(_col1)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -2019,11 +1995,11 @@ STAGE PLANS:
           outputColumnNames: _col1
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1
+            expressions: _col1 (type: string)
+            outputColumnNames: _col0
             Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col1)
+              aggregations: count(1)
               keys: _col0 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1
@@ -2173,11 +2149,11 @@ STAGE PLANS:
           outputColumnNames: _col1
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1
+            expressions: _col1 (type: string)
+            outputColumnNames: _col0
             Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col1)
+              aggregations: count(1)
               keys: _col0 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1
@@ -2335,11 +2311,11 @@ STAGE PLANS:
           outputColumnNames: _col1
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1
+            expressions: _col1 (type: string)
+            outputColumnNames: _col0
             Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col1)
+              aggregations: count(1)
               keys: _col0 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1
@@ -2489,11 +2465,11 @@ STAGE PLANS:
           outputColumnNames: _col1
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1
+            expressions: _col1 (type: string)
+            outputColumnNames: _col0
             Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col1)
+              aggregations: count(1)
               keys: _col0 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1
@@ -2652,11 +2628,11 @@ STAGE PLANS:
           outputColumnNames: _col1, _col2
           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), _col2 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1, _col2
+            expressions: _col1 (type: string), _col2 (type: string)
+            outputColumnNames: _col0, _col1
             Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col2)
+              aggregations: count(1)
               keys: _col0 (type: string), _col1 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
@@ -2813,11 +2789,11 @@ STAGE PLANS:
           outputColumnNames: _col1, _col2
           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), _col2 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1, _col2
+            expressions: _col1 (type: string), _col2 (type: string)
+            outputColumnNames: _col0, _col1
             Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col2)
+              aggregations: count(1)
               keys: _col0 (type: string), _col1 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
@@ -2975,11 +2951,11 @@ STAGE PLANS:
           outputColumnNames: _col2
           Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col2 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1
+            expressions: _col2 (type: string)
+            outputColumnNames: _col0
             Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col1)
+              aggregations: count(1)
               keys: _col0 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1
@@ -3135,11 +3111,11 @@ STAGE PLANS:
           outputColumnNames: _col2
           Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col2 (type: string), 1 (type: int)
-            outputColumnNames: _col0, _col1
+            expressions: _col2 (type: string)
+            outputColumnNames: _col0
             Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col1)
+              aggregations: count(1)
               keys: _col0 (type: string)
               mode: hash
               outputColumnNames: _col0, _col1

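Several of the correlationoptimizer1 plans above end in the same verification shape: each output column is hashed (hash(_col0), hash(_col1)) and the hashes are summed (sum(_col0), sum(_col1)), which yields an order-insensitive checksum so results can be compared across plan variants without sorting. A small sketch of the idiom, with Java's hashCode standing in for Hive's hash UDF and made-up row values:

import java.util.Arrays;
import java.util.List;

public class OrderInsensitiveChecksum {
    // Sum of per-column hashes; addition commutes, so row order cannot matter.
    static long checksum(List<String[]> rows) {
        long sum = 0;
        for (String[] row : rows) {
            for (String col : row) {
                sum += col.hashCode(); // analogous to sum(hash(_colN))
            }
        }
        return sum;
    }

    public static void main(String[] args) {
        List<String[]> a = Arrays.asList(
            new String[] {"k1", "2"}, new String[] {"k2", "5"});
        List<String[]> b = Arrays.asList(
            new String[] {"k2", "5"}, new String[] {"k1", "2"});
        // Same rows, different order, identical checksum.
        System.out.println(checksum(a) == checksum(b)); // true
    }
}
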
Modified: hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer10.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer10.q.out?rev=1649740&r1=1649739&r2=1649740&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer10.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer10.q.out Tue Jan  6 06:58:42 2015
@@ -81,22 +81,18 @@ STAGE PLANS:
             1 _col0 (type: string)
           outputColumnNames: _col0
           Statistics: Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), 1 (type: int)
+          Group By Operator
+            aggregations: count(1)
+            keys: _col0 (type: string)
+            mode: hash
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
-            Group By Operator
-              aggregations: count(_col1)
-              keys: _col0 (type: string)
-              mode: hash
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -296,39 +292,35 @@ STAGE PLANS:
               1 _col0 (type: string)
             outputColumnNames: _col0
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), 1 (type: int)
-              outputColumnNames: _col0, _col1
+            Mux Operator
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              Mux Operator
+              Group By Operator
+                aggregations: count(1)
+                keys: _col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
-                  mode: complete
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  Mux Operator
-                    Statistics: Num rows: 276 Data size: 2854 Basic stats: COMPLETE Column stats: NONE
-                    Join Operator
-                      condition map:
-                           Left Semi Join 0 to 1
-                      keys:
-                        0 _col0 (type: string)
-                        1 _col0 (type: string)
+                Mux Operator
+                  Statistics: Num rows: 276 Data size: 2854 Basic stats: COMPLETE Column stats: NONE
+                  Join Operator
+                    condition map:
+                         Left Semi Join 0 to 1
+                    keys:
+                      0 _col0 (type: string)
+                      1 _col0 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: string), _col1 (type: bigint)
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                      Select Operator
-                        expressions: _col0 (type: string), _col1 (type: bigint)
-                        outputColumnNames: _col0, _col1
+                      File Output Operator
+                        compressed: false
                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                        File Output Operator
-                          compressed: false
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                          table:
-                              input format: org.apache.hadoop.mapred.TextInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Mux Operator
             Statistics: Num rows: 276 Data size: 2854 Basic stats: COMPLETE Column stats: NONE
             Join Operator
