hive-commits mailing list archives

From: gunt...@apache.org
Subject: svn commit: r1508202 [11/48] - in /hive/branches/tez: ./ beeline/src/java/org/apache/hive/beeline/ cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/common/metrics/ common/src/java/org/apache/hadoop/hive/conf/ common/src/t...
Date: Mon, 29 Jul 2013 21:08:19 GMT
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Mon Jul 29 21:08:03 2013
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -38,9 +37,7 @@ import org.antlr.runtime.tree.Tree;
 import org.antlr.runtime.tree.TreeWizard;
 import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
@@ -51,23 +48,16 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryProperties;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
-import org.apache.hadoop.hive.ql.exec.ColumnStatsTask;
-import org.apache.hadoop.hive.ql.exec.ConditionalTask;
-import org.apache.hadoop.hive.ql.exec.ExecDriver;
-import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.FunctionInfo;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
-import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
-import org.apache.hadoop.hive.ql.exec.MapRedTask;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorFactory;
 import org.apache.hadoop.hive.ql.exec.RecordReader;
@@ -75,7 +65,6 @@ import org.apache.hadoop.hive.ql.exec.Re
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator;
-import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -89,13 +78,9 @@ import org.apache.hadoop.hive.ql.io.Comb
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
-import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
 import org.apache.hadoop.hive.ql.lib.Dispatcher;
 import org.apache.hadoop.hive.ql.lib.GraphWalker;
 import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.Rule;
-import org.apache.hadoop.hive.ql.lib.RuleRegExp;
 import org.apache.hadoop.hive.ql.metadata.DummyPartition;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -104,20 +89,7 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-import org.apache.hadoop.hive.ql.optimizer.GenMRFileSink1;
-import org.apache.hadoop.hive.ql.optimizer.GenMROperator;
-import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext;
-import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
-import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink1;
-import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink2;
-import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink3;
-import org.apache.hadoop.hive.ql.optimizer.GenMRTableScan1;
-import org.apache.hadoop.hive.ql.optimizer.GenMRUnion1;
-import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
-import org.apache.hadoop.hive.ql.optimizer.MapJoinFactory;
 import org.apache.hadoop.hive.ql.optimizer.Optimizer;
-import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext;
-import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalOptimizer;
 import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec.SpecType;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression;
@@ -139,8 +111,6 @@ import org.apache.hadoop.hive.ql.parse.W
 import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFunctionSpec;
 import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowSpec;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
-import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc;
-import org.apache.hadoop.hive.ql.plan.ColumnStatsWork;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
 import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
@@ -154,7 +124,6 @@ import org.apache.hadoop.hive.ql.plan.Ex
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc;
 import org.apache.hadoop.hive.ql.plan.ExtractDesc;
-import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc;
@@ -170,8 +139,6 @@ import org.apache.hadoop.hive.ql.plan.Li
 import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
-import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.PTFDesc;
 import org.apache.hadoop.hive.ql.plan.PTFDesc.OrderExpressionDef;
@@ -205,7 +172,6 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.InputFormat;
 
 /**
@@ -225,6 +191,7 @@ public class SemanticAnalyzer extends Ba
   private final HashMap<TableScanOperator, Table> topToTable;
   private final Map<FileSinkOperator, Table> fsopToTable;
   private final List<ReduceSinkOperator> reduceSinkOperatorsAddedByEnforceBucketingSorting;
+  private final HashMap<TableScanOperator, Map<String, String>> topToTableProps;
   private QB qb;
   private ASTNode ast;
   private int destTableId;
@@ -288,6 +255,7 @@ public class SemanticAnalyzer extends Ba
     topToTable = new HashMap<TableScanOperator, Table>();
     fsopToTable = new HashMap<FileSinkOperator, Table>();
     reduceSinkOperatorsAddedByEnforceBucketingSorting = new ArrayList<ReduceSinkOperator>();
+    topToTableProps = new HashMap<TableScanOperator, Map<String, String>>();
     destTableId = 1;
     uCtx = null;
     listMapJoinOpsNoReducer = new ArrayList<AbstractMapJoinOperator<? extends MapJoinDesc>>();
@@ -346,7 +314,7 @@ public class SemanticAnalyzer extends Ba
 
   public ParseContext getParseContext() {
     return new ParseContext(conf, qb, ast, opToPartPruner, opToPartList, topOps,
-        topSelOps, opParseCtx, joinContext, smbMapJoinContext, topToTable,
+        topSelOps, opParseCtx, joinContext, smbMapJoinContext, topToTable, topToTableProps,
         fsopToTable, loadTableWork,
         loadFileWork, ctx, idToTableNameMap, destTableId, uCtx,
         listMapJoinOpsNoReducer, groupOpToInputTables, prunedPartitions,
@@ -503,32 +471,23 @@ public class SemanticAnalyzer extends Ba
     // For each table reference get the table name
     // and the alias (if alias is not present, the table name
     // is used as an alias)
-    boolean tableSamplePresent = false;
-    boolean splitSamplePresent = false;
-
     int aliasIndex = 0;
-    if (tabref.getChildCount() == 2) {
-      // tablename tablesample
-      // OR
-      // tablename alias
-      ASTNode ct = (ASTNode) tabref.getChild(1);
+    int propsIndex = -1;
+    int tsampleIndex = -1;
+    int ssampleIndex = -1;
+    for (int index = 1; index < tabref.getChildCount(); index++) {
+      ASTNode ct = (ASTNode) tabref.getChild(index);
       if (ct.getToken().getType() == HiveParser.TOK_TABLEBUCKETSAMPLE) {
-        tableSamplePresent = true;
+        tsampleIndex = index;
       } else if (ct.getToken().getType() == HiveParser.TOK_TABLESPLITSAMPLE) {
-        splitSamplePresent = true;
+        ssampleIndex = index;
+      } else if (ct.getToken().getType() == HiveParser.TOK_TABLEPROPERTIES) {
+        propsIndex = index;
       } else {
-        aliasIndex = 1;
-      }
-    } else if (tabref.getChildCount() == 3) {
-      // table name table sample alias
-      aliasIndex = 2;
-      ASTNode ct = (ASTNode) tabref.getChild(1);
-      if (ct.getToken().getType() == HiveParser.TOK_TABLEBUCKETSAMPLE) {
-        tableSamplePresent = true;
-      } else if (ct.getToken().getType() == HiveParser.TOK_TABLESPLITSAMPLE) {
-        splitSamplePresent = true;
+        aliasIndex = index;
       }
     }
+
     ASTNode tableTree = (ASTNode) (tabref.getChild(0));
 
     String tabIdName = getUnescapedName(tableTree);
@@ -541,13 +500,19 @@ public class SemanticAnalyzer extends Ba
       alias = getUnescapedUnqualifiedTableName(tableTree);
     }
 
+    if (propsIndex >= 0) {
+      Tree propsAST = tabref.getChild(propsIndex);
+      Map<String, String> props = DDLSemanticAnalyzer.getProps((ASTNode) propsAST.getChild(0));
+      qb.setTabProps(alias, props);
+    }
+
     // If the alias is already there then we have a conflict
     if (qb.exists(alias)) {
       throw new SemanticException(ErrorMsg.AMBIGUOUS_TABLE_ALIAS.getMsg(tabref
           .getChild(aliasIndex)));
     }
-    if (tableSamplePresent) {
-      ASTNode sampleClause = (ASTNode) tabref.getChild(1);
+    if (tsampleIndex >= 0) {
+      ASTNode sampleClause = (ASTNode) tabref.getChild(tsampleIndex);
       ArrayList<ASTNode> sampleCols = new ArrayList<ASTNode>();
       if (sampleClause.getChildCount() > 2) {
         for (int i = 2; i < sampleClause.getChildCount(); i++) {
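
A minimal sketch, not part of the patch, of the round trip the two topToTableProps hunks in this file set up: properties attached to a table reference are stashed on the QB per alias at parse time, then re-keyed by TableScanOperator in genTablePlan (see the hunk further down) so later stages can read them from ParseContext. Only calls that appear in this diff are used; local names are illustrative.

    // Parse time (this hunk): capture the TOK_TABLEPROPERTIES payload for the alias.
    Map<String, String> props = DDLSemanticAnalyzer.getProps((ASTNode) propsAST.getChild(0));
    qb.setTabProps(alias, props);

    // Plan time (genTablePlan hunk below): attach the same map to the scan operator.
    Map<String, String> scanProps = qb.getTabPropsForAlias(alias);
    if (scanProps != null) {
      topToTableProps.put((TableScanOperator) top, scanProps);
    }
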
@@ -573,8 +538,8 @@ public class SemanticAnalyzer extends Ba
               .getChild(0));
         }
       }
-    } else if (splitSamplePresent) {
-      ASTNode sampleClause = (ASTNode) tabref.getChild(1);
+    } else if (ssampleIndex >= 0) {
+      ASTNode sampleClause = (ASTNode) tabref.getChild(ssampleIndex);
 
       Tree type = sampleClause.getChild(0);
       Tree numerator = sampleClause.getChild(1);
@@ -885,7 +850,7 @@ public class SemanticAnalyzer extends Ba
               ErrorMsg.ORDERBY_DISTRIBUTEBY_CONFLICT.getMsg()));
         }
         break;
-        
+
       case HiveParser.TOK_SORTBY:
      // Get the sort by aliases - these are aliased to the entries in the
         // select list
@@ -1432,6 +1397,10 @@ public class SemanticAnalyzer extends Ba
       break;
     case HiveParser.Number:
     case HiveParser.StringLiteral:
+    case HiveParser.BigintLiteral:
+    case HiveParser.SmallintLiteral:
+    case HiveParser.TinyintLiteral:
+    case HiveParser.DecimalLiteral:
     case HiveParser.TOK_STRINGLITERALSEQUENCE:
     case HiveParser.TOK_CHARSETLITERAL:
     case HiveParser.KW_TRUE:
@@ -2902,11 +2871,11 @@ public class SemanticAnalyzer extends Ba
    */
   @SuppressWarnings("nls")
   private Operator genGroupByPlanGroupByOperator(QBParseInfo parseInfo,
-      String dest, Operator reduceSinkOperatorInfo, GroupByDesc.Mode mode,
+      String dest, Operator input, ReduceSinkOperator rs, GroupByDesc.Mode mode,
       Map<String, GenericUDAFEvaluator> genericUDAFEvaluators)
       throws SemanticException {
     RowResolver groupByInputRowResolver = opParseCtx
-        .get(reduceSinkOperatorInfo).getRowResolver();
+        .get(input).getRowResolver();
     RowResolver groupByOutputRowResolver = new RowResolver();
     groupByOutputRowResolver.setIsExprResolver(true);
     ArrayList<ExprNodeDesc> groupByKeys = new ArrayList<ExprNodeDesc>();
@@ -2937,15 +2906,11 @@ public class SemanticAnalyzer extends Ba
     // get the last colName for the reduce KEY
     // it represents the column name corresponding to distinct aggr, if any
     String lastKeyColName = null;
-    List<ExprNodeDesc> reduceValues = null;
-    if (reduceSinkOperatorInfo.getConf() instanceof ReduceSinkDesc) {
-      List<String> inputKeyCols = ((ReduceSinkDesc)
-          reduceSinkOperatorInfo.getConf()).getOutputKeyColumnNames();
-      if (inputKeyCols.size() > 0) {
-        lastKeyColName = inputKeyCols.get(inputKeyCols.size() - 1);
-      }
-      reduceValues = ((ReduceSinkDesc) reduceSinkOperatorInfo.getConf()).getValueCols();
+    List<String> inputKeyCols = ((ReduceSinkDesc) rs.getConf()).getOutputKeyColumnNames();
+    if (inputKeyCols.size() > 0) {
+      lastKeyColName = inputKeyCols.get(inputKeyCols.size() - 1);
     }
+    List<ExprNodeDesc> reduceValues = ((ReduceSinkDesc) rs.getConf()).getValueCols();
     int numDistinctUDFs = 0;
     for (Map.Entry<String, ASTNode> entry : aggregationTrees.entrySet()) {
       ASTNode value = entry.getValue();
@@ -3022,7 +2987,7 @@ public class SemanticAnalyzer extends Ba
         new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations,
             false, groupByMemoryUsage, memoryThreshold, null, false, 0, numDistinctUDFs > 0),
         new RowSchema(groupByOutputRowResolver.getColumnInfos()),
-        reduceSinkOperatorInfo), groupByOutputRowResolver);
+        input), groupByOutputRowResolver);
     op.setColumnExprMap(colExprMap);
     return op;
   }
@@ -3490,7 +3455,7 @@ public class SemanticAnalyzer extends Ba
    * @throws SemanticException
    */
   @SuppressWarnings("nls")
-  private Operator genGroupByPlanReduceSinkOperator(QB qb,
+  private ReduceSinkOperator genGroupByPlanReduceSinkOperator(QB qb,
       String dest,
       Operator inputOperatorInfo,
       List<ASTNode> grpByExprs,
@@ -3531,7 +3496,8 @@ public class SemanticAnalyzer extends Ba
     }
 
     List<List<Integer>> distinctColIndices = getDistinctColIndicesForReduceSink(parseInfo, dest,
-        reduceKeys, reduceSinkInputRowResolver, reduceSinkOutputRowResolver, outputKeyColumnNames);
+        reduceKeys, reduceSinkInputRowResolver, reduceSinkOutputRowResolver, outputKeyColumnNames,
+        colExprMap);
 
     ArrayList<ExprNodeDesc> reduceValues = new ArrayList<ExprNodeDesc>();
     HashMap<String, ASTNode> aggregationTrees = parseInfo
@@ -3604,7 +3570,8 @@ public class SemanticAnalyzer extends Ba
   private List<List<Integer>> getDistinctColIndicesForReduceSink(QBParseInfo parseInfo,
       String dest,
       List<ExprNodeDesc> reduceKeys, RowResolver reduceSinkInputRowResolver,
-      RowResolver reduceSinkOutputRowResolver, List<String> outputKeyColumnNames)
+      RowResolver reduceSinkOutputRowResolver, List<String> outputKeyColumnNames,
+      Map<String, ExprNodeDesc> colExprMap)
       throws SemanticException {
 
     List<List<Integer>> distinctColIndices = new ArrayList<List<Integer>>();
@@ -3643,6 +3610,7 @@ public class SemanticAnalyzer extends Ba
           ColumnInfo colInfo = new ColumnInfo(field, expr.getTypeInfo(), null, false);
           reduceSinkOutputRowResolver.putExpression(parameter, colInfo);
           numExprs++;
+          colExprMap.put(colInfo.getInternalName(), expr);
         }
         distinctColIndices.add(distinctIndices);
       }
@@ -3680,7 +3648,7 @@ public class SemanticAnalyzer extends Ba
   }
 
   @SuppressWarnings("nls")
-  private Operator genCommonGroupByPlanReduceSinkOperator(QB qb, List<String> dests,
+  private ReduceSinkOperator genCommonGroupByPlanReduceSinkOperator(QB qb, List<String> dests,
       Operator inputOperatorInfo) throws SemanticException {
 
     RowResolver reduceSinkInputRowResolver = opParseCtx.get(inputOperatorInfo)
@@ -3704,7 +3672,8 @@ public class SemanticAnalyzer extends Ba
         colExprMap);
 
     List<List<Integer>> distinctColIndices = getDistinctColIndicesForReduceSink(parseInfo, dest,
-        reduceKeys, reduceSinkInputRowResolver, reduceSinkOutputRowResolver, outputKeyColumnNames);
+        reduceKeys, reduceSinkInputRowResolver, reduceSinkOutputRowResolver, outputKeyColumnNames,
+        colExprMap);
 
     ArrayList<ExprNodeDesc> reduceValues = new ArrayList<ExprNodeDesc>();
 
@@ -4027,7 +3996,7 @@ public class SemanticAnalyzer extends Ba
     }
 
     // ////// 1. Generate ReduceSinkOperator
-    Operator reduceSinkOperatorInfo =
+    ReduceSinkOperator reduceSinkOperatorInfo =
         genGroupByPlanReduceSinkOperator(qb,
             dest,
             input,
@@ -4040,7 +4009,7 @@ public class SemanticAnalyzer extends Ba
 
     // ////// 2. Generate GroupbyOperator
     Operator groupByOperatorInfo = genGroupByPlanGroupByOperator(parseInfo,
-        dest, reduceSinkOperatorInfo, GroupByDesc.Mode.COMPLETE, null);
+        dest, reduceSinkOperatorInfo, reduceSinkOperatorInfo, GroupByDesc.Mode.COMPLETE, null);
 
     return groupByOperatorInfo;
   }
@@ -4110,7 +4079,8 @@ public class SemanticAnalyzer extends Ba
     Operator select = insertSelectAllPlanForGroupBy(selectInput);
 
     // Generate ReduceSinkOperator
-    Operator reduceSinkOperatorInfo = genCommonGroupByPlanReduceSinkOperator(qb, dests, select);
+    ReduceSinkOperator reduceSinkOperatorInfo =
+        genCommonGroupByPlanReduceSinkOperator(qb, dests, select);
 
     // It is assumed throughout the code that a reducer has a single child, add a
     // ForwardOperator so that we can add multiple filter/group by operators as children
@@ -4130,7 +4100,7 @@ public class SemanticAnalyzer extends Ba
 
       // Generate GroupbyOperator
       Operator groupByOperatorInfo = genGroupByPlanGroupByOperator(parseInfo,
-          dest, curr, GroupByDesc.Mode.COMPLETE, null);
+          dest, curr, reduceSinkOperatorInfo, GroupByDesc.Mode.COMPLETE, null);
 
       curr = genPostGroupByBodyPlan(groupByOperatorInfo, dest, qb);
     }
@@ -4272,7 +4242,7 @@ public class SemanticAnalyzer extends Ba
     // DISTINCT
     // operator. We set the numPartitionColumns to -1 for this purpose. This is
     // captured by WritableComparableHiveObject.hashCode() function.
-    Operator reduceSinkOperatorInfo =
+    ReduceSinkOperator reduceSinkOperatorInfo =
         genGroupByPlanReduceSinkOperator(qb,
             dest,
             input,
@@ -4287,7 +4257,7 @@ public class SemanticAnalyzer extends Ba
     Map<String, GenericUDAFEvaluator> genericUDAFEvaluators =
         new LinkedHashMap<String, GenericUDAFEvaluator>();
     GroupByOperator groupByOperatorInfo = (GroupByOperator) genGroupByPlanGroupByOperator(
-        parseInfo, dest, reduceSinkOperatorInfo, GroupByDesc.Mode.PARTIAL1,
+        parseInfo, dest, reduceSinkOperatorInfo, reduceSinkOperatorInfo, GroupByDesc.Mode.PARTIAL1,
         genericUDAFEvaluators);
 
     int numReducers = -1;
@@ -6983,6 +6953,7 @@ public class SemanticAnalyzer extends Ba
               reduceValues.size() - 1).getTypeInfo(), "", false);
           reduceSinkOutputRowResolver.putExpression(grpbyExpr, colInfo);
           outputColumnNames.add(getColumnInternalName(reduceValues.size() - 1));
+          colExprMap.put(colInfo.getInternalName(), grpByExprNode);
         }
       }
 
@@ -7762,6 +7733,10 @@ public class SemanticAnalyzer extends Ba
 
       // Add a mapping from the table scan operator to Table
       topToTable.put((TableScanOperator) top, tab);
+      Map<String, String> props = qb.getTabPropsForAlias(alias);
+      if (props != null) {
+        topToTableProps.put((TableScanOperator) top, props);
+      }
     } else {
       rwsch = opParseCtx.get(top).getRowResolver();
       top.setChildOperators(null);
@@ -8223,434 +8198,6 @@ public class SemanticAnalyzer extends Ba
     }
   }
 
-  /**
-   * A helper function to generate a column stats task on top of map-red task. The column stats
-   * task fetches from the output of the map-red task, constructs the column stats object and
-   * persists it to the metastore.
-   *
-   * This method generates a plan with a column stats task on top of map-red task and sets up the
-   * appropriate metadata to be used during execution.
-   *
-   * @param qb
-   */
-  private void genColumnStatsTask(QB qb) {
-    QBParseInfo qbParseInfo = qb.getParseInfo();
-    ColumnStatsTask cStatsTask = null;
-    ColumnStatsWork cStatsWork = null;
-    FetchWork fetch = null;
-    String tableName = qbParseInfo.getTableName();
-    String partName = qbParseInfo.getPartName();
-    List<String> colName = qbParseInfo.getColName();
-    List<String> colType = qbParseInfo.getColType();
-    boolean isTblLevel = qbParseInfo.isTblLvl();
-
-    String cols = loadFileWork.get(0).getColumns();
-    String colTypes = loadFileWork.get(0).getColumnTypes();
-
-    String resFileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT);
-    TableDesc resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat);
-
-    fetch = new FetchWork(new Path(loadFileWork.get(0).getSourceDir()).toString(),
-        resultTab, qb.getParseInfo().getOuterQueryLimit());
-
-    ColumnStatsDesc cStatsDesc = new ColumnStatsDesc(tableName, partName,
-        colName, colType, isTblLevel);
-    cStatsWork = new ColumnStatsWork(fetch, cStatsDesc);
-    cStatsTask = (ColumnStatsTask) TaskFactory.get(cStatsWork, conf);
-    rootTasks.add(cStatsTask);
-  }
-
-  @SuppressWarnings("nls")
-  private void genMapRedTasks(ParseContext pCtx) throws SemanticException {
-    boolean isCStats = qb.isAnalyzeRewrite();
-
-    if (pCtx.getFetchTask() != null) {
-      // replaced by single fetch task
-      initParseCtx(pCtx);
-      return;
-    }
-
-    initParseCtx(pCtx);
-    List<Task<MoveWork>> mvTask = new ArrayList<Task<MoveWork>>();
-
-    /*
-     * In case of a select, use a fetch task instead of a move task.
-     * If the select is from analyze table column rewrite, don't create a fetch task. Instead create
-     * a column stats task later.
-     */
-    if (qb.getIsQuery() && !isCStats) {
-      if ((!loadTableWork.isEmpty()) || (loadFileWork.size() != 1)) {
-        throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg());
-      }
-      String cols = loadFileWork.get(0).getColumns();
-      String colTypes = loadFileWork.get(0).getColumnTypes();
-
-      String resFileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT);
-      TableDesc resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat);
-
-      FetchWork fetch = new FetchWork(new Path(loadFileWork.get(0).getSourceDir()).toString(),
-          resultTab, qb.getParseInfo().getOuterQueryLimit());
-
-      FetchTask fetchTask = (FetchTask) TaskFactory.get(fetch, conf);
-      setFetchTask(fetchTask);
-
-      // For the FetchTask, the limit optimiztion requires we fetch all the rows
-      // in memory and count how many rows we get. It's not practical if the
-      // limit factor is too big
-      int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH);
-      if (globalLimitCtx.isEnable() && globalLimitCtx.getGlobalLimit() > fetchLimit) {
-        LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit
-            + ". Doesn't qualify limit optimiztion.");
-        globalLimitCtx.disableOpt();
-      }
-    } else if (!isCStats) {
-      for (LoadTableDesc ltd : loadTableWork) {
-        Task<MoveWork> tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false),
-            conf);
-        mvTask.add(tsk);
-        // Check to see if we are stale'ing any indexes and auto-update them if we want
-        if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEINDEXAUTOUPDATE)) {
-          IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, getInputs(), conf);
-          try {
-            List<Task<? extends Serializable>> indexUpdateTasks = indexUpdater
-                .generateUpdateTasks();
-            for (Task<? extends Serializable> updateTask : indexUpdateTasks) {
-              tsk.addDependentTask(updateTask);
-            }
-          } catch (HiveException e) {
-            console
-                .printInfo("WARNING: could not auto-update stale indexes, which are not in sync");
-          }
-        }
-      }
-
-      boolean oneLoadFile = true;
-      for (LoadFileDesc lfd : loadFileWork) {
-        if (qb.isCTAS()) {
-          assert (oneLoadFile); // should not have more than 1 load file for
-          // CTAS
-          // make the movetask's destination directory the table's destination.
-          String location = qb.getTableDesc().getLocation();
-          if (location == null) {
-            // get the table's default location
-            Table dumpTable;
-            Path targetPath;
-            try {
-              dumpTable = db.newTable(qb.getTableDesc().getTableName());
-              if (!db.databaseExists(dumpTable.getDbName())) {
-                throw new SemanticException("ERROR: The database " + dumpTable.getDbName()
-                    + " does not exist.");
-              }
-              Warehouse wh = new Warehouse(conf);
-              targetPath = wh.getTablePath(db.getDatabase(dumpTable.getDbName()), dumpTable
-                  .getTableName());
-            } catch (HiveException e) {
-              throw new SemanticException(e);
-            } catch (MetaException e) {
-              throw new SemanticException(e);
-            }
-
-            location = targetPath.toString();
-          }
-          lfd.setTargetDir(location);
-
-          oneLoadFile = false;
-        }
-        mvTask.add(TaskFactory.get(new MoveWork(null, null, null, lfd, false),
-            conf));
-      }
-    }
-
-    // generate map reduce plans
-    ParseContext tempParseContext = getParseContext();
-    GenMRProcContext procCtx = new GenMRProcContext(
-        conf,
-        new HashMap<Operator<? extends OperatorDesc>, Task<? extends Serializable>>(),
-        new ArrayList<Operator<? extends OperatorDesc>>(), tempParseContext,
-        mvTask, rootTasks,
-        new LinkedHashMap<Operator<? extends OperatorDesc>, GenMapRedCtx>(),
-        inputs, outputs);
-
-    // create a walker which walks the tree in a DFS manner while maintaining
-    // the operator stack.
-    // The dispatcher generates the plan from the operator tree
-    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
-    opRules.put(new RuleRegExp(new String("R1"),
-        TableScanOperator.getOperatorName() + "%"),
-        new GenMRTableScan1());
-    opRules.put(new RuleRegExp(new String("R2"),
-        TableScanOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"),
-        new GenMRRedSink1());
-    opRules.put(new RuleRegExp(new String("R3"),
-        ReduceSinkOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"),
-        new GenMRRedSink2());
-    opRules.put(new RuleRegExp(new String("R4"),
-        FileSinkOperator.getOperatorName() + "%"),
-        new GenMRFileSink1());
-    opRules.put(new RuleRegExp(new String("R5"),
-        UnionOperator.getOperatorName() + "%"),
-        new GenMRUnion1());
-    opRules.put(new RuleRegExp(new String("R6"),
-        UnionOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"),
-        new GenMRRedSink3());
-    opRules.put(new RuleRegExp(new String("R7"),
-        MapJoinOperator.getOperatorName() + "%"),
-        MapJoinFactory.getTableScanMapJoin());
-
-    // The dispatcher fires the processor corresponding to the closest matching
-    // rule and passes the context along
-    Dispatcher disp = new DefaultRuleDispatcher(new GenMROperator(), opRules,
-        procCtx);
-
-    GraphWalker ogw = new GenMapRedWalker(disp);
-    ArrayList<Node> topNodes = new ArrayList<Node>();
-    topNodes.addAll(topOps.values());
-    ogw.startWalking(topNodes, null);
-
-    /*
-     * If the query was the result of analyze table column compute statistics rewrite, create
-     * a column stats task instead of a fetch task to persist stats to the metastore.
-     */
-    if (isCStats) {
-      genColumnStatsTask(qb);
-    }
-
-    // reduce sink does not have any kids - since the plan by now has been
-    // broken up into multiple
-    // tasks, iterate over all tasks.
-    // For each task, go over all operators recursively
-    for (Task<? extends Serializable> rootTask : rootTasks) {
-      breakTaskTree(rootTask);
-    }
-
-    // For each task, set the key descriptor for the reducer
-    for (Task<? extends Serializable> rootTask : rootTasks) {
-      GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask);
-    }
-
-    // If a task contains an operator which instructs bucketizedhiveinputformat
-    // to be used, please do so
-    for (Task<? extends Serializable> rootTask : rootTasks) {
-      setInputFormat(rootTask);
-    }
-
-    PhysicalContext physicalContext = new PhysicalContext(conf,
-        getParseContext(), ctx, rootTasks, fetchTask);
-    PhysicalOptimizer physicalOptimizer = new PhysicalOptimizer(
-        physicalContext, conf);
-    physicalOptimizer.optimize();
-
-    // For each operator, generate the counters if needed
-    if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEJOBPROGRESS)) {
-      for (Task<? extends Serializable> rootTask : rootTasks) {
-        generateCountersTask(rootTask);
-      }
-    }
-
-    decideExecMode(rootTasks, ctx, globalLimitCtx);
-
-    if (qb.isCTAS()) {
-      // generate a DDL task and make it a dependent task of the leaf
-      CreateTableDesc crtTblDesc = qb.getTableDesc();
-
-      crtTblDesc.validate();
-
-      // Clear the output for CTAS since we don't need the output from the
-      // mapredWork, the
-      // DDLWork at the tail of the chain will have the output
-      getOutputs().clear();
-
-      Task<? extends Serializable> crtTblTask = TaskFactory.get(new DDLWork(
-          getInputs(), getOutputs(), crtTblDesc), conf);
-
-      // find all leaf tasks and make the DDLTask as a dependent task of all of
-      // them
-      HashSet<Task<? extends Serializable>> leaves = new HashSet<Task<? extends Serializable>>();
-      getLeafTasks(rootTasks, leaves);
-      assert (leaves.size() > 0);
-      for (Task<? extends Serializable> task : leaves) {
-        if (task instanceof StatsTask) {
-          // StatsTask require table to already exist
-          for (Task<? extends Serializable> parentOfStatsTask : task.getParentTasks()) {
-            parentOfStatsTask.addDependentTask(crtTblTask);
-          }
-          for (Task<? extends Serializable> parentOfCrtTblTask : crtTblTask.getParentTasks()) {
-            parentOfCrtTblTask.removeDependentTask(task);
-          }
-          crtTblTask.addDependentTask(task);
-        } else {
-          task.addDependentTask(crtTblTask);
-        }
-      }
-    }
-
-    if (globalLimitCtx.isEnable() && fetchTask != null) {
-      int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH);
-      LOG.info("set least row check for FetchTask: " + globalLimitCtx.getGlobalLimit());
-      fetchTask.getWork().setLeastNumRows(globalLimitCtx.getGlobalLimit());
-    }
-
-    if (globalLimitCtx.isEnable() && globalLimitCtx.getLastReduceLimitDesc() != null) {
-      LOG.info("set least row check for LimitDesc: " + globalLimitCtx.getGlobalLimit());
-      globalLimitCtx.getLastReduceLimitDesc().setLeastRows(globalLimitCtx.getGlobalLimit());
-      List<ExecDriver> mrTasks = Utilities.getMRTasks(rootTasks);
-      for (ExecDriver tsk : mrTasks) {
-        tsk.setRetryCmdWhenFail(true);
-      }
-    }
-  }
-
-  /**
-   * Find all leaf tasks of the list of root tasks.
-   */
-  private void getLeafTasks(List<Task<? extends Serializable>> rootTasks,
-      HashSet<Task<? extends Serializable>> leaves) {
-
-    for (Task<? extends Serializable> root : rootTasks) {
-      getLeafTasks(root, leaves);
-    }
-  }
-
-  private void getLeafTasks(Task<? extends Serializable> task,
-      HashSet<Task<? extends Serializable>> leaves) {
-    if (task.getDependentTasks() == null) {
-      if (!leaves.contains(task)) {
-        leaves.add(task);
-      }
-    } else {
-      getLeafTasks(task.getDependentTasks(), leaves);
-    }
-  }
-
-  // loop over all the tasks recursviely
-  private void generateCountersTask(Task<? extends Serializable> task) {
-    if (task instanceof ExecDriver) {
-      HashMap<String, Operator<? extends OperatorDesc>> opMap = ((MapredWork) task
-          .getWork()).getAliasToWork();
-      if (!opMap.isEmpty()) {
-        for (Operator<? extends OperatorDesc> op : opMap.values()) {
-          generateCountersOperator(op);
-        }
-      }
-
-      Operator<? extends OperatorDesc> reducer = ((MapredWork) task.getWork())
-          .getReducer();
-      if (reducer != null) {
-        LOG.info("Generating counters for operator " + reducer);
-        generateCountersOperator(reducer);
-      }
-    } else if (task instanceof ConditionalTask) {
-      List<Task<? extends Serializable>> listTasks = ((ConditionalTask) task)
-          .getListTasks();
-      for (Task<? extends Serializable> tsk : listTasks) {
-        generateCountersTask(tsk);
-      }
-    }
-
-    // Start the counters from scratch - a hack for hadoop 17.
-    Operator.resetLastEnumUsed();
-
-    if (task.getChildTasks() == null) {
-      return;
-    }
-
-    for (Task<? extends Serializable> childTask : task.getChildTasks()) {
-      generateCountersTask(childTask);
-    }
-  }
-
-  private void generateCountersOperator(Operator<? extends OperatorDesc> op) {
-    op.assignCounterNameToEnum();
-
-    if (op.getChildOperators() == null) {
-      return;
-    }
-
-    for (Operator<? extends OperatorDesc> child : op.getChildOperators()) {
-      generateCountersOperator(child);
-    }
-  }
-
-  // loop over all the tasks recursviely
-  private void breakTaskTree(Task<? extends Serializable> task) {
-
-    if (task instanceof ExecDriver) {
-      HashMap<String, Operator<? extends OperatorDesc>> opMap = ((MapredWork) task
-          .getWork()).getAliasToWork();
-      if (!opMap.isEmpty()) {
-        for (Operator<? extends OperatorDesc> op : opMap.values()) {
-          breakOperatorTree(op);
-        }
-      }
-    } else if (task instanceof ConditionalTask) {
-      List<Task<? extends Serializable>> listTasks = ((ConditionalTask) task)
-          .getListTasks();
-      for (Task<? extends Serializable> tsk : listTasks) {
-        breakTaskTree(tsk);
-      }
-    }
-
-    if (task.getChildTasks() == null) {
-      return;
-    }
-
-    for (Task<? extends Serializable> childTask : task.getChildTasks()) {
-      breakTaskTree(childTask);
-    }
-  }
-
-  // loop over all the operators recursviely
-  private void breakOperatorTree(Operator<? extends OperatorDesc> topOp) {
-    if (topOp instanceof ReduceSinkOperator) {
-      topOp.setChildOperators(null);
-    }
-
-    if (topOp.getChildOperators() == null) {
-      return;
-    }
-
-    for (Operator<? extends OperatorDesc> op : topOp.getChildOperators()) {
-      breakOperatorTree(op);
-    }
-  }
-
-  private void setInputFormat(MapredWork work, Operator<? extends OperatorDesc> op) {
-    if (op.isUseBucketizedHiveInputFormat()) {
-      work.setUseBucketizedHiveInputFormat(true);
-      return;
-    }
-
-    if (op.getChildOperators() != null) {
-      for (Operator<? extends OperatorDesc> childOp : op.getChildOperators()) {
-        setInputFormat(work, childOp);
-      }
-    }
-  }
-
-  // loop over all the tasks recursviely
-  private void setInputFormat(Task<? extends Serializable> task) {
-    if (task instanceof ExecDriver) {
-      MapredWork work = (MapredWork) task.getWork();
-      HashMap<String, Operator<? extends OperatorDesc>> opMap = work.getAliasToWork();
-      if (!opMap.isEmpty()) {
-        for (Operator<? extends OperatorDesc> op : opMap.values()) {
-          setInputFormat(work, op);
-        }
-      }
-    } else if (task instanceof ConditionalTask) {
-      List<Task<? extends Serializable>> listTasks = ((ConditionalTask) task).getListTasks();
-      for (Task<? extends Serializable> tsk : listTasks) {
-        setInputFormat(tsk);
-      }
-    }
-
-    if (task.getChildTasks() != null) {
-      for (Task<? extends Serializable> childTask : task.getChildTasks()) {
-        setInputFormat(childTask);
-      }
-    }
-  }
-
   @SuppressWarnings("nls")
   public Phase1Ctx initPhase1Ctx() {
 
@@ -8745,7 +8292,7 @@ public class SemanticAnalyzer extends Ba
 
     ParseContext pCtx = new ParseContext(conf, qb, child, opToPartPruner,
         opToPartList, topOps, topSelOps, opParseCtx, joinContext, smbMapJoinContext,
-        topToTable, fsopToTable,
+        topToTable, topToTableProps, fsopToTable,
         loadTableWork, loadFileWork, ctx, idToTableNameMap, destTableId, uCtx,
         listMapJoinOpsNoReducer, groupOpToInputTables, prunedPartitions,
         opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks,
@@ -8778,9 +8325,14 @@ public class SemanticAnalyzer extends Ba
       setColumnAccessInfo(columnAccessAnalyzer.analyzeColumnAccess());
     }
 
-    // At this point we have the complete operator tree
-    // from which we want to find the reduce operator
-    genMapRedTasks(pCtx);
+    if (!ctx.getExplainLogical()) {
+      // At this point we have the complete operator tree
+      // from which we want to create the map-reduce plan
+      MapReduceCompiler compiler = new MapReduceCompiler();
+      compiler.init(conf, console, db);
+      compiler.compile(pCtx, rootTasks, inputs, outputs);
+      fetchTask = pCtx.getFetchTask();
+    }
 
     LOG.info("Completed plan generation");
 
@@ -9568,98 +9120,6 @@ public class SemanticAnalyzer extends Ba
     }
   }
 
-  private void decideExecMode(List<Task<? extends Serializable>> rootTasks, Context ctx,
-      GlobalLimitCtx globalLimitCtx)
-      throws SemanticException {
-
-    // bypass for explain queries for now
-    if (ctx.getExplain()) {
-      return;
-    }
-
-    // user has told us to run in local mode or doesn't want auto-local mode
-    if (ctx.isLocalOnlyExecutionMode() ||
-        !conf.getBoolVar(HiveConf.ConfVars.LOCALMODEAUTO)) {
-      return;
-    }
-
-    final Context lCtx = ctx;
-    PathFilter p = new PathFilter() {
-      public boolean accept(Path file) {
-        return !lCtx.isMRTmpFileURI(file.toUri().getPath());
-      }
-    };
-    List<ExecDriver> mrtasks = Utilities.getMRTasks(rootTasks);
-
-    // map-reduce jobs will be run locally based on data size
-    // first find out if any of the jobs needs to run non-locally
-    boolean hasNonLocalJob = false;
-    for (ExecDriver mrtask : mrtasks) {
-      try {
-        ContentSummary inputSummary = Utilities.getInputSummary
-            (ctx, (MapredWork) mrtask.getWork(), p);
-        int numReducers = getNumberOfReducers(mrtask.getWork(), conf);
-
-        long estimatedInput;
-
-        if (globalLimitCtx != null && globalLimitCtx.isEnable()) {
-          // If the global limit optimization is triggered, we will
-          // estimate input data actually needed based on limit rows.
-          // estimated Input = (num_limit * max_size_per_row) * (estimated_map + 2)
-          //
-          long sizePerRow = HiveConf.getLongVar(conf,
-              HiveConf.ConfVars.HIVELIMITMAXROWSIZE);
-          estimatedInput = globalLimitCtx.getGlobalLimit() * sizePerRow;
-          long minSplitSize = HiveConf.getLongVar(conf,
-              HiveConf.ConfVars.MAPREDMINSPLITSIZE);
-          long estimatedNumMap = inputSummary.getLength() / minSplitSize + 1;
-          estimatedInput = estimatedInput * (estimatedNumMap + 1);
-        } else {
-          estimatedInput = inputSummary.getLength();
-        }
-
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Task: " + mrtask.getId() + ", Summary: " +
-              inputSummary.getLength() + "," + inputSummary.getFileCount() + ","
-              + numReducers + ", estimated Input: " + estimatedInput);
-        }
-
-        if (MapRedTask.isEligibleForLocalMode(conf, numReducers,
-            estimatedInput, inputSummary.getFileCount()) != null) {
-          hasNonLocalJob = true;
-          break;
-        } else {
-          mrtask.setLocalMode(true);
-        }
-      } catch (IOException e) {
-        throw new SemanticException(e);
-      }
-    }
-
-    if (!hasNonLocalJob) {
-      // Entire query can be run locally.
-      // Save the current tracker value and restore it when done.
-      ctx.setOriginalTracker(ShimLoader.getHadoopShims().getJobLauncherRpcAddress(conf));
-      ShimLoader.getHadoopShims().setJobLauncherRpcAddress(conf, "local");
-      console.printInfo("Automatically selecting local only mode for query");
-    }
-  }
-
-  /**
-   * Make a best guess at trying to find the number of reducers
-   */
-  private static int getNumberOfReducers(MapredWork mrwork, HiveConf conf) {
-    if (mrwork.getReducer() == null) {
-      return 0;
-    }
-
-    if (mrwork.getNumReduceTasks() >= 0) {
-      return mrwork.getNumReduceTasks();
-    }
-
-    return conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS);
-  }
-
   // Process the position alias in GROUPBY and ORDERBY
   private void processPositionAlias(ASTNode ast) throws SemanticException {
     if (HiveConf.getBoolVar(conf,

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java Mon Jul 29 21:08:03 2013
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
+import java.sql.Date;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -152,7 +153,8 @@ public final class TypeCheckProcFactory 
         getStrExprProcessor());
     opRules.put(new RuleRegExp("R4", HiveParser.KW_TRUE + "%|"
         + HiveParser.KW_FALSE + "%"), getBoolExprProcessor());
-    opRules.put(new RuleRegExp("R5", HiveParser.TOK_TABLE_OR_COL + "%"),
+    opRules.put(new RuleRegExp("R5", HiveParser.TOK_DATELITERAL + "%"), getDateExprProcessor());
+    opRules.put(new RuleRegExp("R6", HiveParser.TOK_TABLE_OR_COL + "%"),
         getColumnExprProcessor());
 
     // The dispatcher fires the processor corresponding to the closest matching
@@ -388,6 +390,51 @@ public final class TypeCheckProcFactory 
   }
 
   /**
+   * Processor for date constants.
+   */
+  public static class DateExprProcessor implements NodeProcessor {
+
+    @Override
+    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+        Object... nodeOutputs) throws SemanticException {
+
+      TypeCheckCtx ctx = (TypeCheckCtx) procCtx;
+      if (ctx.getError() != null) {
+        return null;
+      }
+
+      ExprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx);
+      if (desc != null) {
+        return desc;
+      }
+
+      ASTNode expr = (ASTNode) nd;
+
+      // Get the string value and convert to a Date value.
+      if (expr.getChildCount() == 1) {
+        try {
+          String dateString = BaseSemanticAnalyzer.stripQuotes(expr.getChild(0).getText());
+          Date date = Date.valueOf(dateString);
+          return new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo, date);
+        } catch (IllegalArgumentException err) {
+          throw new SemanticException("Unable to convert date literal string to date value.", err);
+        }
+      } else {
+        throw new SemanticException("Expected date string after DATE keyword");
+      }
+    }
+  }
+
+  /**
+   * Factory method to get DateExprProcessor.
+   *
+   * @return DateExprProcessor.
+   */
+  public static DateExprProcessor getDateExprProcessor() {
+    return new DateExprProcessor();
+  }
+
+  /**
    * Processor for table columns.
    */
   public static class ColumnExprProcessor implements NodeProcessor {
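
The new processor leans on java.sql.Date.valueOf, so a date literal has to be in yyyy-[m]m-[d]d form; anything else surfaces as the SemanticException above. A small standalone illustration (not Hive code):

    import java.sql.Date;

    public class DateLiteralDemo {
      public static void main(String[] args) {
        // Accepted: the JDBC escape format that Date.valueOf understands.
        Date ok = Date.valueOf("2013-07-29");
        System.out.println(ok);                 // 2013-07-29

        try {
          Date.valueOf("07/29/2013");           // wrong format
        } catch (IllegalArgumentException e) {
          // This is the failure path DateExprProcessor wraps in a SemanticException.
          System.out.println("rejected: " + e);
        }
      }
    }
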
@@ -519,6 +566,8 @@ public final class TypeCheckProcFactory 
           serdeConstants.STRING_TYPE_NAME);
       conversionFunctionTextHashMap.put(HiveParser.TOK_BINARY,
           serdeConstants.BINARY_TYPE_NAME);
+      conversionFunctionTextHashMap.put(HiveParser.TOK_DATE,
+          serdeConstants.DATE_TYPE_NAME);
       conversionFunctionTextHashMap.put(HiveParser.TOK_TIMESTAMP,
           serdeConstants.TIMESTAMP_TYPE_NAME);
       conversionFunctionTextHashMap.put(HiveParser.TOK_DECIMAL,
@@ -764,10 +813,9 @@ public final class TypeCheckProcFactory 
 
           if (inferTypes.contains(constType) && inferTypes.contains(columnType)
               && !columnType.equalsIgnoreCase(constType)) {
-            String constValue =
-                ((ExprNodeConstantDesc) children.get(constIdx)).getValue().toString();
+            Object originalValue =  ((ExprNodeConstantDesc) children.get(constIdx)).getValue();
+            String constValue = originalValue.toString();
             boolean triedDouble = false;
-
             Number value = null;
             try {
               if (columnType.equalsIgnoreCase(serdeConstants.TINYINT_TYPE_NAME)) {
@@ -780,12 +828,17 @@ public final class TypeCheckProcFactory 
                 value = new Long(constValue);
               } else if (columnType.equalsIgnoreCase(serdeConstants.FLOAT_TYPE_NAME)) {
                 value = new Float(constValue);
-              } else if (columnType.equalsIgnoreCase(serdeConstants.DOUBLE_TYPE_NAME)
-                  || (columnType.equalsIgnoreCase(serdeConstants.STRING_TYPE_NAME)
-                     && !constType.equalsIgnoreCase(serdeConstants.BIGINT_TYPE_NAME))) {
-                // no smart inference for queries like "str_col = bigint_const"
+              } else if (columnType.equalsIgnoreCase(serdeConstants.DOUBLE_TYPE_NAME)) {
                 triedDouble = true;
                 value = new Double(constValue);
+              } else if (columnType.equalsIgnoreCase(serdeConstants.STRING_TYPE_NAME)) {
+                // Don't scramble the const type information if comparing to a string column,
+                // It's not useful to do so; as of now, there is also a hack in
+                // SemanticAnalyzer#genTablePlan that causes every column to look like a string
+                // a string down here, so number type information is always lost otherwise.
+                boolean isNumber = (originalValue instanceof Number);
+                triedDouble = !isNumber;
+                value = isNumber ? (Number)originalValue : new Double(constValue);
               }
             } catch (NumberFormatException nfe) {
               // this exception suggests the precise type inference did not succeed
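
The net effect of the new string-column branch, in isolation: when the constant already carries a Number, its exact type is preserved instead of being round-tripped through toString() and re-parsed as a Double. A self-contained illustration:

    public class ConstInferenceDemo {
      public static void main(String[] args) {
        Object originalValue = Long.valueOf(100L);   // e.g. a bigint constant in the predicate
        boolean isNumber = originalValue instanceof Number;
        // Mirrors the new branch: keep the original Number, fall back to Double otherwise.
        Number value = isNumber ? (Number) originalValue
                                : Double.valueOf(originalValue.toString());
        System.out.println(value.getClass().getSimpleName() + " = " + value);  // Long = 100
      }
    }
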

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java Mon Jul 29 21:08:03 2013
@@ -94,61 +94,35 @@ class UnparseTranslator {
 
     int tokenStartIndex = node.getTokenStartIndex();
     int tokenStopIndex = node.getTokenStopIndex();
-
     Translation translation = new Translation();
     translation.tokenStopIndex = tokenStopIndex;
     translation.replacementText = replacementText;
 
     // Sanity check for overlap with regions already being expanded
     assert (tokenStopIndex >= tokenStartIndex);
-    Map.Entry<Integer, Translation> existingEntry;
-    existingEntry = translations.floorEntry(tokenStartIndex);
-    boolean prefix = false;
-    if (existingEntry != null) {
-      if (existingEntry.getKey().equals(tokenStartIndex)) {
-        if (existingEntry.getValue().tokenStopIndex == tokenStopIndex) {
-          if (existingEntry.getValue().replacementText.equals(replacementText)) {
-            // exact match for existing mapping: somebody is doing something
-            // redundant, but we'll let it pass
-            return;
-          }
-        } else if (tokenStopIndex > existingEntry.getValue().tokenStopIndex) {
-          // is existing mapping a prefix for new mapping? if so, that's also
-          // redundant, but in this case we need to expand it
-          prefix = replacementText.startsWith(
-            existingEntry.getValue().replacementText);
-          assert(prefix);
-        } else {
-          // new mapping is a prefix for existing mapping:  ignore it
-          prefix = existingEntry.getValue().replacementText.startsWith(
-            replacementText);
-          assert(prefix);
-          return;
-        }
-      }
-      if (!prefix) {
-        assert (existingEntry.getValue().tokenStopIndex < tokenStartIndex);
-      }
-    }
-    if (!prefix) {
-      existingEntry = translations.ceilingEntry(tokenStartIndex);
-      if (existingEntry != null) {
-        assert (existingEntry.getKey() > tokenStopIndex);
-      }
-    }
 
-    // Is existing entry a suffix of the newer entry and a subset of it?
-    existingEntry = translations.floorEntry(tokenStopIndex);
-    if (existingEntry != null) {
-      if (existingEntry.getKey().equals(tokenStopIndex)) {
-        if (tokenStartIndex < existingEntry.getKey() &&
-            tokenStopIndex == existingEntry.getKey()) {
-          // Seems newer entry is a super-set of existing entry, remove existing entry
-          assert (replacementText.endsWith(existingEntry.getValue().replacementText));
-          translations.remove(tokenStopIndex);
-        }
+    List<Integer> subsetEntries = new ArrayList<Integer>();
+    // Is the existing entry and newer entry are subset of one another ?
+    for (Map.Entry<Integer, Translation> existingEntry :
+          translations.headMap(tokenStopIndex, true).entrySet()) {
+      // check if the new entry contains the existing
+      if (existingEntry.getValue().tokenStopIndex <= tokenStopIndex &&
+            existingEntry.getKey() >= tokenStartIndex) {
+        // Collect newer entry is if a super-set of existing entry,
+        assert (replacementText.contains(existingEntry.getValue().replacementText));
+        subsetEntries.add(existingEntry.getKey());
+        // check if the existing entry contains the new
+      } else if (existingEntry.getValue().tokenStopIndex >= tokenStopIndex &&
+            existingEntry.getKey() <= tokenStartIndex) {
+        assert (existingEntry.getValue().replacementText.contains(replacementText));
+        // we don't need to add this new entry since there's already an overlapping one
+        return;
       }
     }
+    // remove any existing entries that are contained by the new one
+    for (Integer index : subsetEntries) {
+      translations.remove(index);
+    }
 
     // It's all good: create a new entry in the map (or update existing one)
     translations.put(tokenStartIndex, translation);
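
The rewritten method replaces the prefix/suffix special cases with a single containment rule over token-index ranges: an existing translation wholly inside the new one is dropped, and a new translation wholly inside an existing one is ignored. A minimal, self-contained sketch of that rule (the class and names are illustrative, not Hive code):

    // Illustrative only.
    final class TokenRange {
      final int start;
      final int stop;

      TokenRange(int start, int stop) {
        this.start = start;
        this.stop = stop;
      }

      // True when this range fully covers the other; the loop above applies this
      // test in both directions between the new range and each existing entry.
      boolean contains(TokenRange other) {
        return start <= other.start && other.stop <= stop;
      }
    }

    // e.g. new TokenRange(3, 9).contains(new TokenRange(4, 7)) == true, so a (4, 7)
    // translation would be removed in favour of the wider replacement text.
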

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java Mon Jul 29 21:08:03 2013
@@ -144,7 +144,12 @@ public class ConditionalResolverMergeFil
 
       if (inpFs.exists(dirPath)) {
         // For each dynamic partition, check if it needs to be merged.
-        MapredWork work = (MapredWork) mrTask.getWork();
+        MapWork work;
+        if (mrTask.getWork() instanceof MapredWork) {
+          work = ((MapredWork) mrTask.getWork()).getMapWork();
+        } else {
+          work = (MapWork) mrTask.getWork();
+        }
 
         int lbLevel = (ctx.getLbCtx() == null) ? 0 : ctx.getLbCtx().calculateListBucketingLevel();
 
@@ -222,7 +227,7 @@ public class ConditionalResolverMergeFil
   private void generateActualTasks(HiveConf conf, List<Task<? extends Serializable>> resTsks,
       long trgtSize, long avgConditionSize, Task<? extends Serializable> mvTask,
       Task<? extends Serializable> mrTask, Task<? extends Serializable> mrAndMvTask, Path dirPath,
-      FileSystem inpFs, ConditionalResolverMergeFilesCtx ctx, MapredWork work, int dpLbLevel)
+      FileSystem inpFs, ConditionalResolverMergeFilesCtx ctx, MapWork work, int dpLbLevel)
       throws IOException {
     DynamicPartitionCtx dpCtx = ctx.getDPCtx();
     // get list of dynamic partitions
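
Since a merge task on this branch may carry either a full MapredWork or a bare MapWork, the resolver now unwraps accordingly; the same pattern as a small helper, with a hypothetical name, for illustration only:

    // MapredWork.getMapWork() is the accessor used in the hunk above.
    private static MapWork mapWorkOf(Serializable taskWork) {
      if (taskWork instanceof MapredWork) {
        return ((MapredWork) taskWork).getMapWork();
      }
      return (MapWork) taskWork;
    }
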
@@ -319,18 +324,11 @@ public class ConditionalResolverMergeFil
     return pDesc;
   }
 
-  private void setupMapRedWork(HiveConf conf, MapredWork work, long targetSize, long totalSize) {
-    if (work.getNumReduceTasks() > 0) {
-      int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);
-      int reducers = (int) ((totalSize + targetSize - 1) / targetSize);
-      reducers = Math.max(1, reducers);
-      reducers = Math.min(maxReducers, reducers);
-      work.setNumReduceTasks(reducers);
-    }
-    work.setMaxSplitSize(targetSize);
-    work.setMinSplitSize(targetSize);
-    work.setMinSplitSizePerNode(targetSize);
-    work.setMinSplitSizePerRack(targetSize);
+  private void setupMapRedWork(HiveConf conf, MapWork mWork, long targetSize, long totalSize) {
+    mWork.setMaxSplitSize(targetSize);
+    mWork.setMinSplitSize(targetSize);
+    mWork.setMinSplitSizePerNode(targetSize);
+    mWork.setMinSplitSizePerRack(targetSize);
   }
 
   private static class AverageSize {
@@ -352,7 +350,6 @@ public class ConditionalResolverMergeFil
   }
 
   private AverageSize getAverageSize(FileSystem inpFs, Path dirPath) {
-    AverageSize dummy = new AverageSize(0, 0);
     AverageSize error = new AverageSize(-1, -1);
     try {
       FileStatus[] fStats = inpFs.listStatus(dirPath);

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java Mon Jul 29 21:08:03 2013
@@ -32,4 +32,6 @@ public @interface Explain {
   boolean normalExplain() default true;
 
   boolean displayOnlyOnTrue() default false;
+
+  boolean skipHeader() default false;
 }
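
A hypothetical use of the new attribute, purely for illustration: the getter and displayName value are invented, and the behaviour is inferred from the attribute name (the explain formatter would omit the header line for the annotated field and print only its nested content).

    // Hypothetical example; not taken from the patch.
    @Explain(displayName = "Plan Detail", skipHeader = true)
    public SomeNestedDesc getPlanDetail() {
      return planDetail;
    }
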

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java Mon Jul 29 21:08:03 2013
@@ -25,6 +25,7 @@ import java.util.List;
 
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.parse.ParseContext;
 
 /**
  * ExplainWork.
@@ -37,20 +38,26 @@ public class ExplainWork implements Seri
   private ArrayList<Task<? extends Serializable>> rootTasks;
   private String astStringTree;
   private HashSet<ReadEntity> inputs;
+  private ParseContext pCtx;
+
   boolean extended;
   boolean formatted;
   boolean dependency;
+  boolean logical;
+
 
   public ExplainWork() {
   }
 
   public ExplainWork(String resFile,
+      ParseContext pCtx,
       List<Task<? extends Serializable>> rootTasks,
       String astStringTree,
       HashSet<ReadEntity> inputs,
       boolean extended,
       boolean formatted,
-      boolean dependency) {
+      boolean dependency,
+      boolean logical) {
     this.resFile = resFile;
     this.rootTasks = new ArrayList<Task<? extends Serializable>>(rootTasks);
     this.astStringTree = astStringTree;
@@ -58,6 +65,8 @@ public class ExplainWork implements Seri
     this.extended = extended;
     this.formatted = formatted;
     this.dependency = dependency;
+    this.logical = logical;
+    this.pCtx = pCtx;
   }
 
   public String getResFile() {
@@ -115,4 +124,21 @@ public class ExplainWork implements Seri
   public void setFormatted(boolean formatted) {
     this.formatted = formatted;
   }
+
+  public ParseContext getParseContext() {
+    return pCtx;
+  }
+
+  public void setParseContext(ParseContext pCtx) {
+    this.pCtx = pCtx;
+  }
+
+  public boolean isLogical() {
+    return logical;
+  }
+
+  public void setLogical(boolean logical) {
+    this.logical = logical;
+  }
+
 }
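
Callers constructing an ExplainWork now have to supply the ParseContext and the new logical flag as well. A sketch of the widened call, not taken from the patch (the local names resFile, pCtx, rootTasks, astStringTree and inputs are illustrative stand-ins; the pairing of pCtx with logical suggests the parse context is what feeds the new logical explain):

  // Hypothetical call site; argument order follows the constructor above.
  ExplainWork work = new ExplainWork(resFile,
      pCtx,            // new: parse context for the logical (operator-level) explain
      rootTasks,
      astStringTree,
      inputs,
      extended,
      formatted,
      dependency,
      false);          // new: logical flag; false preserves the existing behaviour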

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java Mon Jul 29 21:08:03 2013
@@ -103,7 +103,11 @@ public class ExprNodeConstantDesc extend
     if (!typeInfo.equals(dest.getTypeInfo())) {
       return false;
     }
-    if (!value.equals(dest.getValue())) {
+    if (value == null) {
+      if (dest.getValue() != null) {
+        return false;
+      }
+    } else if (!value.equals(dest.getValue())) {
       return false;
     }
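
The rewritten check simply makes the comparison null-safe, presumably because a NULL constant is represented with a null value. For readers on Java 7 or later, the same behaviour could be written with java.util.Objects; shown only for comparison, since the patch keeps the explicit check (which also compiles on Java 6):

  // Equivalent null-safe comparison using the JDK 7 utility class.
  if (!java.util.Objects.equals(value, dest.getValue())) {
    return false;
  }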
 

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java Mon Jul 29 21:08:03 2013
@@ -204,7 +204,7 @@ public class ExprNodeDescUtils {
       return backtrack(column, parent, terminal);
     }
     if (source instanceof ExprNodeFieldDesc) {
-      // field epression should be resolved
+      // field expression should be resolved
       ExprNodeFieldDesc field = (ExprNodeFieldDesc) source.clone();
       field.setDesc(backtrack(field.getDesc(), current, terminal));
       return field;

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java Mon Jul 29 21:08:03 2013
@@ -20,28 +20,13 @@ package org.apache.hadoop.hive.ql.plan;
 
 import java.io.ByteArrayOutputStream;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
 
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.OperatorUtils;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol;
-import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol;
-import org.apache.hadoop.hive.ql.parse.OpParseContext;
-import org.apache.hadoop.hive.ql.parse.QBJoinTree;
-import org.apache.hadoop.hive.ql.parse.SplitSample;
 import org.apache.hadoop.mapred.JobConf;
 
+
 /**
  * MapredWork.
  *
@@ -49,549 +34,58 @@ import org.apache.hadoop.mapred.JobConf;
 @Explain(displayName = "Map Reduce")
 public class MapredWork extends AbstractOperatorDesc {
   private static final long serialVersionUID = 1L;
-  private String command;
-  // map side work
-  // use LinkedHashMap to make sure the iteration order is
-  // deterministic, to ease testing
-  private LinkedHashMap<String, ArrayList<String>> pathToAliases;
-
-  private LinkedHashMap<String, PartitionDesc> pathToPartitionInfo;
-
-  private LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork;
-
-  private LinkedHashMap<String, PartitionDesc> aliasToPartnInfo;
-
-  private HashMap<String, SplitSample> nameToSplitSample;
-
-  // map<->reduce interface
-  // schema of the map-reduce 'key' object - this is homogeneous
-  private TableDesc keyDesc;
-
-  // schema of the map-reduce 'val' object - this is heterogeneous
-  private List<TableDesc> tagToValueDesc;
-
-  private Operator<?> reducer;
-
-  private Integer numReduceTasks;
-  private Integer numMapTasks;
-  private Long maxSplitSize;
-  private Long minSplitSize;
-  private Long minSplitSizePerNode;
-  private Long minSplitSizePerRack;
-
-  private boolean needsTagging;
-  private boolean hadoopSupportsSplittable;
-
-  private MapredLocalWork mapLocalWork;
-  private String inputformat;
-  private String indexIntermediateFile;
-  private boolean gatheringStats;
-
-  private String tmpHDFSFileURI;
 
-  private LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtxMap;
+  private MapWork mapWork = new MapWork();
+  private ReduceWork reduceWork = null;
 
-  private QBJoinTree joinTree;
-
-  private boolean mapperCannotSpanPartns;
-
-  // used to indicate the input is sorted, and so a BinarySearchRecordReader shoudl be used
-  private boolean inputFormatSorted = false;
-
-  private transient boolean useBucketizedHiveInputFormat;
-
-  // if this is true, this means that this is the map reduce task which writes the final data,
-  // ignoring the optional merge task
-  private boolean finalMapRed = false;
-
-  // If this map reduce task has a FileSinkOperator, and bucketing/sorting metadata can be
-  // inferred about the data being written by that operator, these are mappings from the directory
-  // that operator writes into to the bucket/sort columns for that data.
-  private final Map<String, List<BucketCol>> bucketedColsByDirectory =
-      new HashMap<String, List<BucketCol>>();
-  private final Map<String, List<SortCol>> sortedColsByDirectory =
-      new HashMap<String, List<SortCol>>();
-
-  public MapredWork() {
-    aliasToPartnInfo = new LinkedHashMap<String, PartitionDesc>();
-  }
-
-  public MapredWork(
-      final String command,
-      final LinkedHashMap<String, ArrayList<String>> pathToAliases,
-      final LinkedHashMap<String, PartitionDesc> pathToPartitionInfo,
-      final LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork,
-      final TableDesc keyDesc, List<TableDesc> tagToValueDesc,
-      final Operator<?> reducer, final Integer numReduceTasks,
-      final MapredLocalWork mapLocalWork,
-      final boolean hadoopSupportsSplittable) {
-    this.command = command;
-    this.pathToAliases = pathToAliases;
-    this.pathToPartitionInfo = pathToPartitionInfo;
-    this.aliasToWork = aliasToWork;
-    this.keyDesc = keyDesc;
-    this.tagToValueDesc = tagToValueDesc;
-    this.reducer = reducer;
-    this.numReduceTasks = numReduceTasks;
-    this.mapLocalWork = mapLocalWork;
-    aliasToPartnInfo = new LinkedHashMap<String, PartitionDesc>();
-    this.hadoopSupportsSplittable = hadoopSupportsSplittable;
-    maxSplitSize = null;
-    minSplitSize = null;
-    minSplitSizePerNode = null;
-    minSplitSizePerRack = null;
-  }
-
-  public String getCommand() {
-    return command;
-  }
+  private boolean finalMapRed;
 
-  public void setCommand(final String command) {
-    this.command = command;
+  @Explain(skipHeader = true, displayName = "Map")
+  public MapWork getMapWork() {
+    return mapWork;
   }
 
-  @Explain(displayName = "Path -> Alias", normalExplain = false)
-  public LinkedHashMap<String, ArrayList<String>> getPathToAliases() {
-    return pathToAliases;
-  }
-
-  public void setPathToAliases(
-      final LinkedHashMap<String, ArrayList<String>> pathToAliases) {
-    this.pathToAliases = pathToAliases;
-  }
-
-  @Explain(displayName = "Truncated Path -> Alias", normalExplain = false)
-  /**
-   * This is used to display and verify output of "Path -> Alias" in test framework.
-   *
-   * {@link QTestUtil} masks "Path -> Alias" and makes verification impossible.
-   * By keeping "Path -> Alias" intact and adding a new display name which is not
-   * masked by {@link QTestUtil} by removing prefix.
-   *
-   * Notes: we would still be masking for intermediate directories.
-   *
-   * @return
-   */
-  public Map<String, ArrayList<String>> getTruncatedPathToAliases() {
-    Map<String, ArrayList<String>> trunPathToAliases = new LinkedHashMap<String,
-        ArrayList<String>>();
-    Iterator<Entry<String, ArrayList<String>>> itr = this.pathToAliases.entrySet().iterator();
-    while (itr.hasNext()) {
-      final Entry<String, ArrayList<String>> entry = itr.next();
-      String origiKey = entry.getKey();
-      String newKey = PlanUtils.removePrefixFromWarehouseConfig(origiKey);
-      ArrayList<String> value = entry.getValue();
-      trunPathToAliases.put(newKey, value);
-    }
-    return trunPathToAliases;
-  }
-
-
-
-  @Explain(displayName = "Path -> Partition", normalExplain = false)
-  public LinkedHashMap<String, PartitionDesc> getPathToPartitionInfo() {
-    return pathToPartitionInfo;
-  }
-
-  public void setPathToPartitionInfo(
-      final LinkedHashMap<String, PartitionDesc> pathToPartitionInfo) {
-    this.pathToPartitionInfo = pathToPartitionInfo;
-  }
-
-  /**
-   * @return the aliasToPartnInfo
-   */
-  public LinkedHashMap<String, PartitionDesc> getAliasToPartnInfo() {
-    return aliasToPartnInfo;
-  }
-
-  /**
-   * @param aliasToPartnInfo
-   *          the aliasToPartnInfo to set
-   */
-  public void setAliasToPartnInfo(
-      LinkedHashMap<String, PartitionDesc> aliasToPartnInfo) {
-    this.aliasToPartnInfo = aliasToPartnInfo;
-  }
-
-  @Explain(displayName = "Alias -> Map Operator Tree")
-  public LinkedHashMap<String, Operator<? extends OperatorDesc>> getAliasToWork() {
-    return aliasToWork;
-  }
-
-  public void setAliasToWork(
-      final LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork) {
-    this.aliasToWork = aliasToWork;
-  }
-
-  public void mergeAliasedInput(String alias, String pathDir, PartitionDesc partitionInfo) {
-    ArrayList<String> aliases = pathToAliases.get(pathDir);
-    if (aliases == null) {
-      aliases = new ArrayList<String>(Arrays.asList(alias));
-      pathToAliases.put(pathDir, aliases);
-      pathToPartitionInfo.put(pathDir, partitionInfo);
-    } else {
-      aliases.add(alias);
-    }
+  public void setMapWork(MapWork mapWork) {
+    this.mapWork = mapWork;
   }
 
-  /**
-   * @return the mapredLocalWork
-   */
-  @Explain(displayName = "Local Work")
-  public MapredLocalWork getMapLocalWork() {
-    return mapLocalWork;
+  @Explain(skipHeader = true, displayName = "Reduce")
+  public ReduceWork getReduceWork() {
+    return reduceWork;
   }
 
-  /**
-   * @param mapLocalWork
-   *          the mapredLocalWork to set
-   */
-  public void setMapLocalWork(final MapredLocalWork mapLocalWork) {
-    this.mapLocalWork = mapLocalWork;
+  public void setReduceWork(ReduceWork reduceWork) {
+    this.reduceWork = reduceWork;
   }
 
-  public TableDesc getKeyDesc() {
-    return keyDesc;
-  }
-
-  /**
-   * If the plan has a reducer and correspondingly a reduce-sink, then store the TableDesc pointing
-   * to keySerializeInfo of the ReduceSink
-   *
-   * @param keyDesc
-   */
-  public void setKeyDesc(final TableDesc keyDesc) {
-    this.keyDesc = keyDesc;
-  }
-
-  public List<TableDesc> getTagToValueDesc() {
-    return tagToValueDesc;
-  }
-
-  public void setTagToValueDesc(final List<TableDesc> tagToValueDesc) {
-    this.tagToValueDesc = tagToValueDesc;
-  }
-
-  @Explain(displayName = "Reduce Operator Tree")
-  public Operator<?> getReducer() {
-    return reducer;
-  }
-
-  @Explain(displayName = "Split Sample")
-  public HashMap<String, SplitSample> getNameToSplitSample() {
-    return nameToSplitSample;
-  }
-
-  public void setNameToSplitSample(HashMap<String, SplitSample> nameToSplitSample) {
-    this.nameToSplitSample = nameToSplitSample;
-  }
-
-  public void setReducer(final Operator<?> reducer) {
-    this.reducer = reducer;
-  }
-
-  public Integer getNumMapTasks() {
-    return numMapTasks;
-  }
-
-  public void setNumMapTasks(Integer numMapTasks) {
-    this.numMapTasks = numMapTasks;
-  }
-
-  /**
-   * If the number of reducers is -1, the runtime will automatically figure it
-   * out by input data size.
-   *
-   * The number of reducers will be a positive number only in case the target
-   * table is bucketed into N buckets (through CREATE TABLE). This feature is
-   * not supported yet, so the number of reducers will always be -1 for now.
-   */
-  public Integer getNumReduceTasks() {
-    return numReduceTasks;
-  }
-
-  public void setNumReduceTasks(final Integer numReduceTasks) {
-    this.numReduceTasks = numReduceTasks;
-  }
-
-  @Explain(displayName = "Path -> Bucketed Columns", normalExplain = false)
-  public Map<String, List<BucketCol>> getBucketedColsByDirectory() {
-    return bucketedColsByDirectory;
+  public boolean isFinalMapRed() {
+    return finalMapRed;
   }
 
-  @Explain(displayName = "Path -> Sorted Columns", normalExplain = false)
-  public Map<String, List<SortCol>> getSortedColsByDirectory() {
-    return sortedColsByDirectory;
+  public void setFinalMapRed(boolean finalMapRed) {
+    this.finalMapRed = finalMapRed;
   }
 
-  @SuppressWarnings("nls")
-  public void addMapWork(String path, String alias, Operator<?> work,
-      PartitionDesc pd) {
-    ArrayList<String> curAliases = pathToAliases.get(path);
-    if (curAliases == null) {
-      assert (pathToPartitionInfo.get(path) == null);
-      curAliases = new ArrayList<String>();
-      pathToAliases.put(path, curAliases);
-      pathToPartitionInfo.put(path, pd);
-    } else {
-      assert (pathToPartitionInfo.get(path) != null);
+  public void configureJobConf(JobConf job) {
+    mapWork.configureJobConf(job);
+    if (reduceWork != null) {
+      reduceWork.configureJobConf(job);
     }
-
-    for (String oneAlias : curAliases) {
-      if (oneAlias.equals(alias)) {
-        throw new RuntimeException("Multiple aliases named: " + alias
-            + " for path: " + path);
-      }
-    }
-    curAliases.add(alias);
-
-    if (aliasToWork.get(alias) != null) {
-      throw new RuntimeException("Existing work for alias: " + alias);
-    }
-    aliasToWork.put(alias, work);
   }
 
-  @SuppressWarnings("nls")
-  public String isInvalid() {
-    if ((getNumReduceTasks() >= 1) && (getReducer() == null)) {
-      return "Reducers > 0 but no reduce operator";
-    }
-
-    if ((getNumReduceTasks() == 0) && (getReducer() != null)) {
-      return "Reducers == 0 but reduce operator specified";
+  public List<Operator<?>> getAllOperators() {
+    List<Operator<?>> ops = new ArrayList<Operator<?>>();
+    ops.addAll(mapWork.getAllOperators());
+    if (reduceWork != null) {
+      ops.addAll(reduceWork.getAllOperators());
     }
 
-    return null;
+    return ops;
   }
 
   public String toXML() {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    Utilities.serializeMapRedWork(this, baos);
+    Utilities.serializeObject(this, baos);
     return (baos.toString());
   }
-
-  // non bean
-
-  /**
-   * For each map side operator - stores the alias the operator is working on
-   * behalf of in the operator runtime state. This is used by reducesink
-   * operator - but could be useful for debugging as well.
-   */
-  private void setAliases() {
-    if(aliasToWork == null) {
-      return;
-    }
-    for (String oneAlias : aliasToWork.keySet()) {
-      aliasToWork.get(oneAlias).setAlias(oneAlias);
-    }
-  }
-
-  /**
-   * Derive additional attributes to be rendered by EXPLAIN.
-   */
-  public void deriveExplainAttributes() {
-    if (pathToPartitionInfo != null) {
-      for (Map.Entry<String, PartitionDesc> entry : pathToPartitionInfo
-          .entrySet()) {
-        entry.getValue().deriveBaseFileName(entry.getKey());
-      }
-    }
-    if (mapLocalWork != null) {
-      mapLocalWork.deriveExplainAttributes();
-    }
-  }
-
-  public void initialize() {
-    setAliases();
-  }
-
-  @Explain(displayName = "Needs Tagging", normalExplain = false)
-  public boolean getNeedsTagging() {
-    return needsTagging;
-  }
-
-  public void setNeedsTagging(boolean needsTagging) {
-    this.needsTagging = needsTagging;
-  }
-
-  public boolean getHadoopSupportsSplittable() {
-    return hadoopSupportsSplittable;
-  }
-
-  public void setHadoopSupportsSplittable(boolean hadoopSupportsSplittable) {
-    this.hadoopSupportsSplittable = hadoopSupportsSplittable;
-  }
-
-  public Long getMaxSplitSize() {
-    return maxSplitSize;
-  }
-
-  public void setMaxSplitSize(Long maxSplitSize) {
-    this.maxSplitSize = maxSplitSize;
-  }
-
-  public Long getMinSplitSize() {
-    return minSplitSize;
-  }
-
-  public void setMinSplitSize(Long minSplitSize) {
-    this.minSplitSize = minSplitSize;
-  }
-
-  public Long getMinSplitSizePerNode() {
-    return minSplitSizePerNode;
-  }
-
-  public void setMinSplitSizePerNode(Long minSplitSizePerNode) {
-    this.minSplitSizePerNode = minSplitSizePerNode;
-  }
-
-  public Long getMinSplitSizePerRack() {
-    return minSplitSizePerRack;
-  }
-
-  public void setMinSplitSizePerRack(Long minSplitSizePerRack) {
-    this.minSplitSizePerRack = minSplitSizePerRack;
-  }
-
-  public String getInputformat() {
-    return inputformat;
-  }
-
-  public void setInputformat(String inputformat) {
-    this.inputformat = inputformat;
-  }
-
-  public String getIndexIntermediateFile() {
-    return indexIntermediateFile;
-  }
-
-  public void addIndexIntermediateFile(String fileName) {
-    if (this.indexIntermediateFile == null) {
-      this.indexIntermediateFile = fileName;
-    } else {
-      this.indexIntermediateFile += "," + fileName;
-    }
-  }
-
-  public void setGatheringStats(boolean gatherStats) {
-    this.gatheringStats = gatherStats;
-  }
-
-  public boolean isGatheringStats() {
-    return this.gatheringStats;
-  }
-
-  public void setMapperCannotSpanPartns(boolean mapperCannotSpanPartns) {
-    this.mapperCannotSpanPartns = mapperCannotSpanPartns;
-  }
-
-  public boolean isMapperCannotSpanPartns() {
-    return this.mapperCannotSpanPartns;
-  }
-
-  public String getTmpHDFSFileURI() {
-    return tmpHDFSFileURI;
-  }
-
-  public void setTmpHDFSFileURI(String tmpHDFSFileURI) {
-    this.tmpHDFSFileURI = tmpHDFSFileURI;
-  }
-
-
-  public QBJoinTree getJoinTree() {
-    return joinTree;
-  }
-
-  public void setJoinTree(QBJoinTree joinTree) {
-    this.joinTree = joinTree;
-  }
-
-  public
-    LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> getOpParseCtxMap() {
-    return opParseCtxMap;
-  }
-
-  public void setOpParseCtxMap(
-    LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtxMap) {
-    this.opParseCtxMap = opParseCtxMap;
-  }
-
-  public boolean isInputFormatSorted() {
-    return inputFormatSorted;
-  }
-
-  public void setInputFormatSorted(boolean inputFormatSorted) {
-    this.inputFormatSorted = inputFormatSorted;
-  }
-
-  public void resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf, Path path,
-      TableDesc tblDesc, ArrayList<String> aliases, PartitionDesc partDesc) {
-    pathToAliases.put(path.toString(), aliases);
-    pathToPartitionInfo.put(path.toString(), partDesc);
-  }
-
-  public List<Operator<?>> getAllOperators() {
-    ArrayList<Operator<?>> opList = new ArrayList<Operator<?>>();
-    ArrayList<Operator<?>> returnList = new ArrayList<Operator<?>>();
-
-    if (getReducer() != null) {
-      opList.add(getReducer());
-    }
-
-    Map<String, ArrayList<String>> pa = getPathToAliases();
-    if (pa != null) {
-      for (List<String> ls : pa.values()) {
-        for (String a : ls) {
-          Operator<?> op = getAliasToWork().get(a);
-          if (op != null ) {
-            opList.add(op);
-          }
-        }
-      }
-    }
-
-    //recursively add all children
-    while (!opList.isEmpty()) {
-      Operator<?> op = opList.remove(0);
-      if (op.getChildOperators() != null) {
-        opList.addAll(op.getChildOperators());
-      }
-      returnList.add(op);
-    }
-
-    return returnList;
-  }
-
-  public boolean isUseBucketizedHiveInputFormat() {
-    return useBucketizedHiveInputFormat;
-  }
-
-  public void setUseBucketizedHiveInputFormat(boolean useBucketizedHiveInputFormat) {
-    this.useBucketizedHiveInputFormat = useBucketizedHiveInputFormat;
-  }
-
-  public boolean isFinalMapRed() {
-    return finalMapRed;
-  }
-
-  public void setFinalMapRed(boolean finalMapRed) {
-    this.finalMapRed = finalMapRed;
-  }
-
-  public void configureJobConf(JobConf jobConf) {
-    for (PartitionDesc partition : aliasToPartnInfo.values()) {
-      PlanUtils.configureJobConf(partition.getTableDesc(), jobConf);
-    }
-    Collection<Operator<?>> mappers = aliasToWork.values();
-    for (FileSinkOperator fs : OperatorUtils.findOperators(mappers, FileSinkOperator.class)) {
-      PlanUtils.configureJobConf(fs.getConf().getTableInfo(), jobConf);
-    }
-    if (reducer != null) {
-      for (FileSinkOperator fs : OperatorUtils.findOperators(reducer, FileSinkOperator.class)) {
-        PlanUtils.configureJobConf(fs.getConf().getTableInfo(), jobConf);
-      }
-    }
-  }
 }
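
After this rewrite MapredWork is a thin wrapper: the map-side plan lives in MapWork, the optional reduce side in ReduceWork, and only the finalMapRed flag plus a few delegating methods remain. For orientation, a sketch of how a caller migrates from the old flat accessors (illustrative only; task, targetSize and job are stand-ins, and only methods visible in this diff are used):

  // Hypothetical migration of a call site.
  MapredWork mrWork = (MapredWork) task.getWork();
  MapWork mapWork = mrWork.getMapWork();            // map-side plan, created eagerly by the wrapper
  mapWork.setMaxSplitSize(targetSize);              // split sizing now lives on MapWork

  if (mrWork.getReduceWork() == null) {
    // Map-only job: the reduce side is simply absent, rather than being
    // signalled by numReduceTasks == 0 on a flat MapredWork as before.
  }

  mrWork.configureJobConf(job);                     // delegates to MapWork and, if set, ReduceWork
  List<Operator<?>> ops = mrWork.getAllOperators(); // both operator trees, concatenated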

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java Mon Jul 29 21:08:03 2013
@@ -80,21 +80,19 @@ public class PTFDeserializer {
   public void initializePTFChain(PartitionedTableFunctionDef tblFnDef) throws HiveException {
     Stack<PTFInputDef> ptfChain = new Stack<PTFInputDef>();
     PTFInputDef currentDef = tblFnDef;
-    while (currentDef != null ) {
+    while (currentDef != null) {
       ptfChain.push(currentDef);
       currentDef = currentDef.getInput();
     }
 
-    while ( !ptfChain.isEmpty() ) {
+    while (!ptfChain.isEmpty()) {
       currentDef = ptfChain.pop();
-      if ( currentDef instanceof PTFQueryInputDef) {
-        initialize((PTFQueryInputDef)currentDef, inputOI);
-      }
-      else if ( currentDef instanceof WindowTableFunctionDef) {
-        initializeWindowing((WindowTableFunctionDef)currentDef);
-      }
-      else {
-        initialize((PartitionedTableFunctionDef)currentDef);
+      if (currentDef instanceof PTFQueryInputDef) {
+        initialize((PTFQueryInputDef) currentDef, inputOI);
+      } else if (currentDef instanceof WindowTableFunctionDef) {
+        initializeWindowing((WindowTableFunctionDef) currentDef);
+      } else {
+        initialize((PartitionedTableFunctionDef) currentDef);
       }
     }
   }
@@ -114,16 +112,16 @@ public class PTFDeserializer {
     /*
      * 2. initialize WFns.
      */
-    if ( def.getWindowFunctions() != null ) {
-      for(WindowFunctionDef wFnDef : def.getWindowFunctions() ) {
+    if (def.getWindowFunctions() != null) {
+      for (WindowFunctionDef wFnDef : def.getWindowFunctions()) {
 
-        if ( wFnDef.getArgs() != null ) {
-          for(PTFExpressionDef arg : wFnDef.getArgs()) {
+        if (wFnDef.getArgs() != null) {
+          for (PTFExpressionDef arg : wFnDef.getArgs()) {
             initialize(arg, inpShape);
           }
         }
 
-        if ( wFnDef.getWindowFrame() != null ) {
+        if (wFnDef.getWindowFrame() != null) {
           WindowFrameDef wFrmDef = wFnDef.getWindowFrame();
           initialize(wFrmDef.getStart(), inpShape);
           initialize(wFrmDef.getEnd(), inpShape);
@@ -132,10 +130,10 @@ public class PTFDeserializer {
       }
       ArrayList<String> aliases = new ArrayList<String>();
       ArrayList<ObjectInspector> fieldOIs = new ArrayList<ObjectInspector>();
-      for(WindowFunctionDef wFnDef : def.getWindowFunctions()) {
+      for (WindowFunctionDef wFnDef : def.getWindowFunctions()) {
         aliases.add(wFnDef.getAlias());
-        if ( wFnDef.isPivotResult() ) {
-          fieldOIs.add(((ListObjectInspector)wFnDef.getOI()).getListElementObjectInspector());
+        if (wFnDef.isPivotResult()) {
+          fieldOIs.add(((ListObjectInspector) wFnDef.getOI()).getListElementObjectInspector());
         } else {
           fieldOIs.add(wFnDef.getOI());
         }
@@ -145,8 +143,7 @@ public class PTFDeserializer {
           aliases, fieldOIs);
       tResolver.setWdwProcessingOutputOI(wdwOutOI);
       initialize(def.getOutputFromWdwFnProcessing(), wdwOutOI);
-    }
-    else {
+    } else {
       def.setOutputFromWdwFnProcessing(inpShape);
     }
 
@@ -155,8 +152,8 @@ public class PTFDeserializer {
     /*
      * 3. initialize WExprs. + having clause
      */
-    if ( def.getWindowExpressions() != null ) {
-      for(WindowExpressionDef wEDef : def.getWindowExpressions()) {
+    if (def.getWindowExpressions() != null) {
+      for (WindowExpressionDef wEDef : def.getWindowExpressions()) {
         initialize(wEDef, inpShape);
       }
     }
@@ -171,7 +168,7 @@ public class PTFDeserializer {
      * If we have windowExpressions then we convert to Std. Object to process;
      * we just stream these rows; no need to put in an output Partition.
      */
-    if ( def.getWindowExpressions().size() > 0  ) {
+    if (def.getWindowExpressions().size() > 0) {
       StructObjectInspector oi = (StructObjectInspector)
           ObjectInspectorUtils.getStandardObjectInspector(def.getOutputShape().getOI());
       def.getOutputShape().setOI(oi);
@@ -189,8 +186,8 @@ public class PTFDeserializer {
     /*
      * 1. initialize args
      */
-    if (def.getArgs() != null ) {
-      for(PTFExpressionDef arg : def.getArgs()) {
+    if (def.getArgs() != null) {
+      for (PTFExpressionDef arg : def.getArgs()) {
         initialize(arg, inpShape);
       }
     }
@@ -199,19 +196,17 @@ public class PTFDeserializer {
      * 2. setup resolve, make connections
      */
     TableFunctionEvaluator tEval = def.getTFunction();
-    //TableFunctionResolver tResolver = FunctionRegistry.getTableFunctionResolver(def.getName());
+    // TableFunctionResolver tResolver = FunctionRegistry.getTableFunctionResolver(def.getName());
     TableFunctionResolver tResolver = constructResolver(def.getResolverClassName());
     tResolver.initialize(ptfDesc, def, tEval);
 
     /*
      * 3. give Evaluator chance to setup for RawInput execution; setup RawInput shape
      */
-    if (tEval.isTransformsRawInput())
-    {
+    if (tEval.isTransformsRawInput()) {
       tResolver.initializeRawInputOI();
       initialize(def.getRawInputShape(), tEval.getRawInputOI());
-    }
-    else {
+    } else {
       def.setRawInputShape(inpShape);
     }
 
@@ -224,8 +219,7 @@ public class PTFDeserializer {
     initialize(def.getOutputShape(), tEval.getOutputOI());
   }
 
-  static void setupWdwFnEvaluator(WindowFunctionDef def) throws HiveException
-  {
+  static void setupWdwFnEvaluator(WindowFunctionDef def) throws HiveException {
     ArrayList<PTFExpressionDef> args = def.getArgs();
     ArrayList<ObjectInspector> argOIs = new ArrayList<ObjectInspector>();
     ObjectInspector[] funcArgOIs = null;
@@ -245,7 +239,7 @@ public class PTFDeserializer {
   }
 
   protected void initialize(BoundaryDef def, ShapeDetails inpShape) throws HiveException {
-    if ( def instanceof ValueBoundaryDef ) {
+    if (def instanceof ValueBoundaryDef) {
       ValueBoundaryDef vDef = (ValueBoundaryDef) def;
       initialize(vDef.getExpressionDef(), inpShape);
     }
@@ -262,8 +256,7 @@ public class PTFDeserializer {
   private ObjectInspector initExprNodeEvaluator(ExprNodeEvaluator exprEval,
       ExprNodeDesc exprNode,
       ShapeDetails inpShape)
-      throws HiveException
-  {
+      throws HiveException {
     ObjectInspector outOI;
     outOI = exprEval.initialize(inpShape.getOI());
 
@@ -274,10 +267,8 @@ public class PTFDeserializer {
      * evaluator on the LLUDF instance.
      */
     List<ExprNodeGenericFuncDesc> llFuncExprs = llInfo.getLLFuncExprsInTopExpr(exprNode);
-    if (llFuncExprs != null)
-    {
-      for (ExprNodeGenericFuncDesc llFuncExpr : llFuncExprs)
-      {
+    if (llFuncExprs != null) {
+      for (ExprNodeGenericFuncDesc llFuncExpr : llFuncExprs) {
         ExprNodeDesc firstArg = llFuncExpr.getChildren().get(0);
         ExprNodeEvaluator dupExprEval = WindowingExprNodeEvaluatorFactory.get(llInfo, firstArg);
         dupExprEval.initialize(inpShape.getOI());
@@ -302,8 +293,7 @@ public class PTFDeserializer {
       serDe.initialize(hConf, serDeProps);
       shp.setSerde(serDe);
       shp.setOI((StructObjectInspector) serDe.getObjectInspector());
-    }
-    catch (SerDeException se)
+    } catch (SerDeException se)
     {
       throw new HiveException(se);
     }
@@ -324,19 +314,18 @@ public class PTFDeserializer {
     try {
       @SuppressWarnings("unchecked")
       Class<? extends TableFunctionResolver> rCls = (Class<? extends TableFunctionResolver>)
-        Class.forName(className);
+          Class.forName(className);
       return (TableFunctionResolver) ReflectionUtils.newInstance(rCls, null);
-    }
-    catch(Exception e) {
+    } catch (Exception e) {
       throw new HiveException(e);
     }
   }
 
   @SuppressWarnings({"unchecked"})
   public static void addOIPropertiestoSerDePropsMap(StructObjectInspector OI,
-      Map<String,String> serdePropsMap) {
+      Map<String, String> serdePropsMap) {
 
-    if ( serdePropsMap == null ) {
+    if (serdePropsMap == null) {
       return;
     }
 
@@ -368,7 +357,7 @@ public class PTFDeserializer {
     ArrayList<String> fnames = t.getAllStructFieldNames();
     ArrayList<TypeInfo> fields = t.getAllStructFieldTypeInfos();
     return new ArrayList<?>[]
-    { fnames, fields };
+    {fnames, fields};
   }
 
 }


