hive-commits mailing list archives

From: amareshw...@apache.org
Subject: svn commit: r1476039 [9/22] - in /hive/branches/HIVE-4115: ./ beeline/ beeline/src/java/org/apache/hive/beeline/ bin/ builtins/ cli/ common/src/java/org/apache/hadoop/hive/conf/ conf/ data/files/ eclipse-templates/ hbase-handler/ hbase-handler/src/java...
Date: Fri, 26 Apr 2013 04:59:58 GMT
Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java Fri Apr 26 04:59:50 2013
@@ -44,6 +44,7 @@ import org.apache.hadoop.hive.ql.exec.Op
 import org.apache.hadoop.hive.ql.exec.OperatorFactory;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
+import org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.ScriptOperator;
 import org.apache.hadoop.hive.ql.exec.SelectOperator;
 import org.apache.hadoop.hive.ql.exec.UnionOperator;
@@ -74,6 +75,7 @@ import org.apache.hadoop.hive.ql.plan.Op
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
+import org.apache.hadoop.hive.ql.plan.SMBJoinDesc;
 import org.apache.hadoop.hive.ql.plan.SelectDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -88,6 +90,10 @@ import org.apache.hadoop.hive.serde2.typ
 public class MapJoinProcessor implements Transform {
 
   private static final Log LOG = LogFactory.getLog(MapJoinProcessor.class.getName());
+  // mapjoin table descriptor contains a key descriptor which needs the field schema
+  // (column type + column name). The column name is not really used anywhere, but it
+  // needs to be passed. Use the string defined below for that.
+  private static final String MAPJOINKEY_FIELDPREFIX = "mapjoinkey";
 
   private ParseContext pGraphContext;
 
@@ -107,9 +113,11 @@ public class MapJoinProcessor implements
   }
 
   /**
-   * Generate the MapRed Local Work
+   * Generate the MapRed Local Work for the given map-join operator
+   *
    * @param newWork
    * @param mapJoinOp
+   *          map-join operator for which local work needs to be generated.
    * @param bigTablePos
    * @return
    * @throws SemanticException
@@ -219,15 +227,31 @@ public class MapJoinProcessor implements
     return bigTableAlias;
   }
 
+  /**
+   * Convert the join to a map-join and also generate any local work needed.
+   *
+   * @param newWork MapredWork in which the conversion is to happen
+   * @param op
+   *          The join operator that needs to be converted to map-join
+   * @param mapJoinPos
+   *          position of the big table in the join
+   * @return the alias to the big table
+   * @throws SemanticException
+   */
   public static String genMapJoinOpAndLocalWork(MapredWork newWork, JoinOperator op, int mapJoinPos)
-    throws SemanticException {
-    try {
-      LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtxMap =
+      throws SemanticException {
+    LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtxMap =
         newWork.getOpParseCtxMap();
-      QBJoinTree newJoinTree = newWork.getJoinTree();
-      // generate the map join operator; already checked the map join
-      MapJoinOperator newMapJoinOp = MapJoinProcessor.convertMapJoin(opParseCtxMap, op,
-          newJoinTree, mapJoinPos, true, false);
+    QBJoinTree newJoinTree = newWork.getJoinTree();
+    // generate the map join operator; already checked the map join
+    MapJoinOperator newMapJoinOp = MapJoinProcessor.convertMapJoin(opParseCtxMap, op,
+        newJoinTree, mapJoinPos, true, false);
+    return genLocalWorkForMapJoin(newWork, newMapJoinOp, mapJoinPos);
+  }
+
+  public static String genLocalWorkForMapJoin(MapredWork newWork, MapJoinOperator newMapJoinOp,
+      int mapJoinPos)
+      throws SemanticException {
+    try {
       // generate the local work and return the big table alias
       String bigTableAlias = MapJoinProcessor
           .genMapJoinLocalWork(newWork, newMapJoinOp, mapJoinPos);
@@ -422,7 +446,7 @@ public class MapJoinProcessor implements
     }
 
     TableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(PlanUtils
-        .getFieldSchemasFromColumnList(keyCols, "mapjoinkey"));
+        .getFieldSchemasFromColumnList(keyCols, MAPJOINKEY_FIELDPREFIX));
 
     List<TableDesc> valueTableDescs = new ArrayList<TableDesc>();
     List<TableDesc> valueFiltedTableDescs = new ArrayList<TableDesc>();
@@ -501,6 +525,65 @@ public class MapJoinProcessor implements
     return mapJoinOp;
   }
 
+  /**
+   * Convert a sort-merge join to a map-side join.
+   *
+   * @param opParseCtxMap
+   * @param smbJoinOp
+   *          join operator
+   * @param joinTree
+   *          qb join tree
+   * @param bigTablePos
+   *          position of the source to be read as part of the map-reduce framework. All other sources
+   *          are cached in memory
+   * @param noCheckOuterJoin
+   */
+  public static MapJoinOperator convertSMBJoinToMapJoin(
+    Map<Operator<? extends OperatorDesc>, OpParseContext> opParseCtxMap,
+    SMBMapJoinOperator smbJoinOp, QBJoinTree joinTree, int bigTablePos, boolean noCheckOuterJoin)
+    throws SemanticException {
+    // Create a new map join operator
+    SMBJoinDesc smbJoinDesc = smbJoinOp.getConf();
+    List<ExprNodeDesc> keyCols = smbJoinDesc.getKeys().get(Byte.valueOf((byte) 0));
+    TableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(PlanUtils
+        .getFieldSchemasFromColumnList(keyCols, MAPJOINKEY_FIELDPREFIX));
+    MapJoinDesc mapJoinDesc = new MapJoinDesc(smbJoinDesc.getKeys(),
+        keyTableDesc, smbJoinDesc.getExprs(),
+        smbJoinDesc.getValueTblDescs(), smbJoinDesc.getValueTblDescs(),
+        smbJoinDesc.getOutputColumnNames(),
+        bigTablePos, smbJoinDesc.getConds(),
+        smbJoinDesc.getFilters(), smbJoinDesc.isNoOuterJoin(), smbJoinDesc.getDumpFilePrefix());
+
+    RowResolver joinRS = opParseCtxMap.get(smbJoinOp).getRowResolver();
+    // The mapjoin has the same schema as the join operator
+    MapJoinOperator mapJoinOp = (MapJoinOperator) OperatorFactory.getAndMakeChild(
+        mapJoinDesc, joinRS.getRowSchema(),
+        new ArrayList<Operator<? extends OperatorDesc>>());
+
+    OpParseContext ctx = new OpParseContext(joinRS);
+    opParseCtxMap.put(mapJoinOp, ctx);
+
+    // change the children of the original join operator to point to the map
+    // join operator
+    List<Operator<? extends OperatorDesc>> childOps = smbJoinOp.getChildOperators();
+    for (Operator<? extends OperatorDesc> childOp : childOps) {
+      childOp.replaceParent(smbJoinOp, mapJoinOp);
+    }
+    mapJoinOp.setChildOperators(childOps);
+    smbJoinOp.setChildOperators(null);
+
+    // change the parent of the original SMBjoin operator to point to the map
+    // join operator
+    List<Operator<? extends OperatorDesc>> parentOps = smbJoinOp.getParentOperators();
+    for (Operator<? extends OperatorDesc> parentOp : parentOps) {
+      parentOp.replaceChild(smbJoinOp, mapJoinOp);
+    }
+    mapJoinOp.setParentOperators(parentOps);
+    smbJoinOp.setParentOperators(null);
+
+    return mapJoinOp;
+  }
+
   public MapJoinOperator generateMapJoinOperator(ParseContext pctx, JoinOperator op,
       QBJoinTree joinTree, int mapJoinPos) throws SemanticException {
     HiveConf hiveConf = pctx.getConf();

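The new convertSMBJoinToMapJoin path above rewrites a join that has already been planned as a sort-merge bucket join, i.e. an equi-join between tables that are bucketed and sorted on the join keys. A hypothetical table layout and query of that shape (all names are illustrative, not taken from this commit):

    CREATE TABLE t1 (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 32 BUCKETS;

    CREATE TABLE t2 (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 32 BUCKETS;

    SELECT t1.key, t1.value, t2.value
    FROM t1 JOIN t2 ON t1.key = t2.key;

For such a plan, the new method builds an equivalent MapJoinOperator from the SMBJoinDesc and splices it into the operator graph in place of the SMBMapJoinOperator.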
Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SizeBasedBigTableSelectorForAutoSMJ.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SizeBasedBigTableSelectorForAutoSMJ.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SizeBasedBigTableSelectorForAutoSMJ.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SizeBasedBigTableSelectorForAutoSMJ.java Fri Apr 26 04:59:50 2013
@@ -23,6 +23,7 @@ import java.util.List;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.CommonJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -43,9 +44,10 @@ public abstract class SizeBasedBigTableS
 
     for (Operator<? extends OperatorDesc> parentOp : op.getParentOperators()) {
       if (parentOp instanceof TableScanOperator) {
-        topOps.add((TableScanOperator)parentOp);
-      }
-      else {
+        topOps.add((TableScanOperator) parentOp);
+      } else if (parentOp instanceof CommonJoinOperator) {
+        topOps.add(null);
+      } else {
         getListTopOps(parentOp, topOps);
       }
     }

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java Fri Apr 26 04:59:50 2013
@@ -49,6 +49,9 @@ implements BigTableSelectorForAutoSMJ {
       getListTopOps(joinOp, topOps);
       int currentPos = 0;
       for (TableScanOperator topOp : topOps) {
+        if (topOp == null) {
+          return -1;
+        }
         Table table = parseCtx.getTopToTable().get(topOp);
         long currentSize = 0;
 

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinResolver.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinResolver.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinResolver.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinResolver.java Fri Apr 26 04:59:50 2013
@@ -17,46 +17,13 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.physical;
 
-import java.io.ByteArrayInputStream;
-import java.io.InputStream;
-import java.io.Serializable;
-import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
-import java.util.Stack;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.hive.common.ObjectPair;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.Context;
-import org.apache.hadoop.hive.ql.exec.ConditionalTask;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
-import org.apache.hadoop.hive.ql.exec.JoinOperator;
-import org.apache.hadoop.hive.ql.exec.MapRedTask;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.TaskFactory;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.lib.Dispatcher;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.TaskGraphWalker;
-import org.apache.hadoop.hive.ql.lib.TaskGraphWalker.TaskGraphWalkerContext;
-import org.apache.hadoop.hive.ql.optimizer.MapJoinProcessor;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.QBJoinTree;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin;
-import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin.ConditionalResolverCommonJoinCtx;
-import org.apache.hadoop.hive.ql.plan.ConditionalWork;
-import org.apache.hadoop.hive.ql.plan.JoinDesc;
-import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 
 /*
  * Convert tasks involving JOIN into MAPJOIN.
@@ -105,531 +72,11 @@ public class CommonJoinResolver implemen
     TaskGraphWalker ogw = new TaskGraphWalker(disp);
 
     // get all the tasks nodes from root task
-    ArrayList<Node> topNodes = new ArrayList<Node>();
+    List<Node> topNodes = new ArrayList<Node>();
     topNodes.addAll(pctx.rootTasks);
 
     // begin to walk through the task tree.
     ogw.startWalking(topNodes, null);
     return pctx;
   }
-
-  /**
-   * Iterator each tasks. If this task has a local work,create a new task for this local work, named
-   * MapredLocalTask. then make this new generated task depends on current task's parent task, and
-   * make current task depends on this new generated task
-   */
-  class CommonJoinTaskDispatcher implements Dispatcher {
-
-    HashMap<String, Long> aliasToSize = null;
-
-    private final PhysicalContext physicalContext;
-
-    public CommonJoinTaskDispatcher(PhysicalContext context) {
-      super();
-      physicalContext = context;
-    }
-
-    // Get the position of the big table for this join operator and the given alias
-    private int getPosition(MapredWork work, Operator<? extends OperatorDesc> joinOp,
-        String alias) {
-      Operator<? extends OperatorDesc> parentOp = work.getAliasToWork().get(alias);
-
-      // reduceSinkOperator's child is null, but joinOperator's parents is reduceSink
-      while ((parentOp.getChildOperators() != null) &&
-          (!parentOp.getChildOperators().isEmpty())) {
-        parentOp = parentOp.getChildOperators().get(0);
-      }
-
-      return joinOp.getParentOperators().indexOf(parentOp);
-    }
-
-    /*
-     * A task and its child task has been converted from join to mapjoin.
-     * See if the two tasks can be merged.
-     */
-    private void mergeMapJoinTaskWithChildMapJoinTask(MapRedTask task, Configuration conf) {
-      MapRedTask childTask = (MapRedTask)task.getChildTasks().get(0);
-      MapredWork work = task.getWork();
-      MapredLocalWork localWork = work.getMapLocalWork();
-      MapredWork childWork = childTask.getWork();
-      MapredLocalWork childLocalWork = childWork.getMapLocalWork();
-
-      // Can this be merged
-      Map<String, Operator<? extends OperatorDesc>> aliasToWork = work.getAliasToWork();
-      if (aliasToWork.size() > 1) {
-        return;
-      }
-
-      Operator<? extends OperatorDesc> op = aliasToWork.values().iterator().next();
-      while (op.getChildOperators() != null) {
-        // Dont perform this optimization for multi-table inserts
-        if (op.getChildOperators().size() > 1) {
-          return;
-        }
-        op = op.getChildOperators().get(0);
-      }
-
-      if (!(op instanceof FileSinkOperator)) {
-        return;
-      }
-
-      FileSinkOperator fop = (FileSinkOperator)op;
-      String workDir = fop.getConf().getDirName();
-
-      Map<String, ArrayList<String>> childPathToAliases = childWork.getPathToAliases();
-      if (childPathToAliases.size() > 1) {
-        return;
-      }
-
-      // The filesink writes to a different directory
-      if (!childPathToAliases.keySet().iterator().next().equals(workDir)) {
-        return;
-      }
-
-      // Either of them should not be bucketed
-      if ((localWork.getBucketMapjoinContext() != null) ||
-          (childLocalWork.getBucketMapjoinContext() != null)) {
-        return;
-      }
-
-      // Merge the trees
-      if (childWork.getAliasToWork().size() > 1) {
-        return;
-      }
-      long mapJoinSize = HiveConf.getLongVar(conf,
-          HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
-      long localTableTotalSize = 0;
-      for (String alias : localWork.getAliasToWork().keySet()) {
-        Long tabSize = aliasToSize.get(alias);
-        if (tabSize == null) {
-          /* if the size is unavailable, we need to assume a size 1 greater than mapJoinSize
-           * this implies that merge cannot happen so we can return.
-           */
-          return;
-        }
-        localTableTotalSize += tabSize;
-      }
-
-      for (String alias : childLocalWork.getAliasToWork().keySet()) {
-        Long tabSize = aliasToSize.get(alias);
-        if (tabSize == null) {
-          /* if the size is unavailable, we need to assume a size 1 greater than mapJoinSize
-           * this implies that merge cannot happen so we can return.
-           */
-          return;
-        }
-        localTableTotalSize += tabSize;
-        if (localTableTotalSize > mapJoinSize) {
-          return;
-        }
-      }
-
-      Operator<? extends Serializable> childAliasOp =
-          childWork.getAliasToWork().values().iterator().next();
-      if (fop.getParentOperators().size() > 1) {
-        return;
-      }
-
-      // Merge the 2 trees - remove the FileSinkOperator from the first tree pass it to the
-      // top of the second
-      Operator<? extends Serializable> parentFOp = fop.getParentOperators().get(0);
-      parentFOp.getChildOperators().remove(fop);
-      parentFOp.getChildOperators().add(childAliasOp);
-      List<Operator<? extends OperatorDesc>> parentOps =
-          new ArrayList<Operator<? extends OperatorDesc>>();
-      parentOps.add(parentFOp);
-      childAliasOp.setParentOperators(parentOps);
-
-      work.getAliasToPartnInfo().putAll(childWork.getAliasToPartnInfo());
-      for (Map.Entry<String, PartitionDesc> childWorkEntry :
-        childWork.getPathToPartitionInfo().entrySet()) {
-        if (childWork.getAliasToPartnInfo().containsValue(childWorkEntry.getKey())) {
-          work.getPathToPartitionInfo().put(childWorkEntry.getKey(), childWorkEntry.getValue());
-        }
-      }
-
-      localWork.getAliasToFetchWork().putAll(childLocalWork.getAliasToFetchWork());
-      localWork.getAliasToWork().putAll(childLocalWork.getAliasToWork());
-
-      // remove the child task
-      List<Task<? extends Serializable>> oldChildTasks = childTask.getChildTasks();
-      task.setChildTasks(oldChildTasks);
-      if (oldChildTasks != null) {
-        for (Task<? extends Serializable> oldChildTask : oldChildTasks) {
-          oldChildTask.getParentTasks().remove(childTask);
-          oldChildTask.getParentTasks().add(task);
-        }
-      }
-    }
-
-    // create map join task and set big table as bigTablePosition
-    private ObjectPair<MapRedTask, String> convertTaskToMapJoinTask(MapredWork newWork,
-        int bigTablePosition) throws SemanticException {
-      // create a mapred task for this work
-      MapRedTask newTask = (MapRedTask) TaskFactory.get(newWork, physicalContext
-          .getParseContext().getConf());
-      JoinOperator newJoinOp = getJoinOp(newTask);
-
-      // optimize this newWork and assume big table position is i
-      String bigTableAlias =
-          MapJoinProcessor.genMapJoinOpAndLocalWork(newWork, newJoinOp, bigTablePosition);
-      return new ObjectPair<MapRedTask, String>(newTask, bigTableAlias);
-    }
-
-    private Task<? extends Serializable> processCurrentTask(MapRedTask currTask,
-        ConditionalTask conditionalTask, Context context)
-        throws SemanticException {
-
-      // whether it contains common join op; if contains, return this common join op
-      JoinOperator joinOp = getJoinOp(currTask);
-      if (joinOp == null || joinOp.getConf().isFixedAsSorted()) {
-        return null;
-      }
-      currTask.setTaskTag(Task.COMMON_JOIN);
-
-      MapredWork currWork = currTask.getWork();
-
-      // create conditional work list and task list
-      List<Serializable> listWorks = new ArrayList<Serializable>();
-      List<Task<? extends Serializable>> listTasks = new ArrayList<Task<? extends Serializable>>();
-
-      // create alias to task mapping and alias to input file mapping for resolver
-      HashMap<String, Task<? extends Serializable>> aliasToTask = new HashMap<String, Task<? extends Serializable>>();
-      HashMap<String, ArrayList<String>> pathToAliases = currWork.getPathToAliases();
-      Map<String, Operator<? extends OperatorDesc>> aliasToWork = currWork.getAliasToWork();
-
-      // get parseCtx for this Join Operator
-      ParseContext parseCtx = physicalContext.getParseContext();
-      QBJoinTree joinTree = parseCtx.getJoinContext().get(joinOp);
-
-      // start to generate multiple map join tasks
-      JoinDesc joinDesc = joinOp.getConf();
-      Byte[] order = joinDesc.getTagOrder();
-      int numAliases = order.length;
-
-      long aliasTotalKnownInputSize = 0;
-
-      if (aliasToSize == null) {
-        aliasToSize = new HashMap<String, Long>();
-      }
-      try {
-        // go over all the input paths, and calculate a known total size, known
-        // size for each input alias.
-        Utilities.getInputSummary(context, currWork, null).getLength();
-
-        // set alias to size mapping, this can be used to determine if one table
-        // is choosen as big table, what's the total size of left tables, which
-        // are going to be small tables.
-        for (Map.Entry<String, ArrayList<String>> entry : pathToAliases.entrySet()) {
-          String path = entry.getKey();
-          List<String> aliasList = entry.getValue();
-          ContentSummary cs = context.getCS(path);
-          if (cs != null) {
-            long size = cs.getLength();
-            for (String alias : aliasList) {
-              aliasTotalKnownInputSize += size;
-              Long es = aliasToSize.get(alias);
-              if (es == null) {
-                es = new Long(0);
-              }
-              es += size;
-              aliasToSize.put(alias, es);
-            }
-          }
-        }
-
-        HashSet<Integer> bigTableCandidates = MapJoinProcessor.getBigTableCandidates(joinDesc.getConds());
-
-        // no table could be the big table; there is no need to convert
-        if (bigTableCandidates == null) {
-          return null;
-        }
-
-        Configuration conf = context.getConf();
-
-        // If sizes of atleast n-1 tables in a n-way join is known, and their sum is smaller than
-        // the threshold size, convert the join into map-join and don't create a conditional task
-        boolean convertJoinMapJoin = HiveConf.getBoolVar(conf,
-            HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASK);
-        int bigTablePosition = -1;
-        if (convertJoinMapJoin) {
-          // This is the threshold that the user has specified to fit in mapjoin
-          long mapJoinSize = HiveConf.getLongVar(conf,
-              HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
-
-          boolean bigTableFound = false;
-          long largestBigTableCandidateSize = 0;
-          long sumTableSizes = 0;
-          for (String alias : aliasToWork.keySet()) {
-            int tablePosition = getPosition(currWork, joinOp, alias);
-            boolean bigTableCandidate = bigTableCandidates.contains(tablePosition);
-            Long size = aliasToSize.get(alias);
-            // The size is not available at compile time if the input is a sub-query.
-            // If the size of atleast n-1 inputs for a n-way join are available at compile time,
-            // and the sum of them is less than the specified threshold, then convert the join
-            // into a map-join without the conditional task.
-            if ((size == null) || (size > mapJoinSize)) {
-              sumTableSizes += largestBigTableCandidateSize;
-              if (bigTableFound || (sumTableSizes > mapJoinSize) || !bigTableCandidate) {
-                convertJoinMapJoin = false;
-                break;
-              }
-              bigTableFound = true;
-              bigTablePosition = tablePosition;
-              largestBigTableCandidateSize = mapJoinSize + 1;
-            } else {
-              if (bigTableCandidate && size > largestBigTableCandidateSize) {
-                bigTablePosition = tablePosition;
-                sumTableSizes += largestBigTableCandidateSize;
-                largestBigTableCandidateSize = size;
-              }
-              else {
-                sumTableSizes += size;
-              }
-
-              if (sumTableSizes > mapJoinSize) {
-                convertJoinMapJoin = false;
-                break;
-              }
-            }
-          }
-        }
-
-        String bigTableAlias = null;
-        currWork.setOpParseCtxMap(parseCtx.getOpParseCtx());
-        currWork.setJoinTree(joinTree);
-
-        if (convertJoinMapJoin) {
-          // create map join task and set big table as bigTablePosition
-          MapRedTask newTask = convertTaskToMapJoinTask(currWork, bigTablePosition).getFirst();
-
-          newTask.setTaskTag(Task.MAPJOIN_ONLY_NOBACKUP);
-          replaceTask(currTask, newTask, physicalContext);
-
-          // Can this task be merged with the child task. This can happen if a big table is being
-          // joined with multiple small tables on different keys
-          // Further optimizations are possible here, a join which has been converted to a mapjoin
-          // followed by a mapjoin can be performed in a single MR job.
-          if ((newTask.getChildTasks() != null) && (newTask.getChildTasks().size() == 1)
-              && (newTask.getChildTasks().get(0).getTaskTag() == Task.MAPJOIN_ONLY_NOBACKUP)) {
-            mergeMapJoinTaskWithChildMapJoinTask(newTask, conf);
-          }
-
-          return newTask;
-        }
-
-        long ThresholdOfSmallTblSizeSum = HiveConf.getLongVar(conf,
-            HiveConf.ConfVars.HIVESMALLTABLESFILESIZE);
-        String xml = currWork.toXML();
-        for (int i = 0; i < numAliases; i++) {
-          // this table cannot be big table
-          if (!bigTableCandidates.contains(i)) {
-            continue;
-          }
-
-          // deep copy a new mapred work from xml
-          InputStream in = new ByteArrayInputStream(xml.getBytes("UTF-8"));
-          MapredWork newWork = Utilities.deserializeMapRedWork(in, physicalContext.getConf());
-
-          // create map join task and set big table as i
-          ObjectPair<MapRedTask, String> newTaskAlias = convertTaskToMapJoinTask(newWork, i);          
-          MapRedTask newTask = newTaskAlias.getFirst();
-          bigTableAlias = newTaskAlias.getSecond();
-
-          Long aliasKnownSize = aliasToSize.get(bigTableAlias);
-          if (aliasKnownSize != null && aliasKnownSize.longValue() > 0) {
-            long smallTblTotalKnownSize = aliasTotalKnownInputSize
-                - aliasKnownSize.longValue();
-            if(smallTblTotalKnownSize > ThresholdOfSmallTblSizeSum) {
-              //this table is not good to be a big table.
-              continue;
-            }
-          }
-
-          // add into conditional task
-          listWorks.add(newTask.getWork());
-          listTasks.add(newTask);
-          newTask.setTaskTag(Task.CONVERTED_MAPJOIN);
-
-          //set up backup task
-          newTask.setBackupTask(currTask);
-          newTask.setBackupChildrenTasks(currTask.getChildTasks());
-
-          // put the mapping alias to task
-          aliasToTask.put(bigTableAlias, newTask);
-        }
-      } catch (Exception e) {
-        e.printStackTrace();
-        throw new SemanticException("Generate Map Join Task Error: " + e.getMessage());
-      }
-
-      // insert current common join task to conditional task
-      listWorks.add(currTask.getWork());
-      listTasks.add(currTask);
-      // clear JoinTree and OP Parse Context
-      currWork.setOpParseCtxMap(null);
-      currWork.setJoinTree(null);
-
-      // create conditional task and insert conditional task into task tree
-      ConditionalWork cndWork = new ConditionalWork(listWorks);
-      ConditionalTask cndTsk = (ConditionalTask) TaskFactory.get(cndWork, parseCtx.getConf());
-      cndTsk.setListTasks(listTasks);
-
-      // set resolver and resolver context
-      cndTsk.setResolver(new ConditionalResolverCommonJoin());
-      ConditionalResolverCommonJoinCtx resolverCtx = new ConditionalResolverCommonJoinCtx();
-      resolverCtx.setPathToAliases(pathToAliases);
-      resolverCtx.setAliasToKnownSize(aliasToSize);
-      resolverCtx.setAliasToTask(aliasToTask);
-      resolverCtx.setCommonJoinTask(currTask);
-      resolverCtx.setLocalTmpDir(context.getLocalScratchDir(false));
-      resolverCtx.setHdfsTmpDir(context.getMRScratchDir());
-      cndTsk.setResolverCtx(resolverCtx);
-
-      //replace the current task with the new generated conditional task
-      this.replaceTaskWithConditionalTask(currTask, cndTsk, physicalContext);
-      return cndTsk;
-    }
-
-    private void replaceTaskWithConditionalTask(
-        Task<? extends Serializable> currTask, ConditionalTask cndTsk,
-        PhysicalContext physicalContext) {
-      // add this task into task tree
-      // set all parent tasks
-      List<Task<? extends Serializable>> parentTasks = currTask.getParentTasks();
-      currTask.setParentTasks(null);
-      if (parentTasks != null) {
-        for (Task<? extends Serializable> tsk : parentTasks) {
-          // make new generated task depends on all the parent tasks of current task.
-          tsk.addDependentTask(cndTsk);
-          // remove the current task from its original parent task's dependent task
-          tsk.removeDependentTask(currTask);
-        }
-      } else {
-        // remove from current root task and add conditional task to root tasks
-        physicalContext.removeFromRootTask(currTask);
-        physicalContext.addToRootTask(cndTsk);
-      }
-      // set all child tasks
-      List<Task<? extends Serializable>> oldChildTasks = currTask.getChildTasks();
-      if (oldChildTasks != null) {
-        for (Task<? extends Serializable> tsk : cndTsk.getListTasks()) {
-          if (tsk.equals(currTask)) {
-            continue;
-          }
-          for (Task<? extends Serializable> oldChild : oldChildTasks) {
-            tsk.addDependentTask(oldChild);
-          }
-        }
-      }
-    }
-
-    // Replace the task with the new task. Copy the children and parents of the old
-    // task to the new task.
-    private void replaceTask(
-        Task<? extends Serializable> currTask, Task<? extends Serializable> newTask,
-        PhysicalContext physicalContext) {
-      // add this task into task tree
-      // set all parent tasks
-      List<Task<? extends Serializable>> parentTasks = currTask.getParentTasks();
-      currTask.setParentTasks(null);
-      if (parentTasks != null) {
-        for (Task<? extends Serializable> tsk : parentTasks) {
-          // remove the current task from its original parent task's dependent task
-          tsk.removeDependentTask(currTask);
-          // make new generated task depends on all the parent tasks of current task.
-          tsk.addDependentTask(newTask);
-        }
-      } else {
-        // remove from current root task and add conditional task to root tasks
-        physicalContext.removeFromRootTask(currTask);
-        physicalContext.addToRootTask(newTask);
-      }
-
-      // set all child tasks
-      List<Task<? extends Serializable>> oldChildTasks = currTask.getChildTasks();
-      currTask.setChildTasks(null);
-      if (oldChildTasks != null) {
-        for (Task<? extends Serializable> tsk : oldChildTasks) {
-          // remove the current task from its original parent task's dependent task
-          tsk.getParentTasks().remove(currTask);
-          // make new generated task depends on all the parent tasks of current task.
-          newTask.addDependentTask(tsk);
-        }
-      }
-    }
-
-    @Override
-    public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs)
-        throws SemanticException {
-      if (nodeOutputs == null || nodeOutputs.length == 0) {
-        throw new SemanticException("No Dispatch Context");
-      }
-
-      TaskGraphWalkerContext walkerCtx = (TaskGraphWalkerContext) nodeOutputs[0];
-
-      Task<? extends Serializable> currTask = (Task<? extends Serializable>) nd;
-      // not map reduce task or not conditional task, just skip
-      if (currTask.isMapRedTask()) {
-        if (currTask instanceof ConditionalTask) {
-          // get the list of task
-          List<Task<? extends Serializable>> taskList = ((ConditionalTask) currTask).getListTasks();
-          for (Task<? extends Serializable> tsk : taskList) {
-            if (tsk.isMapRedTask()) {
-              Task<? extends Serializable> newTask = this.processCurrentTask((MapRedTask) tsk,
-                  ((ConditionalTask) currTask), physicalContext.getContext());
-              walkerCtx.addToDispatchList(newTask);
-            }
-          }
-        } else {
-          Task<? extends Serializable> newTask =
-              this.processCurrentTask((MapRedTask) currTask, null, physicalContext.getContext());
-          walkerCtx.addToDispatchList(newTask);
-        }
-      }
-      return null;
-    }
-
-    /*
-     * If any operator which does not allow map-side conversion is present in the mapper, dont
-     * convert it into a conditional task.
-     */
-    private boolean checkOperatorOKMapJoinConversion(Operator<? extends OperatorDesc> op) {
-      if (!op.opAllowedConvertMapJoin()) {
-        return false;
-      }
-
-      if (op.getChildOperators() == null) {
-        return true;
-      }
-
-      for (Operator<? extends OperatorDesc> childOp : op.getChildOperators()) {
-        if (!checkOperatorOKMapJoinConversion(childOp)) {
-          return false;
-        }
-      }
-
-      return true;
-    }
-
-    private JoinOperator getJoinOp(MapRedTask task) throws SemanticException {
-      MapredWork work = task.getWork();
-      if (work == null) {
-        return null;
-      }
-      Operator<? extends OperatorDesc> reducerOp = work.getReducer();
-      if (reducerOp instanceof JoinOperator) {
-        /* Is any operator present, which prevents the conversion */
-        Map<String, Operator<? extends OperatorDesc>> aliasToWork = work.getAliasToWork();
-        for (Operator<? extends OperatorDesc> op : aliasToWork.values()) {
-          if (!checkOperatorOKMapJoinConversion(op)) {
-            return null;
-          }
-        }
-        return (JoinOperator) reducerOp;
-      } else {
-        return null;
-      }
-    }
-  }
 }

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java Fri Apr 26 04:59:50 2013
@@ -51,7 +51,15 @@ public class PhysicalOptimizer {
     }
     if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN)) {
       resolvers.add(new CommonJoinResolver());
+
+      // The joins have been automatically converted to map-joins.
+      // However, if the joins were converted to sort-merge joins automatically,
+      // they should also be tried as map-joins.
+      if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN)) {
+        resolvers.add(new SortMergeJoinResolver());
+      }
     }
+
     if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER)) {
       resolvers.add(new IndexWhereResolver());
     }
@@ -61,7 +69,7 @@ public class PhysicalOptimizer {
     }
 
     // Physical optimizers which follow this need to be careful not to invalidate the inferences
-    // made by this optimizer.  Only optimizers which depend on the results of this one should
+    // made by this optimizer. Only optimizers which depend on the results of this one should
     // follow it.
     if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_INFER_BUCKET_SORT)) {
       resolvers.add(new BucketingSortingInferenceOptimizer());

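The SortMergeJoinResolver registered above only runs when map-join conversion is enabled and the sort-merge-to-map-join flag is set as well. A minimal session-level sketch, assuming the property names behind HIVECONVERTJOIN and HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN are hive.auto.convert.join and hive.auto.convert.sortmerge.join.to.mapjoin:

    set hive.auto.convert.join=true;
    set hive.auto.convert.sortmerge.join.to.mapjoin=true;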
Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcContext.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcContext.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcContext.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcContext.java Fri Apr 26 04:59:50 2013
@@ -40,7 +40,7 @@ public class UnionProcContext implements
     private final transient boolean[] mapOnlySubqSet;
     private final transient boolean[] rootTask;
 
-    private transient int numInputs;
+    private final transient int numInputs;
 
     public UnionParseContext(int numInputs) {
       this.numInputs = numInputs;
@@ -70,27 +70,22 @@ public class UnionProcContext implements
       return numInputs;
     }
 
-    public void setNumInputs(int numInputs) {
-      this.numInputs = numInputs;
-    }
-
     public boolean allMapOnlySubQ() {
-      if (mapOnlySubq != null) {
-        for (boolean mapOnly : mapOnlySubq) {
-          if (!mapOnly) {
-            return false;
-          }
-        }
-      }
-      return true;
+      return isAllTrue(mapOnlySubq);
     }
 
     public boolean allMapOnlySubQSet() {
-      if (mapOnlySubqSet != null) {
-        for (boolean mapOnlySet : mapOnlySubqSet) {
-          if (!mapOnlySet) {
-            return false;
-          }
+      return isAllTrue(mapOnlySubqSet);
+    }
+
+    public boolean allRootTasks() {
+      return isAllTrue(rootTask);
+    }
+
+    public boolean isAllTrue(boolean[] array) {
+      for (boolean value : array) {
+        if (!value) {
+          return false;
         }
       }
       return true;

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java Fri Apr 26 04:59:50 2013
@@ -138,20 +138,21 @@ public final class UnionProcFactory {
         }
         start--;
       }
+      assert parentUnionOperator != null;
 
       // default to false
       boolean mapOnly = false;
-      if (parentUnionOperator != null) {
-        UnionParseContext parentUCtx =
+      boolean rootTask = false;
+      UnionParseContext parentUCtx =
           ctx.getUnionParseContext(parentUnionOperator);
-        if (parentUCtx != null && parentUCtx.allMapOnlySubQSet()) {
-          mapOnly = parentUCtx.allMapOnlySubQ();
-        }
+      if (parentUCtx != null && parentUCtx.allMapOnlySubQSet()) {
+        mapOnly = parentUCtx.allMapOnlySubQ();
+        rootTask = parentUCtx.allRootTasks();
       }
 
       uCtx.setMapOnlySubq(pos, mapOnly);
 
-      uCtx.setRootTask(pos, false);
+      uCtx.setRootTask(pos, rootTask);
       ctx.setUnionParseContext(union, uCtx);
       return null;
     }

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Fri Apr 26 04:59:50 2013
@@ -43,6 +43,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -75,8 +76,10 @@ import org.apache.hadoop.hive.ql.plan.Ad
 import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
 import org.apache.hadoop.hive.ql.plan.AlterIndexDesc.AlterIndexTypes;
+import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
+import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
 import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
@@ -126,7 +129,6 @@ import org.apache.hadoop.hive.serde.serd
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
-import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc;
 
 /**
  * DDLSemanticAnalyzer.
@@ -405,6 +407,9 @@ public class DDLSemanticAnalyzer extends
     case HiveParser.TOK_ALTERTABLE_SKEWED:
       analyzeAltertableSkewedby(ast);
       break;
+    case HiveParser.TOK_EXCHANGEPARTITION:
+      analyzeExchangePartition(ast);
+      break;
     default:
       throw new SemanticException("Unsupported command.");
     }
@@ -663,6 +668,69 @@ public class DDLSemanticAnalyzer extends
 
   }
 
+  private void analyzeExchangePartition(ASTNode ast) throws SemanticException {
+    Table sourceTable =  getTable(getUnescapedName((ASTNode)ast.getChild(0)));
+    Table destTable = getTable(getUnescapedName((ASTNode)ast.getChild(2)));
+
+    // Get the partition specs
+    Map<String, String> partSpecs = getPartSpec((ASTNode) ast.getChild(1));
+    validatePartitionValues(partSpecs);
+    boolean sameColumns = MetaStoreUtils.compareFieldColumns(
+        sourceTable.getAllCols(), destTable.getAllCols());
+    boolean samePartitions = MetaStoreUtils.compareFieldColumns(
+        sourceTable.getPartitionKeys(), destTable.getPartitionKeys());
+    if (!sameColumns || !samePartitions) {
+      throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg());
+    }
+    List<Partition> partitions = getPartitions(sourceTable, partSpecs, true);
+
+    // Verify that the partitions specified are continuous
+    // If a subpartition value is specified without specifying a partition's value
+    // then we throw an exception
+    if (!isPartitionValueContinuous(sourceTable.getPartitionKeys(), partSpecs)) {
+      throw new SemanticException(
+          ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partSpecs.toString()));
+    }
+    List<Partition> destPartitions = null;
+    try {
+      destPartitions = getPartitions(destTable, partSpecs, true);
+    } catch (SemanticException ex) {
+      // We expect a semantic exception to be thrown, as this partition
+      // should not be present.
+    }
+    if (destPartitions != null) {
+      // If any destination partition is present then throw a Semantic Exception.
+      throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString()));
+    }
+    AlterTableExchangePartition alterTableExchangePartition =
+      new AlterTableExchangePartition(sourceTable, destTable, partSpecs);
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+      alterTableExchangePartition), conf));
+  }
+
+  /**
+   * @param partitionKeys the list of partition keys of the table
+   * @param partSpecs the partition specs given by the user
+   * @return true if no subpartition value is specified without its parent
+   *         partition's value also being specified; false otherwise
+   */
+  private boolean isPartitionValueContinuous(List<FieldSchema> partitionKeys,
+      Map<String, String> partSpecs) {
+    boolean partitionMissing = false;
+    for (FieldSchema partitionKey: partitionKeys) {
+      if (!partSpecs.containsKey(partitionKey.getName())) {
+        partitionMissing = true;
+      } else {
+        if (partitionMissing) {
+          // A subpartition value exists after a missing partition
+          // The partition values specified are not continuous, so return false
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
   private void analyzeCreateDatabase(ASTNode ast) throws SemanticException {
     String dbName = unescapeIdentifier(ast.getChild(0).getText());
     boolean ifNotExists = false;
@@ -781,7 +849,148 @@ public class DDLSemanticAnalyzer extends
     }
 
     TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, partSpec);
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), truncateTblDesc), conf));
+
+    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc);
+    Task<? extends Serializable> truncateTask = TaskFactory.get(ddlWork, conf);
+
+    // Is this a truncate column command
+    List<String> columnNames = null;
+    if (ast.getChildCount() == 2) {
+      try {
+        columnNames = getColumnNames((ASTNode)ast.getChild(1));
+
+        // Throw an error if the table is indexed
+        List<Index> indexes = db.getIndexes(table.getDbName(), tableName, (short)1);
+        if (indexes != null && indexes.size() > 0) {
+          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg());
+        }
+
+        List<String> bucketCols = null;
+        Class<? extends InputFormat> inputFormatClass = null;
+        boolean isArchived = false;
+        Path newTblPartLoc = null;
+        Path oldTblPartLoc = null;
+        List<FieldSchema> cols = null;
+        ListBucketingCtx lbCtx = null;
+        boolean isListBucketed = false;
+        List<String> listBucketColNames = null;
+
+        if (table.isPartitioned()) {
+          Partition part = db.getPartition(table, partSpec, false);
+
+          Path tabPath = table.getPath();
+          Path partPath = part.getPartitionPath();
+
+          // if the table is in a different dfs than the partition,
+          // replace the partition's dfs with the table's dfs.
+          newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
+              .getAuthority(), partPath.toUri().getPath());
+
+          oldTblPartLoc = partPath;
+
+          cols = part.getCols();
+          bucketCols = part.getBucketCols();
+          inputFormatClass = part.getInputFormatClass();
+          isArchived = ArchiveUtils.isArchived(part);
+          lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(),
+              part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf);
+          isListBucketed = part.isStoredAsSubDirectories();
+          listBucketColNames = part.getSkewedColNames();
+        } else {
+          // input and output are the same
+          oldTblPartLoc = table.getPath();
+          newTblPartLoc = table.getPath();
+          cols  = table.getCols();
+          bucketCols = table.getBucketCols();
+          inputFormatClass = table.getInputFormatClass();
+          lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(),
+              table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories(), conf);
+          isListBucketed = table.isStoredAsSubDirectories();
+          listBucketColNames = table.getSkewedColNames();
+        }
+
+        // throw a HiveException for non-rcfile.
+        if (!inputFormatClass.equals(RCFileInputFormat.class)) {
+          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg());
+        }
+
+        // throw a HiveException if the table/partition is archived
+        if (isArchived) {
+          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg());
+        }
+
+        Set<Integer> columnIndexes = new HashSet<Integer>();
+        for (String columnName : columnNames) {
+          boolean found = false;
+          for (int columnIndex = 0; columnIndex < cols.size(); columnIndex++) {
+            if (columnName.equalsIgnoreCase(cols.get(columnIndex).getName())) {
+              columnIndexes.add(columnIndex);
+              found = true;
+              break;
+            }
+          }
+          // Throw an exception if the user is trying to truncate a column which doesn't exist
+          if (!found) {
+            throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(columnName));
+          }
+          // Throw an exception if the table/partition is bucketed on one of the columns
+          for (String bucketCol : bucketCols) {
+            if (bucketCol.equalsIgnoreCase(columnName)) {
+              throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName));
+            }
+          }
+          if (isListBucketed) {
+            for (String listBucketCol : listBucketColNames) {
+              if (listBucketCol.equalsIgnoreCase(columnName)) {
+                throw new SemanticException(
+                    ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName));
+              }
+            }
+          }
+        }
+
+        truncateTblDesc.setColumnIndexes(new ArrayList<Integer>(columnIndexes));
+
+        truncateTblDesc.setInputDir(oldTblPartLoc.toString());
+        addInputsOutputsAlterTable(tableName, partSpec);
+
+        truncateTblDesc.setLbCtx(lbCtx);
+
+        addInputsOutputsAlterTable(tableName, partSpec);
+        ddlWork.setNeedLock(true);
+        TableDesc tblDesc = Utilities.getTableDesc(table);
+        // Write the output to a temporary directory and move it to the final location at the end
+        // so the operation is atomic.
+        String queryTmpdir = ctx.getExternalTmpFileURI(newTblPartLoc.toUri());
+        truncateTblDesc.setOutputDir(queryTmpdir);
+        LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, queryTmpdir, tblDesc,
+            partSpec == null ? new HashMap<String, String>() : partSpec);
+        ltd.setLbCtx(lbCtx);
+        Task<MoveWork> moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false),
+            conf);
+        truncateTask.addDependentTask(moveTsk);
+
+        // Recalculate the HDFS stats if auto gather stats is set
+        if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+          StatsWork statDesc;
+          if (oldTblPartLoc.equals(newTblPartLoc)) {
+            // If we're merging to the same location, we can avoid some metastore calls
+            tableSpec tablepart = new tableSpec(this.db, conf, root);
+            statDesc = new StatsWork(tablepart);
+          } else {
+            statDesc = new StatsWork(ltd);
+          }
+          statDesc.setNoStatsAggregator(true);
+          statDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
+          Task<? extends Serializable> statTask = TaskFactory.get(statDesc, conf);
+          moveTsk.addDependentTask(statTask);
+        }
+      } catch (HiveException e) {
+        throw new SemanticException(e);
+      }
+    }
+
+    rootTasks.add(truncateTask);
   }
 
   private boolean isFullSpec(Table table, Map<String, String> partSpec) {

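To illustrate the continuity check in isPartitionValueContinuous above: for a pair of tables partitioned by (ds STRING, hr STRING), a partition spec that fixes a leading prefix of the partition keys passes the check, while a spec that supplies a subpartition value without its parent does not. A sketch with hypothetical table names:

    -- accepted: ds is given, hr may be omitted
    ALTER TABLE t1 EXCHANGE PARTITION (ds='2013-04-26') WITH TABLE t2;

    -- rejected with PARTITION_VALUE_NOT_CONTINUOUS: hr is given without ds
    ALTER TABLE t1 EXCHANGE PARTITION (hr='12') WITH TABLE t2;

Both tables must also have identical column and partition-key schemas, and the partition must not already exist in the table named after WITH TABLE.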
Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g Fri Apr 26 04:59:50 2013
@@ -258,6 +258,7 @@ KW_PARTIALSCAN: 'PARTIALSCAN';
 KW_USER: 'USER';
 KW_ROLE: 'ROLE';
 KW_INNER: 'INNER';
+KW_EXCHANGE: 'EXCHANGE';
 
 // Operators
 // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work.

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Fri Apr 26 04:59:50 2013
@@ -295,6 +295,7 @@ TOK_WINDOWSPEC;
 TOK_WINDOWVALUES;
 TOK_WINDOWRANGE;
 TOK_IGNOREPROTECTION;
+TOK_EXCHANGEPARTITION;
 }
 
 
@@ -756,7 +757,7 @@ createTableStatement
 truncateTableStatement
 @init { msgs.push("truncate table statement"); }
 @after { msgs.pop(); }
-    : KW_TRUNCATE KW_TABLE tablePartitionPrefix -> ^(TOK_TRUNCATETABLE tablePartitionPrefix);
+    : KW_TRUNCATE KW_TABLE tablePartitionPrefix (KW_COLUMNS LPAREN columnNameList RPAREN)? -> ^(TOK_TRUNCATETABLE tablePartitionPrefix columnNameList?);
 
 createIndexStatement
 @init { msgs.push("create index statement");}
@@ -867,6 +868,7 @@ alterTableStatementSuffix
     | alterStatementSuffixProperties
     | alterTblPartitionStatement
     | alterStatementSuffixSkewedby
+    | alterStatementSuffixExchangePartition
     ;
 
 alterViewStatementSuffix
@@ -1103,6 +1105,13 @@ alterStatementSuffixSkewedby
 	->^(TOK_ALTERTABLE_SKEWED $name storedAsDirs)
 	;
 
+alterStatementSuffixExchangePartition
+@init {msgs.push("alter exchange partition");}
+@after{msgs.pop();}
+    : name=tableName KW_EXCHANGE partitionSpec KW_WITH KW_TABLE exchangename=tableName
+    -> ^(TOK_EXCHANGEPARTITION $name partitionSpec $exchangename)
+    ;
+
 alterStatementSuffixProtectMode
 @init { msgs.push("alter partition protect mode statement"); }
 @after { msgs.pop(); }

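The extended truncateTableStatement rule (the EXCHANGE PARTITION form is illustrated after the DDLSemanticAnalyzer changes above) accepts an optional column list, so a statement shaped roughly like the following now parses (table, column, and partition names are hypothetical):

    TRUNCATE TABLE page_views PARTITION (ds='2013-04-26') COLUMNS (user_id, referrer);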
Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Fri Apr 26 04:59:50 2013
@@ -2428,6 +2428,7 @@ public class SemanticAnalyzer extends Ba
       posn++;
     }
 
+    boolean subQuery = qb.getParseInfo().getIsSubQ();
     boolean isInTransform = (selExprList.getChild(posn).getChild(0).getType() ==
         HiveParser.TOK_TRANSFORM);
     if (isInTransform) {
@@ -2463,6 +2464,10 @@ public class SemanticAnalyzer extends Ba
         unparseTranslator.addIdentifierTranslation((ASTNode) udtfExpr
             .getChild(0));
       }
+      if (isUDTF && (selectStar = udtfExprType == HiveParser.TOK_FUNCTIONSTAR)) {
+        genColListRegex(".*", null, (ASTNode) udtfExpr.getChild(0),
+            col_list, inputRR, pos, out_rwsch, qb.getAliases(), subQuery);
+      }
     }
 
     if (isUDTF) {
@@ -2567,7 +2572,6 @@ public class SemanticAnalyzer extends Ba
 
       }
 
-      boolean subQuery = qb.getParseInfo().getIsSubQ();
       if (expr.getType() == HiveParser.TOK_ALLCOLREF) {
         pos = genColListRegex(".*", expr.getChildCount() == 0 ? null
             : getUnescapedName((ASTNode) expr.getChild(0)).toLowerCase(),
@@ -5982,6 +5986,7 @@ public class SemanticAnalyzer extends Ba
             reduceKeys.size(), numReds), new RowSchema(outputRS
             .getColumnInfos()), child), outputRS);
     rsOp.setColumnExprMap(colExprMap);
+    rsOp.setInputAlias(srcName);
     return rsOp;
   }
 
@@ -8075,9 +8080,6 @@ public class SemanticAnalyzer extends Ba
     RowResolver lvForwardRR = new RowResolver();
     RowResolver source = opParseCtx.get(op).getRowResolver();
     for (ColumnInfo col : source.getColumnInfos()) {
-      if (col.getIsVirtualCol() && col.isHiddenVirtualCol()) {
-        continue;
-      }
       String[] tabCol = source.reverseLookup(col.getInternalName());
       lvForwardRR.put(tabCol[0], tabCol[1], col);
     }
@@ -8161,7 +8163,7 @@ public class SemanticAnalyzer extends Ba
       String internalName = getColumnInternalName(outputInternalColNames.size());
       outputInternalColNames.add(internalName);
       ColumnInfo newCol = new ColumnInfo(internalName, c.getType(), c
-          .getTabAlias(), c.getIsVirtualCol());
+          .getTabAlias(), c.getIsVirtualCol(), c.isHiddenVirtualCol());
       String[] tableCol = source.reverseLookup(c.getInternalName());
       String tableAlias = tableCol[0];
       String colAlias = tableCol[1];
@@ -8371,7 +8373,7 @@ public class SemanticAnalyzer extends Ba
 
     // For each task, set the key descriptor for the reducer
     for (Task<? extends Serializable> rootTask : rootTasks) {
-      setKeyDescTaskTree(rootTask);
+      GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask);
     }
 
     // If a task contains an operator which instructs bucketizedhiveinputformat
@@ -8597,36 +8599,6 @@ public class SemanticAnalyzer extends Ba
     }
   }
 
-  // loop over all the tasks recursviely
-  private void setKeyDescTaskTree(Task<? extends Serializable> task) {
-
-    if (task instanceof ExecDriver) {
-      MapredWork work = (MapredWork) task.getWork();
-      work.deriveExplainAttributes();
-      HashMap<String, Operator<? extends OperatorDesc>> opMap = work
-          .getAliasToWork();
-      if (!opMap.isEmpty()) {
-        for (Operator<? extends OperatorDesc> op : opMap.values()) {
-          GenMapRedUtils.setKeyAndValueDesc(work, op);
-        }
-      }
-    } else if (task instanceof ConditionalTask) {
-      List<Task<? extends Serializable>> listTasks = ((ConditionalTask) task)
-          .getListTasks();
-      for (Task<? extends Serializable> tsk : listTasks) {
-        setKeyDescTaskTree(tsk);
-      }
-    }
-
-    if (task.getChildTasks() == null) {
-      return;
-    }
-
-    for (Task<? extends Serializable> childTask : task.getChildTasks()) {
-      setKeyDescTaskTree(childTask);
-    }
-  }
-
   @SuppressWarnings("nls")
   public Phase1Ctx initPhase1Ctx() {
 
@@ -10661,6 +10633,7 @@ public class SemanticAnalyzer extends Ba
       {
         RowResolver ptfMapRR = tabDef.getRawInputShape().getRr();
 
+        ptfDesc.setMapSide(true);
         input = putOpInsertMap(OperatorFactory.getAndMakeChild(ptfDesc,
             new RowSchema(ptfMapRR.getColumnInfos()),
             input), ptfMapRR);

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java Fri Apr 26 04:59:50 2013
@@ -202,6 +202,7 @@ public final class SemanticAnalyzerFacto
       case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
       case HiveParser.TOK_ALTERTABLE_SKEWED:
       case HiveParser.TOK_TRUNCATETABLE:
+      case HiveParser.TOK_EXCHANGEPARTITION:
         return new DDLSemanticAnalyzer(conf);
       case HiveParser.TOK_ALTERTABLE_PARTITION:
         HiveOperation commandType = null;

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java Fri Apr 26 04:59:50 2013
@@ -71,7 +71,7 @@ public class ConditionalResolverCommonJo
     }
 
     public HashMap<String, Long> getAliasToKnownSize() {
-      return aliasToKnownSize;
+      return aliasToKnownSize == null ? new HashMap<String, Long>() : aliasToKnownSize;
     }
 
     public void setAliasToKnownSize(HashMap<String, Long> aliasToKnownSize) {
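
A note on the null guard above: callers iterate over this map when sizing the tables for the common-join conditional task, and a context created or deserialized before any sizes were recorded previously handed them a null. A minimal caller-side sketch, where ctx is assumed to be the enclosing ConditionalResolverCommonJoinCtx instance:

    // Safe even when no alias sizes were ever recorded: the getter now returns
    // an empty map instead of null.
    java.util.HashMap<String, Long> sizes = ctx.getAliasToKnownSize();
    long total = 0L;
    for (java.util.Map.Entry<String, Long> e : sizes.entrySet()) {
      total += e.getValue() == null ? 0L : e.getValue();
    }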

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java Fri Apr 26 04:59:50 2013
@@ -64,6 +64,7 @@ public class DDLWork implements Serializ
   private AlterDatabaseDesc alterDbDesc;
   private AlterTableAlterPartDesc alterTableAlterPartDesc;
   private TruncateTableDesc truncateTblDesc;
+  private AlterTableExchangePartition alterTableExchangePartition;
 
   private RoleDDLDesc roleDDLDesc;
   private GrantDesc grantDesc;
@@ -449,6 +450,12 @@ public class DDLWork implements Serializ
     this.alterTableAlterPartDesc = alterPartDesc;
   }
 
+  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+      AlterTableExchangePartition alterTableExchangePartition) {
+    this(inputs, outputs);
+    this.alterTableExchangePartition = alterTableExchangePartition;
+  }
+
     /**
    * @return Create Database descriptor
    */
@@ -1025,4 +1032,20 @@ public class DDLWork implements Serializ
   public void setTruncateTblDesc(TruncateTableDesc truncateTblDesc) {
     this.truncateTblDesc = truncateTblDesc;
   }
+
+  /**
+   * @return information about the table partition to be exchanged
+   */
+  public AlterTableExchangePartition getAlterTableExchangePartition() {
+    return this.alterTableExchangePartition;
+  }
+
+  /**
+   * @param alterTableExchangePartition
+   *          set the value of the table partition to be exchanged
+   */
+  public void setAlterTableExchangePartition(
+      AlterTableExchangePartition alterTableExchangePartition) {
+    this.alterTableExchangePartition = alterTableExchangePartition;
+  }
 }
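
The new constructor above is the hook for turning a parsed TOK_EXCHANGEPARTITION tree into an executable DDL task. A hedged sketch of that wiring inside a semantic analyzer follows; the diff does not show how AlterTableExchangePartition itself is built, so its constructor arguments (sourceTable, destTable, partSpec) are assumptions, while the DDLWork constructor and TaskFactory call are existing APIs.

    // Sketch: hand the exchange descriptor to a DDLTask. Only the
    // DDLWork(inputs, outputs, alterTableExchangePartition) constructor comes
    // from this patch; the descriptor's own arguments are illustrative.
    AlterTableExchangePartition exchangeDesc =
        new AlterTableExchangePartition(sourceTable, destTable, partSpec);
    rootTasks.add(TaskFactory.get(
        new DDLWork(getInputs(), getOutputs(), exchangeDesc), conf));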

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java Fri Apr 26 04:59:50 2013
@@ -50,6 +50,16 @@ public class FileSinkDesc extends Abstra
   private String staticSpec; // static partition spec ends with a '/'
   private boolean gatherStats;
 
+  // Consider a query like:
+  // insert overwrite table T3 select ... from T1 join T2 on T1.key = T2.key;
+  // where T1, T2 and T3 are sorted and bucketed by key into the same number of buckets,
+  // We dont need a reducer to enforce bucketing and sorting for T3.
+  // The field below captures the fact that the reducer introduced to enforce sorting/
+  // bucketing of T3 has been removed.
+  // In this case, a sort-merge join is needed, and so the sort-merge join between T1 and T2
+  // cannot be performed as a map-only job
+  private transient boolean removedReduceSinkBucketSort;
+
   // This file descriptor is linked to other file descriptors.
   // One use case is that, a union->select (star)->file sink, is broken down.
   // For eg: consider a query like:
@@ -364,4 +374,11 @@ public class FileSinkDesc extends Abstra
     this.statsCollectRawDataSize = statsCollectRawDataSize;
   }
 
+  public boolean isRemovedReduceSinkBucketSort() {
+    return removedReduceSinkBucketSort;
+  }
+
+  public void setRemovedReduceSinkBucketSort(boolean removedReduceSinkBucketSort) {
+    this.removedReduceSinkBucketSort = removedReduceSinkBucketSort;
+  }
 }
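
The comment added above explains when removedReduceSinkBucketSort is set; the reason for carrying it on the plan is so that a later physical optimization can check it before deciding whether the sort-merge join feeding this sink may run as a map-only job. A minimal consumer-side sketch (fsOp and convertToMapOnly are assumed names; only the getter comes from the patch):

    FileSinkDesc fsDesc = fsOp.getConf();
    if (fsDesc.isRemovedReduceSinkBucketSort()) {
      // The reducer that would have enforced the target table's bucketing and
      // sorting was removed, so keep the sort-merge join out of a map-only job.
      convertToMapOnly = false;
    }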

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java Fri Apr 26 04:59:50 2013
@@ -35,13 +35,13 @@ public class GroupByDesc extends Abstrac
    * PARTIAL1: partial aggregation - first phase: iterate, terminatePartial
    * PARTIAL2: partial aggregation - second phase: merge, terminatePartial
    * PARTIALS: For non-distinct the same as PARTIAL2, for distinct the same as
-   *           PARTIAL1
+   * PARTIAL1
    * FINAL: partial aggregation - final phase: merge, terminate
    * HASH: For non-distinct the same as PARTIAL1 but use hash-table-based aggregation
    * MERGEPARTIAL: FINAL for non-distinct aggregations, COMPLETE for distinct
    * aggregations.
    */
-  private static final long serialVersionUID = 1L;
+  private static long serialVersionUID = 1L;
 
   /**
    * Mode.
@@ -66,6 +66,7 @@ public class GroupByDesc extends Abstrac
   private float groupByMemoryUsage;
   private float memoryThreshold;
   transient private boolean isDistinct;
+  private boolean dontResetAggrsDistinct;
 
   public GroupByDesc() {
   }
@@ -83,8 +84,8 @@ public class GroupByDesc extends Abstrac
       final int groupingSetsPosition,
       final boolean isDistinct) {
     this(mode, outputColumnNames, keys, aggregators, groupKeyNotReductionKey,
-      false, groupByMemoryUsage, memoryThreshold, listGroupingSets,
-      groupingSetsPresent, groupingSetsPosition, isDistinct);
+        false, groupByMemoryUsage, memoryThreshold, listGroupingSets,
+        groupingSetsPresent, groupingSetsPosition, isDistinct);
   }
 
   public GroupByDesc(
@@ -212,11 +213,11 @@ public class GroupByDesc extends Abstrac
    */
   public boolean isDistinctLike() {
     ArrayList<AggregationDesc> aggregators = getAggregators();
-    for(AggregationDesc ad: aggregators){
-      if(!ad.getDistinct()) {
+    for (AggregationDesc ad : aggregators) {
+      if (!ad.getDistinct()) {
         GenericUDAFEvaluator udafEval = ad.getGenericUDAFEvaluator();
         UDFType annot = udafEval.getClass().getAnnotation(UDFType.class);
-        if(annot == null || !annot.distinctLike()) {
+        if (annot == null || !annot.distinctLike()) {
           return false;
         }
       }
@@ -257,4 +258,16 @@ public class GroupByDesc extends Abstrac
   public boolean isDistinct() {
     return isDistinct;
   }
+
+  public void setDistinct(boolean isDistinct) {
+    this.isDistinct = isDistinct;
+  }
+
+  public boolean isDontResetAggrsDistinct() {
+    return dontResetAggrsDistinct;
+  }
+
+  public void setDontResetAggrsDistinct(boolean dontResetAggrsDistinct) {
+    this.dontResetAggrsDistinct = dontResetAggrsDistinct;
+  }
 }

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java Fri Apr 26 04:59:50 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.plan;
 
 import java.io.ByteArrayOutputStream;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
@@ -29,13 +30,16 @@ import java.util.Map.Entry;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.OperatorUtils;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol;
 import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol;
 import org.apache.hadoop.hive.ql.parse.OpParseContext;
 import org.apache.hadoop.hive.ql.parse.QBJoinTree;
 import org.apache.hadoop.hive.ql.parse.SplitSample;
+import org.apache.hadoop.mapred.JobConf;
 
 /**
  * MapredWork.
@@ -239,6 +243,12 @@ public class MapredWork extends Abstract
     return keyDesc;
   }
 
+  /**
+   * If the plan has a reducer and correspondingly a reduce-sink, then store the TableDesc pointing
+   * to keySerializeInfo of the ReduceSink
+   *
+   * @param keyDesc
+   */
   public void setKeyDesc(final TableDesc keyDesc) {
     this.keyDesc = keyDesc;
   }
@@ -557,4 +567,19 @@ public class MapredWork extends Abstract
   public void setFinalMapRed(boolean finalMapRed) {
     this.finalMapRed = finalMapRed;
   }
+
+  public void configureJobConf(JobConf jobConf) {
+    for (PartitionDesc partition : aliasToPartnInfo.values()) {
+      PlanUtils.configureJobConf(partition.getTableDesc(), jobConf);
+    }
+    Collection<Operator<?>> mappers = aliasToWork.values();
+    for (FileSinkOperator fs : OperatorUtils.findOperators(mappers, FileSinkOperator.class)) {
+      PlanUtils.configureJobConf(fs.getConf().getTableInfo(), jobConf);
+    }
+    if (reducer != null) {
+      for (FileSinkOperator fs : OperatorUtils.findOperators(reducer, FileSinkOperator.class)) {
+        PlanUtils.configureJobConf(fs.getConf().getTableInfo(), jobConf);
+      }
+    }
+  }
 }
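
configureJobConf gives every storage handler referenced by the plan, via the input partitions as well as the map-side and reduce-side file sinks, a chance to contribute settings to the job configuration. A one-line usage sketch, assuming work is the MapredWork being launched and job is the JobConf about to be submitted (for example from ExecDriver):

    // Let table-level storage handlers add whatever properties, jars or
    // credentials they need before the job is submitted.
    work.configureJobConf(job);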

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java Fri Apr 26 04:59:50 2013
@@ -46,6 +46,10 @@ public class PTFDesc extends AbstractOpe
 
   PartitionedTableFunctionDef funcDef;
   LeadLagInfo llInfo;
+  /*
+   * is this PTFDesc for a Map-Side PTF Operation?
+   */
+  boolean isMapSide = false;
 
   static{
     PTFUtils.makeTransient(PTFDesc.class, "llInfo");
@@ -75,6 +79,14 @@ public class PTFDesc extends AbstractOpe
     return funcDef != null && (funcDef instanceof WindowTableFunctionDef);
   }
 
+  public boolean isMapSide() {
+    return isMapSide;
+  }
+
+  public void setMapSide(boolean isMapSide) {
+    this.isMapSide = isMapSide;
+  }
+
   public abstract static class PTFInputDef {
     String expressionTreeString;
     ShapeDetails outputShape;
@@ -255,10 +267,7 @@ public class PTFDesc extends AbstractOpe
     transient TypeCheckCtx typeCheckCtx;
 
     static{
-      PTFUtils.makeTransient(ShapeDetails.class, "serde");
-      PTFUtils.makeTransient(ShapeDetails.class, "OI");
-      PTFUtils.makeTransient(ShapeDetails.class, "rr");
-      PTFUtils.makeTransient(ShapeDetails.class, "typeCheckCtx");
+      PTFUtils.makeTransient(ShapeDetails.class, "OI", "serde", "rr", "typeCheckCtx");
     }
 
     public String getSerdeClassName() {
@@ -588,8 +597,7 @@ public class PTFDesc extends AbstractOpe
     transient ObjectInspector OI;
 
     static{
-      PTFUtils.makeTransient(PTFExpressionDef.class, "exprEvaluator");
-      PTFUtils.makeTransient(PTFExpressionDef.class, "OI");
+      PTFUtils.makeTransient(PTFExpressionDef.class, "exprEvaluator", "OI");
     }
 
     public PTFExpressionDef() {}

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java Fri Apr 26 04:59:50 2013
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.serde2.laz
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
@@ -302,8 +303,8 @@ public final class PlanUtils {
     return new TableDesc(MetadataTypedColumnsetSerDe.class,
         TextInputFormat.class, IgnoreKeyTextOutputFormat.class, Utilities
         .makeProperties(
-        org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,
-        separatorCode));
+            org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,
+            separatorCode));
   }
 
   /**
@@ -729,6 +730,19 @@ public final class PlanUtils {
     }
   }
 
+  public static void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
+    String handlerClass = tableDesc.getProperties().getProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE);
+    try {
+      HiveStorageHandler storageHandler = HiveUtils.getStorageHandler(jobConf, handlerClass);
+      if (storageHandler != null) {
+        storageHandler.configureJobConf(tableDesc, jobConf);
+      }
+    } catch (HiveException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
   public static String stripQuotes(String val) {
     if ((val.charAt(0) == '\'' && val.charAt(val.length() - 1) == '\'')
         || (val.charAt(0) == '\"' && val.charAt(val.length() - 1) == '\"')) {
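
The helper above looks up the storage handler named by the table's META_TABLE_STORAGE property and forwards the JobConf to it. On the other side of that call a handler implements the callback; a hedged sketch of such an implementation is shown below. ExampleStorageHandler and the example.connection.url property are illustrative only, and the sketch assumes DefaultStorageHandler in this branch already provides a configureJobConf method that can be overridden.

    import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
    import org.apache.hadoop.hive.ql.plan.TableDesc;
    import org.apache.hadoop.mapred.JobConf;

    public class ExampleStorageHandler extends DefaultStorageHandler {
      @Override
      public void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
        // Propagate a table-level setting to the MapReduce tasks; a real handler
        // might also ship client jars or credentials here.
        String url = tableDesc.getProperties().getProperty("example.connection.url");
        if (url != null) {
          jobConf.set("example.connection.url", url);
        }
      }
    }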

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java Fri Apr 26 04:59:50 2013
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.plan;
 
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -30,6 +31,10 @@ public class TruncateTableDesc extends D
 
   private String tableName;
   private Map<String, String> partSpec;
+  private List<Integer> columnIndexes;
+  private String inputDir;
+  private String outputDir;
+  private ListBucketingCtx lbCtx;
 
   public TruncateTableDesc() {
   }
@@ -56,4 +61,37 @@ public class TruncateTableDesc extends D
   public void setPartSpec(Map<String, String> partSpec) {
     this.partSpec = partSpec;
   }
+
+  @Explain(displayName = "Column Indexes")
+  public List<Integer> getColumnIndexes() {
+    return columnIndexes;
+  }
+
+  public void setColumnIndexes(List<Integer> columnIndexes) {
+    this.columnIndexes = columnIndexes;
+  }
+
+  public String getInputDir() {
+    return inputDir;
+  }
+
+  public void setInputDir(String inputDir) {
+    this.inputDir = inputDir;
+  }
+
+  public String getOutputDir() {
+    return outputDir;
+  }
+
+  public void setOutputDir(String outputDir) {
+    this.outputDir = outputDir;
+  }
+
+  public ListBucketingCtx getLbCtx() {
+    return lbCtx;
+  }
+
+  public void setLbCtx(ListBucketingCtx lbCtx) {
+    this.lbCtx = lbCtx;
+  }
 }
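
With the fields added above, a column-level truncation can be described entirely by the descriptor: which column positions to blank out, where the input data lives, where the rewritten files are staged, and the list-bucketing context if the partition uses one. A small sketch using only the setters visible in this diff; the index, directory and context values are illustrative assumptions.

    TruncateTableDesc truncateDesc = new TruncateTableDesc();
    truncateDesc.setPartSpec(partSpec);                        // partition being truncated
    truncateDesc.setColumnIndexes(java.util.Arrays.asList(2)); // zero-based positions to clear
    truncateDesc.setInputDir(oldPartitionDir);                 // data read by the truncation job
    truncateDesc.setOutputDir(stagingDir);                     // rewritten files land here
    truncateDesc.setLbCtx(lbCtx);                              // list-bucketing context, if any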

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java Fri Apr 26 04:59:50 2013
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.ql.ppd;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hive.ql.exec.Fi
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorFactory;
+import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -501,12 +503,19 @@ public final class OpProcFactory {
         Object... nodeOutputs) throws SemanticException {
       LOG.info("Processing for " + nd.getName() + "("
           + ((Operator) nd).getIdentifier() + ")");
+      ReduceSinkOperator rs = (ReduceSinkOperator) nd;
       OpWalkerInfo owi = (OpWalkerInfo) procCtx;
-      Set<String> aliases = owi.getRowResolver(nd).getTableNames();
+
+      Set<String> aliases;
       boolean ignoreAliases = false;
-      if (aliases.size() == 1 && aliases.contains("")) {
-        // Reduce sink of group by operator
-        ignoreAliases = true;
+      if (rs.getInputAlias() != null) {
+        aliases = new HashSet<String>(Arrays.asList(rs.getInputAlias()));
+      } else {
+        aliases = owi.getRowResolver(nd).getTableNames();
+        if (aliases.size() == 1 && aliases.contains("")) {
+          // Reduce sink of group by operator
+          ignoreAliases = true;
+        }
       }
       boolean hasUnpushedPredicates = mergeWithChildrenPred(nd, owi, null, aliases, ignoreAliases);
       if (HiveConf.getBoolVar(owi.getParseContext().getConf(),

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java Fri Apr 26 04:59:50 2013
@@ -191,19 +191,6 @@ public class SessionState {
     ls = new LineageState();
     overriddenConfigurations = new HashMap<String, String>();
     overriddenConfigurations.putAll(HiveConf.getConfSystemProperties());
-
-    // Register the Hive builtins jar and all of its functions
-    try {
-      Class<?> pluginClass = Utilities.getBuiltinUtilsClass();
-      URL jarLocation = pluginClass.getProtectionDomain().getCodeSource()
-        .getLocation();
-      add_builtin_resource(
-        ResourceType.JAR, jarLocation.toString());
-      FunctionRegistry.registerFunctionsFromPluginJar(
-        jarLocation, pluginClass.getClassLoader());
-    } catch (Exception ex) {
-      throw new RuntimeException("Failed to load Hive builtin functions", ex);
-    }
   }
 
   public void setCmd(String cmdString) {

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java Fri Apr 26 04:59:50 2013
@@ -24,49 +24,56 @@ import org.apache.hadoop.hive.ql.exec.PT
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+import org.apache.hadoop.io.IntWritable;
 
 public abstract class GenericUDFLeadLag extends GenericUDF
 {
 	transient ExprNodeEvaluator exprEvaluator;
 	transient PTFPartitionIterator<Object> pItr;
-	ObjectInspector firstArgOI;
-
-	private PrimitiveObjectInspector amtOI;
+	transient ObjectInspector firstArgOI;
+	transient ObjectInspector defaultArgOI;
+	transient Converter defaultValueConverter;
+	int amt;
 
 	static{
-		PTFUtils.makeTransient(GenericUDFLeadLag.class, "exprEvaluator");
-		PTFUtils.makeTransient(GenericUDFLeadLag.class, "pItr");
+		PTFUtils.makeTransient(GenericUDFLeadLag.class, "exprEvaluator", "pItr",
+        "firstArgOI", "defaultArgOI", "defaultValueConverter");
 	}
 
 	@Override
 	public Object evaluate(DeferredObject[] arguments) throws HiveException
 	{
-		DeferredObject amt = arguments[1];
-		int intAmt = 0;
-		try
-		{
-			intAmt = PrimitiveObjectInspectorUtils.getInt(amt.get(), amtOI);
-		}
-		catch (NullPointerException e)
-		{
-			intAmt = Integer.MAX_VALUE;
-		}
-		catch (NumberFormatException e)
-		{
-			intAmt = Integer.MAX_VALUE;
-		}
+    Object defaultVal = null;
+    if(arguments.length == 3){
+      defaultVal =  ObjectInspectorUtils.copyToStandardObject(
+          defaultValueConverter.convert(arguments[2].get()),
+          defaultArgOI);
+    }
 
 		int idx = pItr.getIndex() - 1;
+		int start = 0;
+		int end = pItr.getPartition().size();
 		try
 		{
-			Object row = getRow(intAmt);
-			Object ret = exprEvaluator.evaluate(row);
-			ret = ObjectInspectorUtils.copyToStandardObject(ret, firstArgOI, ObjectInspectorCopyOption.WRITABLE);
+		  Object ret = null;
+		  int newIdx = getIndex(amt);
+
+		  if(newIdx >= end || newIdx < start) {
+        ret = defaultVal;
+		  }
+		  else {
+        Object row = getRow(amt);
+        ret = exprEvaluator.evaluate(row);
+        ret = ObjectInspectorUtils.copyToStandardObject(ret,
+            firstArgOI, ObjectInspectorCopyOption.WRITABLE);
+		  }
 			return ret;
 		}
 		finally
@@ -83,25 +90,41 @@ public abstract class GenericUDFLeadLag 
 	public ObjectInspector initialize(ObjectInspector[] arguments)
 			throws UDFArgumentException
 	{
-		// index has to be a primitive
-		if (arguments[1] instanceof PrimitiveObjectInspector)
-		{
-			amtOI = (PrimitiveObjectInspector) arguments[1];
-		}
-		else
-		{
-			throw new UDFArgumentTypeException(1,
-					"Primitive Type is expected but "
-							+ arguments[1].getTypeName() + "\" is found");
-		}
-
-		firstArgOI = arguments[0];
-		return ObjectInspectorUtils.getStandardObjectInspector(firstArgOI,
-				ObjectInspectorCopyOption.WRITABLE);
+    if (!(arguments.length >= 1 && arguments.length <= 3)) {
+      throw new UDFArgumentTypeException(arguments.length - 1,
+          "Incorrect invocation of " + _getFnName() + ": _FUNC_(expr, amt, default)");
+    }
+
+    amt = 1;
+
+    if (arguments.length > 1) {
+      ObjectInspector amtOI = arguments[1];
+      if ( !ObjectInspectorUtils.isConstantObjectInspector(amtOI) ||
+          (amtOI.getCategory() != ObjectInspector.Category.PRIMITIVE) ||
+          ((PrimitiveObjectInspector)amtOI).getPrimitiveCategory() !=
+          PrimitiveObjectInspector.PrimitiveCategory.INT )
+      {
+        throw new UDFArgumentTypeException(0,
+            _getFnName() + " amount must be a integer value "
+            + amtOI.getTypeName() + " was passed as parameter 1.");
+      }
+      Object o = ((ConstantObjectInspector)amtOI).
+          getWritableConstantValue();
+      amt = ((IntWritable)o).get();
+    }
+
+    if (arguments.length == 3) {
+      defaultArgOI = arguments[2];
+      ObjectInspectorConverters.getConverter(arguments[2], arguments[0]);
+      defaultValueConverter = ObjectInspectorConverters.getConverter(arguments[2], arguments[0]);
+
+    }
+
+    firstArgOI = arguments[0];
+    return ObjectInspectorUtils.getStandardObjectInspector(firstArgOI,
+        ObjectInspectorCopyOption.WRITABLE);
 	}
 
-
-
 	public ExprNodeEvaluator getExprEvaluator()
 	{
 		return exprEvaluator;
@@ -122,7 +145,39 @@ public abstract class GenericUDFLeadLag 
 		this.pItr = pItr;
 	}
 
-	@Override
+	public ObjectInspector getFirstArgOI() {
+    return firstArgOI;
+  }
+
+  public void setFirstArgOI(ObjectInspector firstArgOI) {
+    this.firstArgOI = firstArgOI;
+  }
+
+  public ObjectInspector getDefaultArgOI() {
+    return defaultArgOI;
+  }
+
+  public void setDefaultArgOI(ObjectInspector defaultArgOI) {
+    this.defaultArgOI = defaultArgOI;
+  }
+
+  public Converter getDefaultValueConverter() {
+    return defaultValueConverter;
+  }
+
+  public void setDefaultValueConverter(Converter defaultValueConverter) {
+    this.defaultValueConverter = defaultValueConverter;
+  }
+
+  public int getAmt() {
+    return amt;
+  }
+
+  public void setAmt(int amt) {
+    this.amt = amt;
+  }
+
+  @Override
 	public String getDisplayString(String[] children)
 	{
 		assert (children.length == 2);
@@ -140,6 +195,8 @@ public abstract class GenericUDFLeadLag 
 
 	protected abstract Object getRow(int amt);
 
+	protected abstract int getIndex(int amt);
+
 	public static class GenericUDFLead extends GenericUDFLeadLag
 	{
 
@@ -150,6 +207,11 @@ public abstract class GenericUDFLeadLag 
 		}
 
 		@Override
+		protected int getIndex(int amt) {
+		  return pItr.getIndex() - 1 + amt;
+		}
+
+		@Override
 		protected Object getRow(int amt)
 		{
 			return pItr.lead(amt - 1);
@@ -166,6 +228,11 @@ public abstract class GenericUDFLeadLag 
 		}
 
 		@Override
+    protected int getIndex(int amt) {
+      return pItr.getIndex() - 1 - amt;
+    }
+
+		@Override
 		protected Object getRow(int amt)
 		{
 			return pItr.lag(amt + 1);
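
Taken together, the lead/lag changes give the functions their usual three-argument form, lead(expr, amt, default) and lag(expr, amt, default): the offset must now be an integer constant, and the optional default is returned instead of failing when the offset runs past the partition boundary. The boundary behaviour that the new getIndex/evaluate pair implements is summarised by the standalone sketch below; the method and variable names are illustrative, not part of the patch.

    import java.util.List;

    public class LeadLagBoundarySketch {
      // lead uses currentIdx + amt, lag uses currentIdx - amt.
      static Object leadWithDefault(List<Object> partition, int currentIdx,
          int amt, Object defaultVal) {
        int newIdx = currentIdx + amt;
        if (newIdx < 0 || newIdx >= partition.size()) {
          return defaultVal; // outside the partition: hand back the default (null if none)
        }
        return partition.get(newIdx);
      }
    }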

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java?rev=1476039&r1=1476038&r2=1476039&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java Fri Apr 26 04:59:50 2013
@@ -67,12 +67,9 @@ public abstract class TableFunctionEvalu
   transient protected PTFPartition outputPartition;
 
   static{
-    PTFUtils.makeTransient(TableFunctionEvaluator.class, "OI");
-    PTFUtils.makeTransient(TableFunctionEvaluator.class, "rawInputOI");
-    PTFUtils.makeTransient(TableFunctionEvaluator.class, "outputPartition");
+    PTFUtils.makeTransient(TableFunctionEvaluator.class, "outputOI", "rawInputOI");
   }
 
-
   public StructObjectInspector getOutputOI()
   {
     return OI;


