hive-commits mailing list archives

From: gunt...@apache.org
Subject: svn commit: r1573252 [2/4] - in /hive/trunk: ./ common/src/java/org/apache/hadoop/hive/conf/ conf/ data/conf/tez/ itests/qtest/ itests/util/src/main/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hado...
Date: Sun, 02 Mar 2014 02:22:56 GMT
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java Sun Mar  2 02:22:54 2014
@@ -18,17 +18,35 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
+import java.util.ArrayList;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.fs.Path;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
+import org.apache.hadoop.hive.ql.exec.UnionOperator;
+import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
 import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.TezWork;
+import org.apache.hadoop.hive.ql.plan.UnionWork;
 import org.apache.hadoop.hive.ql.plan.TezWork.EdgeType;
 
 /**
@@ -59,6 +77,13 @@ public class GenTezUtils {
     sequenceNumber = 0;
   }
 
+  public UnionWork createUnionWork(GenTezProcContext context, Operator<?> operator, TezWork tezWork) {
+    UnionWork unionWork = new UnionWork("Union "+ (++sequenceNumber));
+    context.unionWorkMap.put(operator, unionWork);
+    tezWork.add(unionWork);
+    return unionWork;
+  }
+
   public ReduceWork createReduceWork(GenTezProcContext context, Operator<?> root, TezWork tezWork) {
     assert !root.getParentOperators().isEmpty();
     ReduceWork reduceWork = new ReduceWork("Reducer "+ (++sequenceNumber));
@@ -121,11 +146,118 @@ public class GenTezUtils {
   }
 
   // this method's main use is to help unit testing this class
-  protected void setupMapWork(MapWork mapWork, GenTezProcContext context, 
-      PrunedPartitionList partitions, Operator<? extends OperatorDesc> root, 
+  protected void setupMapWork(MapWork mapWork, GenTezProcContext context,
+      PrunedPartitionList partitions, Operator<? extends OperatorDesc> root,
       String alias) throws SemanticException {
     // All the setup is done in GenMapRedUtils
     GenMapRedUtils.setMapWork(mapWork, context.parseContext,
         context.inputs, partitions, root, alias, context.conf, false);
   }
+
+  // removes any union operator and clones the plan
+  public void removeUnionOperators(Configuration conf, GenTezProcContext context,
+      BaseWork work)
+    throws SemanticException {
+
+    Set<Operator<?>> roots = work.getAllRootOperators();
+
+    // need to clone the plan.
+    Set<Operator<?>> newRoots = Utilities.cloneOperatorTree(conf, roots);
+
+    Map<Operator<?>, Operator<?>> replacementMap = new HashMap<Operator<?>, Operator<?>>();
+
+    Iterator<Operator<?>> it = newRoots.iterator();
+    for (Operator<?> orig: roots) {
+      replacementMap.put(orig,it.next());
+    }
+
+    // now we remove all the unions. we throw away any branch that's not reachable from
+    // the current set of roots. The reason is that those branches will be handled in
+    // different tasks.
+    Deque<Operator<?>> operators = new LinkedList<Operator<?>>();
+    operators.addAll(newRoots);
+
+    Set<Operator<?>> seen = new HashSet<Operator<?>>();
+
+    while(!operators.isEmpty()) {
+      Operator<?> current = operators.pop();
+      seen.add(current);
+
+      if (current instanceof FileSinkOperator) {
+        FileSinkOperator fileSink = (FileSinkOperator)current;
+
+        // remember it for additional processing later
+        context.fileSinkSet.add(fileSink);
+
+        FileSinkDesc desc = fileSink.getConf();
+        Path path = desc.getDirName();
+        List<FileSinkDesc> linked;
+
+        if (!context.linkedFileSinks.containsKey(path)) {
+          linked = new ArrayList<FileSinkDesc>();
+          context.linkedFileSinks.put(path, linked);
+        }
+        linked = context.linkedFileSinks.get(path);
+        linked.add(desc);
+
+        desc.setDirName(new Path(path, ""+linked.size()));
+        desc.setLinkedFileSinkDesc(linked);
+      }
+
+      if (current instanceof UnionOperator) {
+        Operator<?> parent = null;
+        int count = 0;
+
+        for (Operator<?> op: current.getParentOperators()) {
+          if (seen.contains(op)) {
+            ++count;
+            parent = op;
+          }
+        }
+
+        // we should have been able to reach the union from only one side.
+        assert count <= 1;
+
+        if (parent == null) {
+          // root operator is union (can happen in reducers)
+          replacementMap.put(current, current.getChildOperators().get(0));
+        } else {
+          parent.removeChildAndAdoptItsChildren(current);
+        }
+      }
+
+      if (current instanceof FileSinkOperator
+          || current instanceof ReduceSinkOperator) {
+        current.setChildOperators(null);
+      } else {
+        operators.addAll(current.getChildOperators());
+      }
+    }
+    work.replaceRoots(replacementMap);
+  }
+
+  public void processFileSink(GenTezProcContext context, FileSinkOperator fileSink)
+      throws SemanticException {
+
+    ParseContext parseContext = context.parseContext;
+
+    boolean isInsertTable = // is INSERT OVERWRITE TABLE
+        GenMapRedUtils.isInsertInto(parseContext, fileSink);
+    HiveConf hconf = parseContext.getConf();
+
+    boolean chDir = GenMapRedUtils.isMergeRequired(context.moveTask,
+        hconf, fileSink, context.currentTask, isInsertTable);
+
+    Path finalName = GenMapRedUtils.createMoveTask(context.currentTask,
+        chDir, fileSink, parseContext, context.moveTask, hconf, context.dependencyTask);
+
+    if (chDir) {
+      // Merge the files in the destination table/partitions by creating Map-only merge job
+      // If underlying data is RCFile a RCFileBlockMerge task would be created.
+      LOG.info("using CombineHiveInputformat for the merge job");
+      GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName,
+          context.dependencyTask, context.moveTask,
+          hconf, context.currentTask);
+    }
+  }
 }

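For readers following the union handling added above: removeUnionOperators clones the operator tree (Utilities.cloneOperatorTree), records original root -> cloned root (the replacement map later passed to work.replaceRoots), walks the clones, splices out UnionOperators, and cuts the walk at FileSink/ReduceSink boundaries. The following is a minimal, self-contained sketch of that clone-and-splice walk, not the committed code: Node and UnionRemovalSketch are toy stand-ins for Operator<?>, plans are trees rather than DAGs, splicing is done from the parent side instead of removeChildAndAdoptItsChildren, and the linked-FileSink renaming is omitted.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Toy stand-in for Hive's Operator<?>; plans here are trees, not DAGs. */
class Node {
  final String name;
  final boolean isUnion; // union nodes get spliced out of the cloned plan
  final boolean isSink;  // FileSink/ReduceSink analogue: the walk stops here
  final List<Node> children = new ArrayList<>();

  Node(String name, boolean isUnion, boolean isSink) {
    this.name = name;
    this.isUnion = isUnion;
    this.isSink = isSink;
  }

  Node child(Node c) {
    children.add(c);
    return this;
  }
}

public class UnionRemovalSketch {

  /** Deep copy of a tree; stands in for Utilities.cloneOperatorTree. */
  static Node deepCopy(Node n) {
    Node copy = new Node(n.name, n.isUnion, n.isSink);
    for (Node c : n.children) {
      copy.children.add(deepCopy(c));
    }
    return copy;
  }

  /**
   * Clones each root, records original -> clone (the replacement map), then
   * walks the clones: union children are replaced by their own children, and
   * sink nodes end their branch of the walk, much like the FileSink/ReduceSink
   * children being nulled out in the commit.
   */
  static Map<Node, Node> removeUnions(List<Node> roots) {
    Map<Node, Node> replacement = new HashMap<>();
    Deque<Node> work = new ArrayDeque<>();
    for (Node root : roots) {
      Node cloned = deepCopy(root);
      replacement.put(root, cloned);
      work.push(cloned);
    }

    while (!work.isEmpty()) {
      Node current = work.pop();

      // Splice from the parent side: adopt a union child's children,
      // repeating in case a union feeds directly into another union.
      List<Node> rewritten = new ArrayList<>();
      Deque<Node> pending = new ArrayDeque<>(current.children);
      while (!pending.isEmpty()) {
        Node c = pending.pop();
        if (c.isUnion) {
          pending.addAll(c.children);
        } else {
          rewritten.add(c);
        }
      }
      current.children.clear();
      current.children.addAll(rewritten);

      if (current.isSink) {
        current.children.clear(); // like setChildOperators(null) on FS/RS
      } else {
        work.addAll(current.children);
      }
    }
    return replacement;
  }

  public static void main(String[] args) {
    // TS1 -> UNION -> FS and TS2 -> UNION -> FS: after removal each cloned
    // scan feeds its own copy of the file sink directly.
    Node ts1 = new Node("TS1", false, false)
        .child(new Node("UNION", true, false).child(new Node("FS", false, true)));
    Node ts2 = new Node("TS2", false, false)
        .child(new Node("UNION", true, false).child(new Node("FS", false, true)));
    for (Map.Entry<Node, Node> e : removeUnions(List.of(ts1, ts2)).entrySet()) {
      System.out.println(e.getValue().name + " -> " + e.getValue().children.get(0).name);
    }
  }
}
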
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java Sun Mar  2 02:22:54 2014
@@ -19,15 +19,21 @@
 package org.apache.hadoop.hive.ql.parse;
 
 import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 import java.util.Stack;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
+import org.apache.hadoop.hive.ql.exec.UnionOperator;
+import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.NodeProcessor;
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
@@ -37,6 +43,7 @@ import org.apache.hadoop.hive.ql.plan.Ma
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.TezWork;
+import org.apache.hadoop.hive.ql.plan.UnionWork;
 import org.apache.hadoop.hive.ql.plan.TezWork.EdgeType;
 
 /**
@@ -106,6 +113,41 @@ public class GenTezWork implements NodeP
       context.rootToWorkMap.put(root, work);
     }
 
+    // This is where we cut the tree as described above. We also remember that
+    // we might have to connect parent work with this work later.
+    for (Operator<?> parent: new ArrayList<Operator<?>>(root.getParentOperators())) {
+      context.leafOperatorToFollowingWork.put(parent, work);
+      LOG.debug("Removing " + parent + " as parent from " + root);
+      root.removeParent(parent);
+    }
+
+    if (!context.currentUnionOperators.isEmpty()) {      
+      // if there are union all operators we need to add the work to the set
+      // of union operators.
+
+      UnionWork unionWork;
+      if (context.unionWorkMap.containsKey(operator)) {
+        // we've seen this terminal before and have created a union work object.
+        // just need to add this work to it. There will be no children of this one
+        // since we've passed this operator before.
+        assert operator.getChildOperators().isEmpty();
+        unionWork = (UnionWork) context.unionWorkMap.get(operator);
+
+      } else {
+        // first time through. we need to create a union work object and add this
+        // work to it. Subsequent work should reference the union and not the actual
+        // work.
+        unionWork = utils.createUnionWork(context, operator, tezWork);
+      }
+
+      // finally hook everything up
+      tezWork.connect(unionWork, work, EdgeType.CONTAINS);
+      unionWork.addUnionOperators(context.currentUnionOperators);
+      context.currentUnionOperators.clear();
+      context.workWithUnionOperators.add(work);
+      work = unionWork;
+    }
+
     // We're scanning a tree from roots to leaf (this is not technically
     // correct, demux and mux operators might form a diamond shape, but
     // we will only scan one path and ignore the others, because the
@@ -134,16 +176,10 @@ public class GenTezWork implements NodeP
       // remember the output name of the reduce sink
       rs.getConf().setOutputName(rWork.getName());
 
-      // add dependency between the two work items
-      tezWork.connect(work, rWork, EdgeType.SIMPLE_EDGE);
-    }
-
-    // This is where we cut the tree as described above. We also remember that
-    // we might have to connect parent work with this work later.
-    for (Operator<?> parent: new ArrayList<Operator<?>>(root.getParentOperators())) {
-      context.leafOperatorToFollowingWork.put(parent, work);
-      LOG.debug("Removing " + parent + " as parent from " + root);
-      root.removeParent(parent);
+      if (!context.unionWorkMap.containsKey(operator)) {
+        // add dependency between the two work items
+        tezWork.connect(work, rWork, EdgeType.SIMPLE_EDGE);
+      }
     }
 
     // No children means we're at the bottom. If there are more operators to scan
@@ -182,7 +218,7 @@ public class GenTezWork implements NodeP
       for (BaseWork parentWork : linkWorkList) {
         tezWork.connect(parentWork, work, EdgeType.BROADCAST_EDGE);
 
-        // need to set up output name for reduce sink not that we know the name
+        // need to set up output name for reduce sink now that we know the name
         // of the downstream work
         for (ReduceSinkOperator r:
                context.linkWorkWithReduceSinkMap.get(parentWork)) {

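The GenTezWork change above ties every work item that carries union operators to a shared UnionWork vertex: the first time a terminal operator is seen, createUnionWork builds the vertex and registers it in context.unionWorkMap; later visits look it up and reuse it; each contributing work is attached with a CONTAINS edge, and the plain SIMPLE_EDGE to the reduce work is skipped when the terminal belongs to a union. A rough sketch of that get-or-create wiring follows; Work, UnionVertex, and the method names here are made-up stand-ins, not Hive's classes.

import java.util.HashMap;
import java.util.Map;

/** Made-up stand-ins for BaseWork/UnionWork; illustration only. */
class Work {
  final String name;
  Work(String name) { this.name = name; }
}

class UnionVertex extends Work {
  UnionVertex(String name) { super(name); }
}

public class UnionWiringSketch {
  private final Map<String, UnionVertex> unionByTerminal = new HashMap<>();
  private int sequence = 0;

  /**
   * First sight of a terminal operator creates the shared union vertex
   * (createUnionWork in the commit); later sights reuse it from the map
   * (context.unionWorkMap), so every contributing work attaches to the
   * same vertex.
   */
  UnionVertex unionFor(String terminalOperator) {
    return unionByTerminal.computeIfAbsent(
        terminalOperator, op -> new UnionVertex("Union " + (++sequence)));
  }

  /** In the commit this is tezWork.connect(unionWork, work, EdgeType.CONTAINS). */
  void connectContains(Work union, Work contained) {
    System.out.println(union.name + " CONTAINS " + contained.name);
  }

  public static void main(String[] args) {
    UnionWiringSketch g = new UnionWiringSketch();
    g.connectContains(g.unionFor("FS_7"), new Work("Map 1")); // creates Union 1
    g.connectContains(g.unionFor("FS_7"), new Work("Map 2")); // reuses Union 1
  }
}
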
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Sun Mar  2 02:22:54 2014
@@ -644,7 +644,9 @@ public class SemanticAnalyzer extends Ba
   }
 
   private void assertCombineInputFormat(Tree numerator, String message) throws SemanticException {
-    String inputFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEINPUTFORMAT);
+    String inputFormat = conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") ?
+      HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZINPUTFORMAT):
+      HiveConf.getVar(conf, HiveConf.ConfVars.HIVEINPUTFORMAT);
     if (!inputFormat.equals(CombineHiveInputFormat.class.getName())) {
       throw new SemanticException(generateErrorMessage((ASTNode) numerator,
           message + " sampling is not supported in " + inputFormat));

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java Sun Mar  2 02:22:54 2014
@@ -31,6 +31,7 @@ import java.util.Stack;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.exec.ConditionalTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
@@ -53,16 +54,19 @@ import org.apache.hadoop.hive.ql.lib.Nod
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
 import org.apache.hadoop.hive.ql.lib.Rule;
 import org.apache.hadoop.hive.ql.lib.RuleRegExp;
+import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.optimizer.ConvertJoinMapJoin;
 import org.apache.hadoop.hive.ql.optimizer.ReduceSinkMapJoinProc;
 import org.apache.hadoop.hive.ql.optimizer.SetReducerParallelism;
 import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext;
 import org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer;
+import org.apache.hadoop.hive.ql.optimizer.physical.StageIDsRearranger;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
 import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.TezWork;
+import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 
 /**
  * TezCompiler translates the operator plan into TezTasks.
@@ -75,6 +79,18 @@ public class TezCompiler extends TaskCom
   }
 
   @Override
+  public void init(HiveConf conf, LogHelper console, Hive db) {
+    super.init(conf, console, db);
+    
+    // Tez requires us to use RPC for the query plan
+    HiveConf.setBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN, true);
+
+    // We require the use of recursive input dirs for union processing
+    conf.setBoolean("mapred.input.dir.recursive", true);
+    HiveConf.setBoolVar(conf, ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES, true);
+  }
+
+  @Override
   protected void optimizeOperatorPlan(ParseContext pCtx, Set<ReadEntity> inputs,
       Set<WriteEntity> outputs) throws SemanticException {
 
@@ -138,14 +154,18 @@ public class TezCompiler extends TaskCom
         TableScanOperator.getOperatorName() + "%"),
         new ProcessAnalyzeTable(GenTezUtils.getUtils()));
 
-    opRules.put(new RuleRegExp("Bail on Union",
+    opRules.put(new RuleRegExp("Handle union",
         UnionOperator.getOperatorName() + "%"), new NodeProcessor()
     {
       @Override
       public Object process(Node n, Stack<Node> s,
           NodeProcessorCtx procCtx, Object... os) throws SemanticException {
-        throw new SemanticException("Unions not yet supported on Tez."
-            +" Please use MR for this query");
+        GenTezProcContext context = (GenTezProcContext) procCtx;
+        UnionOperator union = (UnionOperator) n;
+
+        // simply need to remember that we've seen a union.
+        context.currentUnionOperators.add(union);
+        return null;
       }
     });
 
@@ -156,20 +176,31 @@ public class TezCompiler extends TaskCom
     topNodes.addAll(pCtx.getTopOps().values());
     GraphWalker ogw = new GenTezWorkWalker(disp, procCtx);
     ogw.startWalking(topNodes, null);
+
+    // we need to clone some operator plans and remove union operators still
+    for (BaseWork w: procCtx.workWithUnionOperators) {
+      GenTezUtils.getUtils().removeUnionOperators(conf, procCtx, w);
+    }
+
+    // finally make sure the file sink operators are set up right
+    for (FileSinkOperator fileSink: procCtx.fileSinkSet) {
+      GenTezUtils.getUtils().processFileSink(procCtx, fileSink);
+    }
   }
 
   @Override
   protected void setInputFormat(Task<? extends Serializable> task) {
     if (task instanceof TezTask) {
       TezWork work = ((TezTask)task).getWork();
-      Set<BaseWork> roots = work.getRoots();
-      for (BaseWork w: roots) {
-        assert w instanceof MapWork;
-        MapWork mapWork = (MapWork)w;
-        HashMap<String, Operator<? extends OperatorDesc>> opMap = mapWork.getAliasToWork();
-        if (!opMap.isEmpty()) {
-          for (Operator<? extends OperatorDesc> op : opMap.values()) {
-            setInputFormat(mapWork, op);
+      List<BaseWork> all = work.getAllWork();
+      for (BaseWork w: all) {
+        if (w instanceof MapWork) {
+          MapWork mapWork = (MapWork) w;
+          HashMap<String, Operator<? extends OperatorDesc>> opMap = mapWork.getAliasToWork();
+          if (!opMap.isEmpty()) {
+            for (Operator<? extends OperatorDesc> op : opMap.values()) {
+              setInputFormat(mapWork, op);
+            }
           }
         }
       }
@@ -217,6 +248,9 @@ public class TezCompiler extends TaskCom
     if (conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED)) {
       (new Vectorizer()).resolve(physicalCtx);
     }
+    if (!"none".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE))) {
+      (new StageIDsRearranger()).resolve(physicalCtx);
+    }
     return;
   }
 }

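Two of the settings forced on in TezCompiler.init() exist because of how the union branches write their output: removeUnionOperators redirects each linked FileSink into a numbered child directory of the common destination (desc.setDirName(new Path(path, "" + linked.size()))), so anything reading the destination must descend into subdirectories. A small sketch of that layout and the corresponding switch; the paths are invented, and only Configuration.setBoolean and Path are real APIs here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class RecursiveInputDirsSketch {
  public static void main(String[] args) {
    // Each union branch lands in its own numbered child of the shared
    // destination, e.g. <dest>/1 and <dest>/2 (path below is made up).
    Path dest = new Path("/tmp/hypothetical-scratch/-ext-10000");
    Path branch1 = new Path(dest, "1");
    Path branch2 = new Path(dest, "2");

    // Readers of 'dest' therefore need recursive directory listing, which is
    // what TezCompiler.init() switches on for every Tez-compiled query.
    Configuration conf = new Configuration();
    conf.setBoolean("mapred.input.dir.recursive", true);

    System.out.println("union branches land in " + branch1 + " and " + branch2);
  }
}
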
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java Sun Mar  2 02:22:54 2014
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.LinkedHashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
@@ -82,7 +83,9 @@ public abstract class BaseWork extends A
     dummyOps.add(dummyOp);
   }
 
-  protected abstract Set<Operator<?>> getAllRootOperators();
+  public abstract void replaceRoots(Map<Operator<?>, Operator<?>> replacementMap);
+
+  public abstract Set<Operator<?>> getAllRootOperators();
 
   public Set<Operator<?>> getAllOperators() {
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java Sun Mar  2 02:22:54 2014
@@ -305,6 +305,17 @@ public class MapWork extends BaseWork {
   }
 
   @Override
+  public void replaceRoots(Map<Operator<?>, Operator<?>> replacementMap) {
+    LinkedHashMap<String, Operator<?>> newAliasToWork = new LinkedHashMap<String, Operator<?>>();
+
+    for (Map.Entry<String, Operator<?>> entry: aliasToWork.entrySet()) {
+      newAliasToWork.put(entry.getKey(), replacementMap.get(entry.getValue()));
+    }
+
+    setAliasToWork(newAliasToWork);
+  }
+
+  @Override
   @Explain(displayName = "Map Operator Tree")
   public Set<Operator<?>> getAllRootOperators() {
     Set<Operator<?>> opSet = new LinkedHashSet<Operator<?>>();

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java Sun Mar  2 02:22:54 2014
@@ -129,7 +129,13 @@ public class ReduceWork extends BaseWork
   }
 
   @Override
-  protected Set<Operator<?>> getAllRootOperators() {
+  public void replaceRoots(Map<Operator<?>, Operator<?>> replacementMap) {
+    assert replacementMap.size() == 1;
+    setReducer(replacementMap.get(getReducer()));
+  }
+
+  @Override
+  public Set<Operator<?>> getAllRootOperators() {
     Set<Operator<?>> opSet = new LinkedHashSet<Operator<?>>();
     opSet.add(getReducer());
     return opSet;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java Sun Mar  2 02:22:54 2014
@@ -45,11 +45,14 @@ public class TezWork extends AbstractOpe
 
   public enum EdgeType {
     SIMPLE_EDGE,
-    BROADCAST_EDGE
+    BROADCAST_EDGE,
+    CONTAINS
   }
 
   private static transient final Log LOG = LogFactory.getLog(TezWork.class);
 
+  private static int counter;
+  private final String name;
   private final Set<BaseWork> roots = new HashSet<BaseWork>();
   private final Set<BaseWork> leaves = new HashSet<BaseWork>();
   private final Map<BaseWork, List<BaseWork>> workGraph = new HashMap<BaseWork, List<BaseWork>>();
@@ -57,6 +60,15 @@ public class TezWork extends AbstractOpe
   private final Map<Pair<BaseWork, BaseWork>, EdgeType> edgeProperties =
       new HashMap<Pair<BaseWork, BaseWork>, EdgeType>();
 
+  public TezWork(String name) {
+    this.name = name + ":" + (++counter);
+  }
+
+  @Explain(displayName = "DagName")
+  public String getName() {
+    return name;
+  }
+
   /**
    * getWorkMap returns a map of "vertex name" to BaseWork
    */

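TezWork now carries a name built from a caller-supplied prefix plus a static counter and exposes it through @Explain(displayName = "DagName"); that appears to be the value the updated q.out files below cover with "#### A masked pattern was here ####". A stripped-down sketch of the same scheme; the class name and prefix are illustrative only.

/**
 * Illustrative only: mirrors the new TezWork(String) constructor, which
 * appends a static counter to the supplied prefix so each compiled DAG
 * gets a distinct name.
 */
public class DagNameSketch {
  private static int counter;
  private final String name;

  public DagNameSketch(String prefix) {
    this.name = prefix + ":" + (++counter);
  }

  public String getName() {
    return name;
  }

  public static void main(String[] args) {
    System.out.println(new DagNameSketch("query-abc").getName()); // query-abc:1
    System.out.println(new DagNameSketch("query-abc").getName()); // query-abc:2
  }
}
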
Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java Sun Mar  2 02:22:54 2014
@@ -112,7 +112,7 @@ public class TestTezTask {
           }
         });
 
-    work = new TezWork();
+    work = new TezWork("");
 
     mws = new MapWork[] { new MapWork(), new MapWork()};
     rws = new ReduceWork[] { new ReduceWork(), new ReduceWork() };
@@ -194,7 +194,7 @@ public class TestTezTask {
 
   @Test
   public void testEmptyWork() throws IllegalArgumentException, IOException, Exception {
-    DAG dag = task.build(conf, new TezWork(), path, appLr, new Context(conf));
+    DAG dag = task.build(conf, new TezWork(""), path, appLr, new Context(conf));
     assertEquals(dag.getVertices().size(), 0);
   }
 

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java Sun Mar  2 02:22:54 2014
@@ -29,6 +29,8 @@ import java.math.BigInteger;
 import java.nio.ByteBuffer;
 import java.sql.Timestamp;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -39,6 +41,9 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
+
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_ORC_ZEROCOPY;
+
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
@@ -67,14 +72,19 @@ import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hive.common.util.HiveTestUtils;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 /**
  * Tests for the top level reader/streamFactory of ORC files.
  */
+@RunWith(value = Parameterized.class)
 public class TestOrcFile {
 
   public static class SimpleStruct {
@@ -191,6 +201,16 @@ public class TestOrcFile {
   Configuration conf;
   FileSystem fs;
   Path testFilePath;
+  private final boolean zeroCopy;
+
+  @Parameters
+  public static Collection<Boolean[]> data() {
+    return Arrays.asList(new Boolean[][] { {false}, {true}});
+  }
+
+  public TestOrcFile(Boolean zcr) {
+    zeroCopy = zcr.booleanValue();
+  }
 
   @Rule
   public TestName testCaseName = new TestName();
@@ -198,6 +218,9 @@ public class TestOrcFile {
   @Before
   public void openFileSystem () throws Exception {
     conf = new Configuration();
+    if(zeroCopy) {
+      conf.setBoolean(HIVE_ORC_ZEROCOPY.varname, zeroCopy);
+    }
     fs = FileSystem.getLocal(conf);
     testFilePath = new Path(workDir, "TestOrcFile." +
         testCaseName.getMethodName() + ".orc");
@@ -547,6 +570,7 @@ public class TestOrcFile {
       inspector = ObjectInspectorFactory.getReflectionObjectInspector
           (InnerStruct.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
+
     Writer writer = OrcFile.createWriter(testFilePath,
         OrcFile.writerOptions(conf)
             .inspector(inspector)
@@ -572,35 +596,36 @@ public class TestOrcFile {
     StripeStatistics ss1 = metadata.getStripeStatistics().get(0);
     StripeStatistics ss2 = metadata.getStripeStatistics().get(1);
     StripeStatistics ss3 = metadata.getStripeStatistics().get(2);
-    assertEquals(4996, ss1.getColumnStatistics()[0].getNumberOfValues());
+
+    assertEquals(5000, ss1.getColumnStatistics()[0].getNumberOfValues());
     assertEquals(5000, ss2.getColumnStatistics()[0].getNumberOfValues());
-    assertEquals(1004, ss3.getColumnStatistics()[0].getNumberOfValues());
+    assertEquals(1000, ss3.getColumnStatistics()[0].getNumberOfValues());
 
-    assertEquals(4996, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getNumberOfValues());
+    assertEquals(5000, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getNumberOfValues());
     assertEquals(5000, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getNumberOfValues());
-    assertEquals(1004, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getNumberOfValues());
+    assertEquals(1000, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getNumberOfValues());
     assertEquals(1, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getMinimum());
-    assertEquals(1, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getMinimum());
-    assertEquals(2, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getMinimum());
+    assertEquals(2, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getMinimum());
+    assertEquals(3, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getMinimum());
     assertEquals(1, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getMaximum());
     assertEquals(2, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getMaximum());
     assertEquals(3, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getMaximum());
-    assertEquals(4996, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getSum());
-    assertEquals(9996, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getSum());
-    assertEquals(3008, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getSum());
+    assertEquals(5000, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getSum());
+    assertEquals(10000, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getSum());
+    assertEquals(3000, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getSum());
 
-    assertEquals(4996, ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getNumberOfValues());
+    assertEquals(5000, ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getNumberOfValues());
     assertEquals(5000, ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getNumberOfValues());
-    assertEquals(1004, ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getNumberOfValues());
+    assertEquals(1000, ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getNumberOfValues());
     assertEquals("one", ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getMinimum());
-    assertEquals("one", ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getMinimum());
+    assertEquals("two", ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getMinimum());
     assertEquals("three", ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getMinimum());
     assertEquals("one", ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getMaximum());
     assertEquals("two", ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getMaximum());
-    assertEquals("two", ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getMaximum());
-    assertEquals(14988, ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getSum());
+    assertEquals("three", ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getMaximum());
+    assertEquals(15000, ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getSum());
     assertEquals(15000, ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getSum());
-    assertEquals(5012, ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getSum());
+    assertEquals(5000, ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getSum());
 
     RecordReaderImpl recordReader = (RecordReaderImpl) reader.rows(null);
     OrcProto.RowIndex[] index = recordReader.readRowIndex(0);

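TestOrcFile above (and TestSessionState further down) are converted to JUnit 4 parameterized tests so that every test method runs once with the new flag off and once with it on. For reference, a minimal standalone example of the pattern; FlagMatrixTest is a hypothetical class, not part of the commit.

import static org.junit.Assert.assertTrue;

import java.util.Arrays;
import java.util.Collection;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

/** Hypothetical test class; shows only the runner/constructor/@Parameters wiring. */
@RunWith(Parameterized.class)
public class FlagMatrixTest {

  private final boolean flag;

  public FlagMatrixTest(Boolean flag) {
    this.flag = flag.booleanValue();
  }

  @Parameters
  public static Collection<Boolean[]> data() {
    // one row per configuration; each @Test below runs once per row
    return Arrays.asList(new Boolean[][] { {false}, {true} });
  }

  @Test
  public void runsOncePerFlagValue() {
    // trivial check; the point is that it executes for flag=false and flag=true
    assertTrue(flag || !flag);
  }
}
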
Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestStringRedBlackTree.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestStringRedBlackTree.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestStringRedBlackTree.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestStringRedBlackTree.java Sun Mar  2 02:22:54 2014
@@ -262,7 +262,7 @@ public class TestStringRedBlackTree {
     bit.testCompressedSeek();
     bit.testBiggerItems();
     bit.testSkips();
-    TestOrcFile test1 = new TestOrcFile();
+    TestOrcFile test1 = new TestOrcFile(false);
     test1.test1();
     test1.emptyFile();
     test1.metaData();

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java Sun Mar  2 02:22:54 2014
@@ -34,7 +34,7 @@ public class TestTezWork {
   @Before
   public void setup() throws Exception {
     nodes = new LinkedList<BaseWork>();
-    work = new TezWork();
+    work = new TezWork("");
     addWork(5);
   }
 

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java Sun Mar  2 02:22:54 2014
@@ -20,20 +20,43 @@ package org.apache.hadoop.hive.ql.sessio
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 
+import java.util.Arrays;
+import java.util.Collection;
+
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 /**
  * Test SessionState
  */
+@RunWith(value = Parameterized.class)
 public class TestSessionState {
 
+  private final boolean prewarm;
+
+  public TestSessionState(Boolean mode) {
+    this.prewarm = mode.booleanValue();
+  }
+
+  @Parameters
+  public static Collection<Boolean[]> data() {
+    return Arrays.asList(new Boolean[][] { {false}, {true}});
+  }
 
   @Before
-  public void setup(){
-    SessionState.start(new HiveConf());
+  public void setup() {
+    HiveConf conf = new HiveConf();
+    if (prewarm) {
+      HiveConf.setBoolVar(conf, ConfVars.HIVE_PREWARM_ENABLED, true);
+      HiveConf.setIntVar(conf, ConfVars.HIVE_PREWARM_NUM_CONTAINERS, 1);
+    }
+    SessionState.start(conf);
   }
 
   /**

Modified: hive/trunk/ql/src/test/resources/orc-file-dump-dictionary-threshold.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/resources/orc-file-dump-dictionary-threshold.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/resources/orc-file-dump-dictionary-threshold.out (original)
+++ hive/trunk/ql/src/test/resources/orc-file-dump-dictionary-threshold.out Sun Mar  2 02:22:54 2014
@@ -6,30 +6,30 @@ Type: struct<i:int,l:bigint,s:string>
 
 Stripe Statistics:
   Stripe 1:
-    Column 0: count: 4000
-    Column 1: count: 4000 min: -2147115959 max: 2145911404 sum: 71315665983
-    Column 2: count: 4000 min: -9211329013123260308 max: 9217851628057711416
-    Column 3: count: 4000 min: Darkness,-230 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788 sum: 245096
+    Column 0: count: 5000
+    Column 1: count: 5000 min: -2147115959 max: 2145911404 sum: 159677169195
+    Column 2: count: 5000 min: -9216505819108477308 max: 9217851628057711416
+    Column 3: count: 5000 min: Darkness,-230 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744 sum: 381254
   Stripe 2:
     Column 0: count: 5000
-    Column 1: count: 5000 min: -2147390285 max: 2146838901 sum: 107869424275
+    Column 1: count: 5000 min: -2147390285 max: 2147224606 sum: -14961457759
     Column 2: count: 5000 min: -9222178666167296739 max: 9221301751385928177
-    Column 3: count: 5000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984 sum: 972748
+    Column 3: count: 5000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938 sum: 1117994
   Stripe 3:
     Column 0: count: 5000
-    Column 1: count: 5000 min: -2145928262 max: 2147224606 sum: 38276585043
+    Column 1: count: 5000 min: -2145842720 max: 2146718321 sum: 141092475520
     Column 2: count: 5000 min: -9221963099397084326 max: 9222722740629726770
-    Column 3: count: 5000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766 sum: 1753024
+    Column 3: count: 5000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974 sum: 1925226
   Stripe 4:
     Column 0: count: 5000
-    Column 1: count: 5000 min: -2145378214 max: 2147453086 sum: -43469576640
+    Column 1: count: 5000 min: -2145378214 max: 2147453086 sum: -153680004530
     Column 2: count: 5000 min: -9222731174895935707 max: 9222919052987871506
-    Column 3: count: 5000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-127
 82-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788 sum: 2636664
+    Column 3: count: 5000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-120
 96-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904 sum: 2815002
   Stripe 5:
-    Column 0: count: 2000
-    Column 1: count: 2000 min: -2143595397 max: 2144595861 sum: -64863580335
-    Column 2: count: 2000 min: -9212379634781416464 max: 9208134757538374043
-    Column 3: count: 2000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214-18444-18446-18724-18912-18952-19164 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-86
 20-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 sum: 1302706
+    Column 0: count: 1000
+    Column 1: count: 1000 min: -2143595397 max: 2136858458 sum: -22999664100
+    Column 2: count: 1000 min: -9212379634781416464 max: 9197412874152820822
+    Column 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214-18444-18446-18724-18912-18952-19164-19348-19400-19546-19776-19896-20084 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7
 960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 sum: 670762
 
 File Statistics:
   Column 0: count: 21000
@@ -38,67 +38,67 @@ File Statistics:
   Column 3: count: 21000 min: Darkness,-230 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 sum: 6910238
 
 Stripes:
-  Stripe: offset: 3 data: 102311 rows: 4000 tail: 68 index: 224
+  Stripe: offset: 3 data: 144733 rows: 5000 tail: 68 index: 235
     Stream: column 0 section ROW_INDEX start: 3 length 10
     Stream: column 1 section ROW_INDEX start: 13 length 36
     Stream: column 2 section ROW_INDEX start: 49 length 39
-    Stream: column 3 section ROW_INDEX start: 88 length 139
-    Stream: column 1 section DATA start: 227 length 16022
-    Stream: column 2 section DATA start: 16249 length 32028
-    Stream: column 3 section DATA start: 48277 length 50887
-    Stream: column 3 section LENGTH start: 99164 length 3374
+    Stream: column 3 section ROW_INDEX start: 88 length 150
+    Stream: column 1 section DATA start: 238 length 20029
+    Stream: column 2 section DATA start: 20267 length 40035
+    Stream: column 3 section DATA start: 60302 length 80382
+    Stream: column 3 section LENGTH start: 140684 length 4287
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
     Encoding column 3: DIRECT_V2
-  Stripe: offset: 102606 data: 284999 rows: 5000 tail: 68 index: 356
-    Stream: column 0 section ROW_INDEX start: 102606 length 10
-    Stream: column 1 section ROW_INDEX start: 102616 length 36
-    Stream: column 2 section ROW_INDEX start: 102652 length 39
-    Stream: column 3 section ROW_INDEX start: 102691 length 271
-    Stream: column 1 section DATA start: 102962 length 20029
-    Stream: column 2 section DATA start: 122991 length 40035
-    Stream: column 3 section DATA start: 163026 length 219588
-    Stream: column 3 section LENGTH start: 382614 length 5347
+  Stripe: offset: 145039 data: 321684 rows: 5000 tail: 68 index: 415
+    Stream: column 0 section ROW_INDEX start: 145039 length 10
+    Stream: column 1 section ROW_INDEX start: 145049 length 35
+    Stream: column 2 section ROW_INDEX start: 145084 length 39
+    Stream: column 3 section ROW_INDEX start: 145123 length 331
+    Stream: column 1 section DATA start: 145454 length 20029
+    Stream: column 2 section DATA start: 165483 length 40035
+    Stream: column 3 section DATA start: 205518 length 256119
+    Stream: column 3 section LENGTH start: 461637 length 5501
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
     Encoding column 3: DIRECT_V2
-  Stripe: offset: 388029 data: 491655 rows: 5000 tail: 69 index: 544
-    Stream: column 0 section ROW_INDEX start: 388029 length 10
-    Stream: column 1 section ROW_INDEX start: 388039 length 36
-    Stream: column 2 section ROW_INDEX start: 388075 length 39
-    Stream: column 3 section ROW_INDEX start: 388114 length 459
-    Stream: column 1 section DATA start: 388573 length 20029
-    Stream: column 2 section DATA start: 408602 length 40035
-    Stream: column 3 section DATA start: 448637 length 425862
-    Stream: column 3 section LENGTH start: 874499 length 5729
+  Stripe: offset: 467206 data: 531773 rows: 5000 tail: 69 index: 569
+    Stream: column 0 section ROW_INDEX start: 467206 length 10
+    Stream: column 1 section ROW_INDEX start: 467216 length 36
+    Stream: column 2 section ROW_INDEX start: 467252 length 39
+    Stream: column 3 section ROW_INDEX start: 467291 length 484
+    Stream: column 1 section DATA start: 467775 length 20029
+    Stream: column 2 section DATA start: 487804 length 40035
+    Stream: column 3 section DATA start: 527839 length 466002
+    Stream: column 3 section LENGTH start: 993841 length 5707
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
     Encoding column 3: DIRECT_V2
-  Stripe: offset: 880297 data: 707368 rows: 5000 tail: 68 index: 691
-    Stream: column 0 section ROW_INDEX start: 880297 length 10
-    Stream: column 1 section ROW_INDEX start: 880307 length 36
-    Stream: column 2 section ROW_INDEX start: 880343 length 39
-    Stream: column 3 section ROW_INDEX start: 880382 length 606
-    Stream: column 1 section DATA start: 880988 length 20029
-    Stream: column 2 section DATA start: 901017 length 40035
-    Stream: column 3 section DATA start: 941052 length 641580
-    Stream: column 3 section LENGTH start: 1582632 length 5724
+  Stripe: offset: 999617 data: 751374 rows: 5000 tail: 69 index: 734
+    Stream: column 0 section ROW_INDEX start: 999617 length 10
+    Stream: column 1 section ROW_INDEX start: 999627 length 36
+    Stream: column 2 section ROW_INDEX start: 999663 length 39
+    Stream: column 3 section ROW_INDEX start: 999702 length 649
+    Stream: column 1 section DATA start: 1000351 length 20029
+    Stream: column 2 section DATA start: 1020380 length 40035
+    Stream: column 3 section DATA start: 1060415 length 685567
+    Stream: column 3 section LENGTH start: 1745982 length 5743
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
     Encoding column 3: DIRECT_V2
-  Stripe: offset: 1588424 data: 348697 rows: 2000 tail: 67 index: 797
-    Stream: column 0 section ROW_INDEX start: 1588424 length 10
-    Stream: column 1 section ROW_INDEX start: 1588434 length 36
-    Stream: column 2 section ROW_INDEX start: 1588470 length 39
-    Stream: column 3 section ROW_INDEX start: 1588509 length 712
-    Stream: column 1 section DATA start: 1589221 length 8011
-    Stream: column 2 section DATA start: 1597232 length 16014
-    Stream: column 3 section DATA start: 1613246 length 322259
-    Stream: column 3 section LENGTH start: 1935505 length 2413
+  Stripe: offset: 1800000 data: 177935 rows: 1000 tail: 67 index: 813
+    Stream: column 0 section ROW_INDEX start: 1800000 length 10
+    Stream: column 1 section ROW_INDEX start: 1800010 length 36
+    Stream: column 2 section ROW_INDEX start: 1800046 length 39
+    Stream: column 3 section ROW_INDEX start: 1800085 length 728
+    Stream: column 1 section DATA start: 1800813 length 4007
+    Stream: column 2 section DATA start: 1804820 length 8007
+    Stream: column 3 section DATA start: 1812827 length 164661
+    Stream: column 3 section LENGTH start: 1977488 length 1260
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/auto_join0.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/auto_join0.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/auto_join0.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/auto_join0.q.out Sun Mar  2 02:22:54 2014
@@ -31,6 +31,7 @@ STAGE PLANS:
         Map 1 <- Map 4 (BROADCAST_EDGE)
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
         Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/auto_join1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/auto_join1.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/auto_join1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/auto_join1.q.out Sun Mar  2 02:22:54 2014
@@ -1,7 +1,9 @@
 PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dest_j1
 PREHOOK: query: explain
 FROM src src1 JOIN src src2 ON (src1.key = src2.key)
@@ -13,20 +15,16 @@ INSERT OVERWRITE TABLE dest_j1 SELECT sr
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
-  Stage-5
-  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
+  Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-4
-  Stage-6
-  Stage-7 depends on stages: Stage-6
 
 STAGE PLANS:
   Stage: Stage-1
     Tez
       Edges:
         Map 2 <- Map 1 (BROADCAST_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -68,15 +66,6 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                             name: default.dest_j1
 
-  Stage: Stage-8
-    Conditional Operator
-
-  Stage: Stage-5
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
   Stage: Stage-2
     Dependency Collection
 
@@ -93,40 +82,6 @@ STAGE PLANS:
   Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-4
-    Tez
-      Vertices:
-        Merge 
-            Map Operator Tree:
-                TableScan
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.dest_j1
-
-  Stage: Stage-6
-    Tez
-      Vertices:
-        Merge 
-            Map Operator Tree:
-                TableScan
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.dest_j1
-
-  Stage: Stage-7
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
 PREHOOK: type: QUERY

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/bucket2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/bucket2.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/bucket2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/bucket2.q.out Sun Mar  2 02:22:54 2014
@@ -1,7 +1,9 @@
 PREHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@bucket2_1
 PREHOOK: query: explain extended
 insert overwrite table bucket2_1
@@ -39,6 +41,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -196,6 +199,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/bucket3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/bucket3.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/bucket3.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/bucket3.q.out Sun Mar  2 02:22:54 2014
@@ -1,7 +1,9 @@
 PREHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@bucket3_1
 PREHOOK: query: explain extended
 insert overwrite table bucket3_1 partition (ds='1')
@@ -43,6 +45,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -221,6 +224,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/bucket4.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/bucket4.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/bucket4.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/bucket4.q.out Sun Mar  2 02:22:54 2014
@@ -1,7 +1,9 @@
 PREHOOK: query: CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@bucket4_1
 PREHOOK: query: explain extended
 insert overwrite table bucket4_1
@@ -39,6 +41,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -197,6 +200,7 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/count.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/count.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/count.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/count.q.out Sun Mar  2 02:22:54 2014
@@ -1,13 +1,17 @@
 PREHOOK: query: create table abcd (a int, b int, c int, d int)
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: create table abcd (a int, b int, c int, d int)
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@abcd
 PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd
 PREHOOK: type: LOAD
+#### A masked pattern was here ####
 PREHOOK: Output: default@abcd
 POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd
 POSTHOOK: type: LOAD
+#### A masked pattern was here ####
 POSTHOOK: Output: default@abcd
 PREHOOK: query: select * from abcd
 PREHOOK: type: QUERY
@@ -37,6 +41,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -108,6 +113,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -174,6 +180,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -239,6 +246,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/create_merge_compressed.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/create_merge_compressed.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/create_merge_compressed.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/create_merge_compressed.q.out Sun Mar  2 02:22:54 2014
@@ -1,18 +1,24 @@
 PREHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_rc_merge_test
 PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test
 PREHOOK: type: LOAD
+#### A masked pattern was here ####
 PREHOOK: Output: default@src_rc_merge_test
 POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test
 POSTHOOK: type: LOAD
+#### A masked pattern was here ####
 POSTHOOK: Output: default@src_rc_merge_test
 PREHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tgt_rc_merge_test
 PREHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
 PREHOOK: type: QUERY

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/cross_join.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/cross_join.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/cross_join.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/cross_join.q.out Sun Mar  2 02:22:54 2014
@@ -13,6 +13,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -72,6 +73,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -131,6 +133,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/ctas.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/ctas.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/ctas.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/ctas.q.out Sun Mar  2 02:22:54 2014
@@ -2,10 +2,12 @@ PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_
 
 create table nzhang_Tmp(a int, b string)
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
 
 create table nzhang_Tmp(a int, b string)
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_Tmp
 PREHOOK: query: select * from nzhang_Tmp
 PREHOOK: type: QUERY
@@ -21,15 +23,10 @@ POSTHOOK: query: explain create table nz
 POSTHOOK: type: CREATETABLE_AS_SELECT
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
-  Stage-5
-  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-9 depends on stages: Stage-2, Stage-0
-  Stage-3 depends on stages: Stage-9
-  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-4
-  Stage-6
-  Stage-7 depends on stages: Stage-6
+  Stage-2 depends on stages: Stage-1
+  Stage-4 depends on stages: Stage-2, Stage-0
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -37,6 +34,7 @@ STAGE PLANS:
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
         Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -80,19 +78,10 @@ STAGE PLANS:
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: default.nzhang_CTAS1
 
-  Stage: Stage-8
-    Conditional Operator
-
-  Stage: Stage-5
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
   Stage: Stage-2
     Dependency Collection
 
-  Stage: Stage-9
+  Stage: Stage-4
       Create Table Operator:
         Create Table
           columns: k string, value string
@@ -109,40 +98,6 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-4
-    Tez
-      Vertices:
-        Merge 
-            Map Operator Tree:
-                TableScan
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.nzhang_CTAS1
-
-  Stage: Stage-6
-    Tez
-      Vertices:
-        Merge 
-            Map Operator Tree:
-                TableScan
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.nzhang_CTAS1
-
-  Stage: Stage-7
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -208,15 +163,10 @@ POSTHOOK: query: explain create table nz
 POSTHOOK: type: CREATETABLE_AS_SELECT
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
-  Stage-5
-  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-9 depends on stages: Stage-2, Stage-0
-  Stage-3 depends on stages: Stage-9
-  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-4
-  Stage-6
-  Stage-7 depends on stages: Stage-6
+  Stage-2 depends on stages: Stage-1
+  Stage-4 depends on stages: Stage-2, Stage-0
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -224,6 +174,7 @@ STAGE PLANS:
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
         Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -267,19 +218,10 @@ STAGE PLANS:
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: default.nzhang_ctas2
 
-  Stage: Stage-8
-    Conditional Operator
-
-  Stage: Stage-5
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
   Stage: Stage-2
     Dependency Collection
 
-  Stage: Stage-9
+  Stage: Stage-4
       Create Table Operator:
         Create Table
           columns: key string, value string
@@ -296,40 +238,6 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-4
-    Tez
-      Vertices:
-        Merge 
-            Map Operator Tree:
-                TableScan
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.nzhang_ctas2
-
-  Stage: Stage-6
-    Tez
-      Vertices:
-        Merge 
-            Map Operator Tree:
-                TableScan
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.nzhang_ctas2
-
-  Stage: Stage-7
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -395,15 +303,10 @@ POSTHOOK: query: explain create table nz
 POSTHOOK: type: CREATETABLE_AS_SELECT
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
-  Stage-5
-  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-9 depends on stages: Stage-2, Stage-0
-  Stage-3 depends on stages: Stage-9
-  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-4
-  Stage-6
-  Stage-7 depends on stages: Stage-6
+  Stage-2 depends on stages: Stage-1
+  Stage-4 depends on stages: Stage-2, Stage-0
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -411,6 +314,7 @@ STAGE PLANS:
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
         Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -454,19 +358,10 @@ STAGE PLANS:
                         serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
                         name: default.nzhang_ctas3
 
-  Stage: Stage-8
-    Conditional Operator
-
-  Stage: Stage-5
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
   Stage: Stage-2
     Dependency Collection
 
-  Stage: Stage-9
+  Stage: Stage-4
       Create Table Operator:
         Create Table
           columns: half_key double, conb string
@@ -484,18 +379,6 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-4
-    Block level merge
-
-  Stage: Stage-6
-    Block level merge
-
-  Stage: Stage-7
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -624,15 +507,10 @@ POSTHOOK: query: explain create table nz
 POSTHOOK: type: CREATETABLE_AS_SELECT
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
-  Stage-5
-  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-9 depends on stages: Stage-2, Stage-0
-  Stage-3 depends on stages: Stage-9
-  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-4
-  Stage-6
-  Stage-7 depends on stages: Stage-6
+  Stage-2 depends on stages: Stage-1
+  Stage-4 depends on stages: Stage-2, Stage-0
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -640,6 +518,7 @@ STAGE PLANS:
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
         Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -683,19 +562,10 @@ STAGE PLANS:
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: default.nzhang_ctas4
 
-  Stage: Stage-8
-    Conditional Operator
-
-  Stage: Stage-5
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
   Stage: Stage-2
     Dependency Collection
 
-  Stage: Stage-9
+  Stage: Stage-4
       Create Table Operator:
         Create Table
           columns: key string, value string
@@ -713,40 +583,6 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-4
-    Tez
-      Vertices:
-        Merge 
-            Map Operator Tree:
-                TableScan
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.nzhang_ctas4
-
-  Stage: Stage-6
-    Tez
-      Vertices:
-        Merge 
-            Map Operator Tree:
-                TableScan
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.nzhang_ctas4
-
-  Stage: Stage-7
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -853,15 +689,10 @@ TOK_CREATETABLE
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
-  Stage-5
-  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-9 depends on stages: Stage-2, Stage-0
-  Stage-3 depends on stages: Stage-9
-  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-4
-  Stage-6
-  Stage-7 depends on stages: Stage-6
+  Stage-2 depends on stages: Stage-1
+  Stage-4 depends on stages: Stage-2, Stage-0
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -869,6 +700,7 @@ STAGE PLANS:
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
         Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -979,19 +811,10 @@ STAGE PLANS:
                     GatherStats: true
                     MultiFileSpray: false
 
-  Stage: Stage-8
-    Conditional Operator
-
-  Stage: Stage-5
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
   Stage: Stage-2
     Dependency Collection
 
-  Stage: Stage-9
+  Stage: Stage-4
       Create Table Operator:
         Create Table
           columns: key string, value string
@@ -1012,140 +835,6 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-4
-    Tez
-      Vertices:
-        Merge 
-            Map Operator Tree:
-                TableScan
-                  GatherStats: false
-                  File Output Operator
-                    compressed: false
-                    GlobalTableId: 0
-#### A masked pattern was here ####
-                    NumFilesPerFileSink: 1
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        properties:
-                          columns _col0,_col1
-                          columns.types string:string
-                          field.delim ,
-                          line.delim 
-
-                          name default.nzhang_ctas5
-                          serialization.format ,
-                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.nzhang_ctas5
-                    TotalFiles: 1
-                    GatherStats: false
-                    MultiFileSpray: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    columns _col0,_col1
-                    columns.types string:string
-                    field.delim ,
-                    line.delim 
-
-                    name default.nzhang_ctas5
-                    serialization.format ,
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0,_col1
-                      columns.types string:string
-                      field.delim ,
-                      line.delim 
-
-                      name default.nzhang_ctas5
-                      serialization.format ,
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.nzhang_ctas5
-                  name: default.nzhang_ctas5
-            Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-6
-    Tez
-      Vertices:
-        Merge 
-            Map Operator Tree:
-                TableScan
-                  GatherStats: false
-                  File Output Operator
-                    compressed: false
-                    GlobalTableId: 0
-#### A masked pattern was here ####
-                    NumFilesPerFileSink: 1
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        properties:
-                          columns _col0,_col1
-                          columns.types string:string
-                          field.delim ,
-                          line.delim 
-
-                          name default.nzhang_ctas5
-                          serialization.format ,
-                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.nzhang_ctas5
-                    TotalFiles: 1
-                    GatherStats: false
-                    MultiFileSpray: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    columns _col0,_col1
-                    columns.types string:string
-                    field.delim ,
-                    line.delim 
-
-                    name default.nzhang_ctas5
-                    serialization.format ,
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0,_col1
-                      columns.types string:string
-                      field.delim ,
-                      line.delim 
-
-                      name default.nzhang_ctas5
-                      serialization.format ,
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.nzhang_ctas5
-                  name: default.nzhang_ctas5
-            Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-7
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -1155,8 +844,10 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@nzhang_ctas5
 PREHOOK: query: create table nzhang_ctas6 (key string, `to` string)
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: create table nzhang_ctas6 (key string, `to` string)
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas6
 PREHOOK: query: insert overwrite table nzhang_ctas6 select key, value from src tablesample (10 rows)
 PREHOOK: type: QUERY

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/custom_input_output_format.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/custom_input_output_format.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/custom_input_output_format.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/custom_input_output_format.q.out Sun Mar  2 02:22:54 2014
@@ -2,10 +2,12 @@ PREHOOK: query: CREATE TABLE src1_rot13_
   STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat'
             OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat'
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: CREATE TABLE src1_rot13_iof(key STRING, value STRING) 
   STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat'
             OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat'
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src1_rot13_iof
 PREHOOK: query: DESCRIBE EXTENDED src1_rot13_iof
 PREHOOK: type: DESCTABLE

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out Sun Mar  2 02:22:54 2014
@@ -1,7 +1,9 @@
 PREHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@bucket2_1
 PREHOOK: query: explain extended
 insert overwrite table bucket2_1
@@ -39,6 +41,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
@@ -196,6 +199,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:

Modified: hive/trunk/ql/src/test/results/clientpositive/tez/enforce_order.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/enforce_order.q.out?rev=1573252&r1=1573251&r2=1573252&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/enforce_order.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/enforce_order.q.out Sun Mar  2 02:22:54 2014
@@ -8,13 +8,17 @@ POSTHOOK: query: drop table table_desc
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table_asc
 PREHOOK: query: create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
 PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
 POSTHOOK: query: create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
 POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table_desc
 PREHOOK: query: insert overwrite table table_asc select key, value from src
 PREHOOK: type: QUERY


