hive-commits mailing list archives

From hashut...@apache.org
Subject svn commit: r1574266 [23/23] - in /hive/trunk: common/src/java/org/apache/hadoop/hive/conf/ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/ metastore/ metastore/if/ metastore/scripts/upgrade/derby/ metastore/scripts/upgrade/mysql/ meta...
Date Wed, 05 Mar 2014 00:20:57 GMT
Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/TxnManagerFactory.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/TxnManagerFactory.java?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/TxnManagerFactory.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/TxnManagerFactory.java Wed Mar  5 00:20:53 2014
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.lockmgr;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * A factory to get an instance of {@link HiveTxnManager}.  This factory
+ * should always be used rather than constructing a transaction manager
+ * directly via reflection.  It reads the configuration to determine which
+ * transaction manager to instantiate, stashes the chosen transaction
+ * manager in the Context object, and subsequently returns it from there so
+ * that if there are multiple Hive threads running,
+ * each will get its appropriate transaction manager.
+ */
+public class TxnManagerFactory {
+
+  private static TxnManagerFactory self;
+
+  /**
+   * Get the singleton instance of this factory.
+   * @return this factory
+   */
+  public static synchronized TxnManagerFactory getTxnManagerFactory() {
+    if (self == null) {
+      self = new TxnManagerFactory();
+    }
+    return self;
+  }
+
+  /**
+   * Create a new transaction manager.  The transaction manager to
+   * instantiate will be determined by the hive.txn.manager value in the
+   * configuration.  This should not be called if a transaction manager has
+   * already been constructed and stored in the Context object.
+   * @param conf HiveConf object used to construct the transaction manager
+   * @return the transaction manager
+   * @throws LockException if there is an error constructing the transaction
+   * manager.
+   */
+  public HiveTxnManager getTxnManager(HiveConf conf) throws
+      LockException {
+    HiveTxnManager txnMgr = null;
+
+    // Determine the transaction manager to use from the configuration.
+    String txnMgrName = conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER);
+    if (txnMgrName == null || txnMgrName.isEmpty()) {
+      throw new LockException(ErrorMsg.TXNMGR_NOT_SPECIFIED.getMsg());
+    }
+
+    // Instantiate the chosen transaction manager
+    try {
+      HiveTxnManagerImpl impl = (HiveTxnManagerImpl)ReflectionUtils.newInstance(
+            conf.getClassByName(txnMgrName), conf);
+      impl.setHiveConf(conf);
+      txnMgr = impl;
+    } catch (ClassNotFoundException e) {
+      throw new LockException(ErrorMsg.TXNMGR_NOT_INSTANTIATED.getMsg());
+    }
+    return txnMgr;
+  }
+
+  private TxnManagerFactory() {
+  }
+}

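For reference, a minimal sketch of how a caller obtains a transaction manager through this factory (assuming only the API shown above; DbTxnManager is the implementation the new tests below configure):

    HiveConf conf = new HiveConf();
    // hive.txn.manager selects which HiveTxnManager implementation to build.
    conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER,
        "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
    HiveTxnManager txnMgr =
        TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
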
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java?rev=1574266&r1=1574265&r2=1574266&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java Wed Mar  5 00:20:53 2014
@@ -18,36 +18,14 @@
 
 package org.apache.hadoop.hive.ql.lockmgr.zookeeper;
 
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLockManagerCtx;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
+import org.apache.hadoop.hive.ql.lockmgr.*;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
-import org.apache.hadoop.hive.ql.lockmgr.LockException;
-import org.apache.hadoop.hive.ql.metadata.DummyPartition;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.metadata.*;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
@@ -55,7 +33,11 @@ import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.ZooKeeper;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.util.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 public class ZooKeeperHiveLockManager implements HiveLockManager {
   HiveLockManagerCtx ctx;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1574266&r1=1574265&r2=1574266&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Wed Mar  5 00:20:53 2014
@@ -70,9 +70,10 @@ import org.apache.hadoop.hive.ql.index.H
 import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
 import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
+import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
+import org.apache.hadoop.hive.ql.lockmgr.LockException;
+import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory;
+import org.apache.hadoop.hive.ql.metadata.*;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.authorization.AuthorizationParseUtils;
@@ -585,7 +586,7 @@ public class DDLSemanticAnalyzer extends
 
   private void addAlterDbDesc(AlterDatabaseDesc alterDesc) throws SemanticException {
     Database database = getDatabase(alterDesc.getDatabaseName());
-    outputs.add(new WriteEntity(database));
+    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_METADATA_ONLY));
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterDesc), conf));
   }
 
@@ -740,12 +741,12 @@ public class DDLSemanticAnalyzer extends
       if (tableNames != null) {
         for (String tableName : tableNames) {
           Table table = getTable(dbName, tableName, true);
-          outputs.add(new WriteEntity(table));
+          outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL));
         }
       }
     }
     inputs.add(new ReadEntity(database));
-    outputs.add(new WriteEntity(database));
+    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL));
 
     DropDatabaseDesc dropDatabaseDesc = new DropDatabaseDesc(dbName, ifExists, ifCascade);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropDatabaseDesc), conf));
@@ -771,7 +772,7 @@ public class DDLSemanticAnalyzer extends
     Table tab = getTable(tableName, throwException);
     if (tab != null) {
       inputs.add(new ReadEntity(tab));
-      outputs.add(new WriteEntity(tab));
+      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL));
     }
 
     DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists);
@@ -796,19 +797,19 @@ public class DDLSemanticAnalyzer extends
     Map<String, String> partSpec = getPartSpec((ASTNode) root.getChild(1));
     if (partSpec == null) {
       if (!table.isPartitioned()) {
-        outputs.add(new WriteEntity(table));
+        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL));
       } else {
         for (Partition partition : getPartitions(table, null, false)) {
-          outputs.add(new WriteEntity(partition));
+          outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL));
         }
       }
     } else {
       if (isFullSpec(table, partSpec)) {
         Partition partition = getPartition(table, partSpec, true);
-        outputs.add(new WriteEntity(partition));
+        outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL));
       } else {
         for (Partition partition : getPartitions(table, partSpec, false)) {
-          outputs.add(new WriteEntity(partition));
+          outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL));
         }
       }
     }
@@ -1347,17 +1348,17 @@ public class DDLSemanticAnalyzer extends
     Table tab = getTable(tableName, true);
     if (partSpec == null || partSpec.isEmpty()) {
       inputs.add(new ReadEntity(tab));
-      outputs.add(new WriteEntity(tab));
+      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL));
     }
     else {
       inputs.add(new ReadEntity(tab));
       if (desc == null || desc.getOp() != AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
         Partition part = getPartition(tab, partSpec, true);
-        outputs.add(new WriteEntity(part));
+        outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL));
       }
       else {
         for (Partition part : getPartitions(tab, partSpec, true)) {
-          outputs.add(new WriteEntity(part));
+          outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL));
         }
       }
     }
@@ -2236,7 +2237,7 @@ public class DDLSemanticAnalyzer extends
     ctx.setNeedLockMgr(true);
   }
 
-  /**
+   /**
    * Add the task according to the parsed command tree. This is used for the CLI
    * command "LOCK TABLE ..;".
    *
@@ -2566,7 +2567,7 @@ public class DDLSemanticAnalyzer extends
     Table tab = getTable(tblName, true);
     boolean isView = tab.isView();
     validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView);
-    outputs.add(new WriteEntity(tab));
+    outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL));
 
     int numCh = ast.getChildCount();
     int start = ifNotExists ? 2 : 1;
@@ -2666,7 +2667,7 @@ public class DDLSemanticAnalyzer extends
     try {
       Partition partition = db.getPartition(tab, currentPart, false);
       if (partition != null) {
-        outputs.add(new WriteEntity(partition));
+        outputs.add(new WriteEntity(partition, WriteEntity.WriteType.INSERT));
       }
       return partition;
     } catch (HiveException e) {
@@ -2700,7 +2701,7 @@ public class DDLSemanticAnalyzer extends
       AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc(
           SessionState.get().getCurrentDatabase(), tblName, null,
           AlterTableDesc.AlterTableTypes.TOUCH);
-      outputs.add(new WriteEntity(tab));
+      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_METADATA_ONLY));
       rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
           touchDesc), conf));
     } else {
@@ -2965,7 +2966,7 @@ public class DDLSemanticAnalyzer extends
         }
       }
       for (Partition p : parts) {
-        outputs.add(new WriteEntity(p));
+        outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL));
       }
     }
   }
@@ -3009,7 +3010,7 @@ public class DDLSemanticAnalyzer extends
             throw new SemanticException(
               ErrorMsg.DROP_COMMAND_NOT_ALLOWED_FOR_PARTITION.getMsg(p.getCompleteName()));
           }
-          outputs.add(new WriteEntity(p));
+          outputs.add(new WriteEntity(p, WriteEntity.WriteType.DELETE));
         }
       }
     }
@@ -3033,7 +3034,7 @@ public class DDLSemanticAnalyzer extends
     Table tab = getTable(tableName, true);
 
     inputs.add(new ReadEntity(tab));
-    outputs.add(new WriteEntity(tab));
+    outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL));
 
     validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY);
 

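Every hunk above threads a WriteEntity.WriteType through what used to be a one-argument WriteEntity constructor. Reconstructed for reference from the call sites in this commit (the actual enum in WriteEntity may define additional values), the types in play are:

    // Reconstructed from the usages in this commit; not the verbatim enum body.
    public enum WriteType {
      DDL,                // schema-changing DDL (DROP, ALTER, ADD PARTITION, ...)
      DDL_METADATA_ONLY,  // metadata-only DDL, treated like a read
      INSERT,             // appends rows
      INSERT_OVERWRITE,   // replaces existing data
      UPDATE,             // exercised by the DbTxnManager tests below
      DELETE              // e.g. dropping a partition's data
    }
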
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java?rev=1574266&r1=1574265&r2=1574266&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java Wed Mar  5 00:20:53 2014
@@ -18,18 +18,6 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
 import org.antlr.runtime.tree.Tree;
 import org.apache.commons.lang.ObjectUtils;
 import org.apache.hadoop.fs.FileStatus;
@@ -50,15 +38,15 @@ import org.apache.hadoop.hive.ql.hooks.W
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
-import org.apache.hadoop.hive.ql.plan.CopyWork;
-import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
-import org.apache.hadoop.hive.ql.plan.DDLWork;
-import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
-import org.apache.hadoop.hive.ql.plan.MoveWork;
+import org.apache.hadoop.hive.ql.plan.*;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde.serdeConstants;
 
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.*;
+
 /**
  * ImportSemanticAnalyzer.
  *
@@ -237,7 +225,8 @@ public class ImportSemanticAnalyzer exte
               .toString()));
           loadTable(fromURI, table);
         }
-        outputs.add(new WriteEntity(table));
+        // Mark this write metadata-only (treated like a read) because we can't overwrite any existing partitions
+        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_METADATA_ONLY));
       } catch (InvalidTableException e) {
         LOG.debug("table " + tblDesc.getTableName() + " does not exist");
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java?rev=1574266&r1=1574265&r2=1574266&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java Wed Mar  5 00:20:53 2014
@@ -18,14 +18,6 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.io.IOException;
-import java.io.Serializable;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
 import org.antlr.runtime.tree.Tree;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.FileStatus;
@@ -47,6 +39,14 @@ import org.apache.hadoop.hive.ql.plan.Lo
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.StatsWork;
 
+import java.io.IOException;
+import java.io.Serializable;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
 /**
  * LoadSemanticAnalyzer.
  *
@@ -238,7 +238,9 @@ public class LoadSemanticAnalyzer extend
     Map<String, String> partSpec = ts.getPartSpec();
     if (partSpec == null) {
       partSpec = new LinkedHashMap<String, String>();
-      outputs.add(new WriteEntity(ts.tableHandle));
+      outputs.add(new WriteEntity(ts.tableHandle,
+          (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE :
+              WriteEntity.WriteType.INSERT)));
     } else {
       try{
         Partition part = Hive.get().getPartition(ts.tableHandle, partSpec, false);
@@ -247,9 +249,13 @@ public class LoadSemanticAnalyzer extend
             throw new SemanticException(ErrorMsg.OFFLINE_TABLE_OR_PARTITION.
                 getMsg(ts.tableName + ":" + part.getName()));
           }
-          outputs.add(new WriteEntity(part));
+          outputs.add(new WriteEntity(part,
+          (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE :
+              WriteEntity.WriteType.INSERT)));
         } else {
-          outputs.add(new WriteEntity(ts.tableHandle));
+          outputs.add(new WriteEntity(ts.tableHandle,
+          (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE :
+              WriteEntity.WriteType.INSERT)));
         }
       } catch(HiveException e) {
         throw new SemanticException(e);

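The same isOverWrite ternary appears three times in the hunk above; a hypothetical helper (not part of this commit) that names the mapping would be:

    // Hypothetical refactoring: LOAD ... OVERWRITE maps to INSERT_OVERWRITE,
    // a plain LOAD maps to INSERT.
    private static WriteEntity.WriteType loadWriteType(boolean isOverWrite) {
      return isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE
                         : WriteEntity.WriteType.INSERT;
    }
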
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1574266&r1=1574265&r2=1574266&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Wed Mar  5 00:20:53 2014
@@ -5607,7 +5607,7 @@ public class SemanticAnalyzer extends Ba
       // in the case of DP, we will register WriteEntity in MoveTask when the
       // list of dynamically created partitions are known.
       if ((dpCtx == null || dpCtx.getNumDPCols() == 0)) {
-        output = new WriteEntity(dest_tab);
+        output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable));
         if (!outputs.add(output)) {
           throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES
               .getMsg(dest_tab.getTableName()));
@@ -5616,7 +5616,7 @@ public class SemanticAnalyzer extends Ba
       if ((dpCtx != null) && (dpCtx.getNumDPCols() >= 0)) {
         // No static partition specified
         if (dpCtx.getNumSPCols() == 0) {
-          output = new WriteEntity(dest_tab, false);
+          output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable), false);
           outputs.add(output);
         }
         // part of the partition specified
@@ -5630,7 +5630,7 @@ public class SemanticAnalyzer extends Ba
                 new DummyPartition(dest_tab, dest_tab.getDbName()
                     + "@" + dest_tab.getTableName() + "@" + ppath,
                     partSpec);
-            output = new WriteEntity(p, false);
+            output = new WriteEntity(p, WriteEntity.WriteType.INSERT, false);
             outputs.add(output);
           } catch (HiveException e) {
             throw new SemanticException(e.getMessage(), e);
@@ -5717,7 +5717,9 @@ public class SemanticAnalyzer extends Ba
         ltd.setHoldDDLTime(true);
       }
       loadTableWork.add(ltd);
-      if (!outputs.add(new WriteEntity(dest_part))) {
+      if (!outputs.add(new WriteEntity(dest_part, (ltd.getReplace() ?
+          WriteEntity.WriteType.INSERT_OVERWRITE :
+          WriteEntity.WriteType.INSERT)))) {
         throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES
             .getMsg(dest_tab.getTableName() + "@" + dest_part.getName()));
       }
@@ -8823,7 +8825,7 @@ public class SemanticAnalyzer extends Ba
       tsDesc.setStatsAggPrefix(tab.getDbName()+"."+k);
 
       // set up WriteEntity for replication
-      outputs.add(new WriteEntity(tab));
+      outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_METADATA_ONLY));
 
       // add WriteEntity for each matching partition
       if (tab.isPartitioned()) {
@@ -8834,7 +8836,7 @@ public class SemanticAnalyzer extends Ba
         if (partitions != null) {
           for (Partition partn : partitions) {
             // inputs.add(new ReadEntity(partn)); // is this needed at all?
-            outputs.add(new WriteEntity(partn));
+            outputs.add(new WriteEntity(partn, WriteEntity.WriteType.DDL_METADATA_ONLY));
           }
         }
       }
@@ -9883,7 +9885,7 @@ public class SemanticAnalyzer extends Ba
     String[] qualified = Hive.getQualifiedNames(tableName);
     String dbName = qualified.length == 1 ? SessionState.get().getCurrentDatabase() : qualified[0];
     Database database  = getDatabase(dbName);
-    outputs.add(new WriteEntity(database));
+    outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_METADATA_ONLY));
     // Handle different types of CREATE TABLE command
     CreateTableDesc crtTblDesc = null;
     switch (command_type) {
@@ -11261,4 +11263,13 @@ public class SemanticAnalyzer extends Ba
 		  gByRR.put(tab_alias, col_alias, colInfo);
 	  }
   }
+
+  private WriteEntity.WriteType determineWriteType(LoadTableDesc ltd, boolean isNonNativeTable) {
+    // Don't know the characteristics of non-native tables,
+    // and don't have a rational way to guess, so assume the most
+    // conservative case.
+    if (isNonNativeTable) return WriteEntity.WriteType.INSERT_OVERWRITE;
+    else return (ltd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE :
+        WriteEntity.WriteType.INSERT);
+  }
 }

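A quick trace of the new determineWriteType helper (illustrative comments only, not part of the commit):

    // native table, INSERT INTO      -> ltd.getReplace() == false -> INSERT
    // native table, INSERT OVERWRITE -> ltd.getReplace() == true  -> INSERT_OVERWRITE
    // non-native (storage handler)   -> always INSERT_OVERWRITE, the most
    //                                   conservative assumption available
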
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java?rev=1574266&r1=1574265&r2=1574266&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java Wed Mar  5 00:20:53 2014
@@ -250,9 +250,9 @@ public class HiveAuthorizationTaskFactor
       Table tbl = getTable(SessionState.get().getCurrentDatabase(), subject.getObject());
       if (subject.getPartSpec() != null) {
         Partition part = getPartition(tbl, subject.getPartSpec());
-        outputs.add(new WriteEntity(part));
+        outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_METADATA_ONLY));
       } else {
-        outputs.add(new WriteEntity(tbl));
+        outputs.add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_METADATA_ONLY));
       }
     }
 

Added: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java (added)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java Wed Mar  5 00:20:53 2014
@@ -0,0 +1,276 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.lockmgr;
+
+import junit.framework.Assert;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.ql.QueryPlan;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.*;
+
+/**
+ * Unit tests for {@link DbTxnManager}.
+ */
+public class TestDbTxnManager {
+
+  private HiveConf conf = new HiveConf();
+  private HiveTxnManager txnMgr;
+  private Context ctx;
+  private int nextInput;
+  private int nextOutput;
+  HashSet<ReadEntity> readEntities;
+  HashSet<WriteEntity> writeEntities;
+
+  public TestDbTxnManager() throws Exception {
+    TxnDbUtil.setConfValues(conf);
+    ctx = new Context(conf);
+    LogManager.getRootLogger().setLevel(Level.DEBUG);
+    tearDown();
+  }
+
+  @Test
+  public void testSingleReadTable() throws Exception {
+    addTableInput();
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(1,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.getLockManager().unlock(locks.get(0));
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
+  @Test
+  public void testSingleReadPartition() throws Exception {
+    addPartitionInput(newTable(true));
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.acquireLocks(qp, ctx, null);
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(1,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.getLockManager().unlock(locks.get(0));
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+
+  }
+
+  @Test
+  public void testSingleReadMultiPartition() throws Exception {
+    Table t = newTable(true);
+    addPartitionInput(t);
+    addPartitionInput(t);
+    addPartitionInput(t);
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(3,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.getLockManager().unlock(locks.get(0));
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
+  @Test
+  public void testJoin() throws Exception {
+    Table t = newTable(true);
+    addPartitionInput(t);
+    addPartitionInput(t);
+    addPartitionInput(t);
+    addTableInput();
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(4,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.getLockManager().unlock(locks.get(0));
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
+  @Test
+  public void testSingleWriteTable() throws Exception {
+    WriteEntity we = addTableOutput(WriteEntity.WriteType.INSERT);
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(1,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.getLockManager().unlock(locks.get(0));
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
+  @Test
+  public void testReadWrite() throws Exception {
+    Table t = newTable(true);
+    addPartitionInput(t);
+    addPartitionInput(t);
+    addPartitionInput(t);
+    WriteEntity we = addTableOutput(WriteEntity.WriteType.INSERT);
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(4,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.getLockManager().unlock(locks.get(0));
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
+  @Test
+  public void testUpdate() throws Exception {
+    WriteEntity we = addTableOutput(WriteEntity.WriteType.UPDATE);
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(1,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.getLockManager().unlock(locks.get(0));
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
+  @Test
+  public void testDelete() throws Exception {
+    WriteEntity we = addTableOutput(WriteEntity.WriteType.DELETE);
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(1,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.getLockManager().unlock(locks.get(0));
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
+  @Test
+  public void testDDL() throws Exception {
+    WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL);
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(1,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.getLockManager().unlock(locks.get(0));
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    TxnDbUtil.prepDb();
+    txnMgr = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
+    Assert.assertTrue(txnMgr instanceof DbTxnManager);
+    nextInput = 1;
+    nextOutput = 1;
+    readEntities = new HashSet<ReadEntity>();
+    writeEntities = new HashSet<WriteEntity>();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TxnDbUtil.cleanDb();
+  }
+
+  private static class MockQueryPlan extends QueryPlan {
+    private HashSet<ReadEntity> inputs;
+    private HashSet<WriteEntity> outputs;
+
+    MockQueryPlan(TestDbTxnManager test) {
+      HashSet<ReadEntity> r = test.readEntities;
+      HashSet<WriteEntity> w = test.writeEntities;
+      inputs = (r == null) ? new HashSet<ReadEntity>() : r;
+      outputs = (w == null) ? new HashSet<WriteEntity>() : w;
+    }
+
+    @Override
+    public HashSet<ReadEntity> getInputs() {
+      return inputs;
+    }
+
+    @Override
+    public HashSet<WriteEntity> getOutputs() {
+      return outputs;
+    }
+  }
+
+  private Table newTable(boolean isPartitioned) {
+    Table t = new Table("default", "table" + Integer.toString(nextInput++));
+    if (isPartitioned) {
+      FieldSchema fs = new FieldSchema();
+      fs.setName("version");
+      fs.setType("String");
+      List<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
+      partCols.add(fs);
+      t.setPartCols(partCols);
+    }
+    return t;
+  }
+
+  private void addTableInput() {
+    ReadEntity re = new ReadEntity(newTable(false));
+    readEntities.add(re);
+  }
+
+  private void addPartitionInput(Table t) throws Exception {
+    Map<String, String> partSpec = new HashMap<String, String>();
+    partSpec.put("version", Integer.toString(nextInput++));
+    Partition p = new Partition(t, partSpec, new Path("/dev/null"));
+    ReadEntity re = new ReadEntity(p);
+    readEntities.add(re);
+  }
+
+  private WriteEntity addTableOutput(WriteEntity.WriteType writeType) {
+    WriteEntity we = new WriteEntity(newTable(false), writeType);
+    writeEntities.add(we);
+    return we;
+  }
+
+  private WriteEntity addPartitionOutput(Table t, WriteEntity.WriteType writeType)
+      throws Exception {
+    Map<String, String> partSpec = new HashMap<String, String>();
+    partSpec.put("version", Integer.toString(nextInput++));
+    Partition p = new Partition(t, partSpec, new Path("/dev/null"));
+    WriteEntity we = new WriteEntity(p, writeType);
+    writeEntities.add(we);
+    return we;
+  }
+}

Added: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java (added)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java Wed Mar  5 00:20:53 2014
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.lockmgr;
+
+import junit.framework.Assert;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+public class TestDummyTxnManager {
+
+  @Test
+  public void testDedupLockObjects() {
+    List<HiveLockObj> lockObjs = new ArrayList<HiveLockObj>();
+    String path1 = "path1";
+    String path2 = "path2";
+    HiveLockObjectData lockData1 = new HiveLockObjectData(
+        "query1", "1", "IMPLICIT", "drop table table1");
+    HiveLockObjectData lockData2 = new HiveLockObjectData(
+        "query1", "1", "IMPLICIT", "drop table table1");
+
+    // Start with the following locks:
+    // [path1, shared]
+    // [path1, exclusive]
+    // [path2, shared]
+    // [path2, shared]
+    // [path2, shared]
+    lockObjs.add(new HiveLockObj(new HiveLockObject(path1, lockData1), HiveLockMode.SHARED));
+    String name1 = lockObjs.get(lockObjs.size() - 1).getName();
+    lockObjs.add(new HiveLockObj(new HiveLockObject(path1, lockData1), HiveLockMode.EXCLUSIVE));
+    lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED));
+    String name2 = lockObjs.get(lockObjs.size() - 1).getName();
+    lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED));
+    lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED));
+
+    DummyTxnManager.dedupLockObjects(lockObjs);
+
+    // After dedup we should be left with 2 locks:
+    // [path1, exclusive]
+    // [path2, shared]
+    Assert.assertEquals("Locks should be deduped", 2, lockObjs.size());
+
+    Comparator<HiveLockObj> cmp = new Comparator<HiveLockObj>() {
+      public int compare(HiveLockObj lock1, HiveLockObj lock2) {
+        return lock1.getName().compareTo(lock2.getName());
+      }
+    };
+    Collections.sort(lockObjs, cmp);
+
+    HiveLockObj lockObj = lockObjs.get(0);
+    Assert.assertEquals(name1, lockObj.getName());
+    Assert.assertEquals(HiveLockMode.EXCLUSIVE, lockObj.getMode());
+
+    lockObj = lockObjs.get(1);
+    Assert.assertEquals(name2, lockObj.getName());
+    Assert.assertEquals(HiveLockMode.SHARED, lockObj.getMode());
+  }
+}

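The test above pins down the dedup contract; a minimal sketch of logic that satisfies it (assuming HiveLockObj exposes the getName()/getMode() accessors used above; this is not the committed DummyTxnManager.dedupLockObjects):

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    static void dedupLockObjects(List<HiveLockObj> lockObjs) {
      Map<String, HiveLockObj> seen = new LinkedHashMap<String, HiveLockObj>();
      for (HiveLockObj lock : lockObjs) {
        HiveLockObj prev = seen.get(lock.getName());
        // Keep one lock per name; an EXCLUSIVE request subsumes a SHARED one.
        if (prev == null || (prev.getMode() == HiveLockMode.SHARED
            && lock.getMode() == HiveLockMode.EXCLUSIVE)) {
          seen.put(lock.getName(), lock);
        }
      }
      lockObjs.clear();
      lockObjs.addAll(seen.values());
    }
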
Added: hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_nodblock.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_nodblock.q?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_nodblock.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_nodblock.q Wed Mar  5 00:20:53 2014
@@ -0,0 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop database if exists drop_nodblock;
+create database drop_nodblock;
+lock database drop_nodblock shared;

Added: hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_nodbunlock.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_nodbunlock.q?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_nodbunlock.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_nodbunlock.q Wed Mar  5 00:20:53 2014
@@ -0,0 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop database if exists drop_nodbunlock;
+create database drop_nodbunlock;
+unlock database drop_nodbunlock;

Added: hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_notablelock.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_notablelock.q?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_notablelock.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_notablelock.q Wed Mar  5 00:20:53 2014
@@ -0,0 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop table if exists drop_notablelock;
+create table drop_notablelock (c int);
+lock table drop_notablelock shared;

Added: hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_notableunlock.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_notableunlock.q?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_notableunlock.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/dbtxnmgr_notableunlock.q Wed Mar  5 00:20:53 2014
@@ -0,0 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop table if exists drop_notableunlock;
+create table drop_notableunlock (c int);
+unlock table drop_notableunlock;

Added: hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query1.q?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query1.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query1.q Wed Mar  5 00:20:53 2014
@@ -0,0 +1,17 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table T1(key string, val string) stored as textfile;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+select * from T1;
+
+create table T2(key string, val string) stored as textfile;
+
+insert into table T2 select * from T1;
+
+select * from T2;
+
+drop table T1;
+drop table T2;

Added: hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query2.q?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query2.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query2.q Wed Mar  5 00:20:53 2014
@@ -0,0 +1,17 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table T1(key string, val string) stored as textfile;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+select * from T1;
+
+create table T2(key string, val string) stored as textfile;
+
+insert overwrite table T2 select * from T1;
+
+select * from T2;
+
+drop table T1;
+drop table T2;

Added: hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query3.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query3.q?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query3.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query3.q Wed Mar  5 00:20:53 2014
@@ -0,0 +1,21 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table T1(key string, val string) stored as textfile;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+select * from T1;
+
+create table T2(key string, val string) partitioned by (pval string) stored as textfile;
+
+insert into table T2 partition (pval = '1') select * from T1;
+
+select * from T2;
+
+insert overwrite table T2 partition (pval = '1') select * from T1;
+
+select * from T2;
+
+drop table T1;
+drop table T2;

Added: hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query4.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query4.q?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query4.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query4.q Wed Mar  5 00:20:53 2014
@@ -0,0 +1,19 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.exec.dynamic.partition=true;
+
+create table T1(key string, val string) stored as textfile;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+select * from T1;
+
+create table T2(key string) partitioned by (val string) stored as textfile;
+
+insert overwrite table T2 partition (val) select key, val from T1;
+
+select * from T2;
+
+drop table T1;
+drop table T2;

Added: hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query5.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query5.q?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query5.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/dbtxnmgr_query5.q Wed Mar  5 00:20:53 2014
@@ -0,0 +1,24 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create database foo;
+
+use foo;
+
+create table T1(key string, val string) partitioned by (ds string) stored as textfile;
+
+alter table T1 add partition (ds='today');
+
+create view V1 as select key from T1;
+
+show tables;
+
+describe T1;
+
+drop view V1;
+
+drop table T1;
+
+show databases;
+
+drop database foo;

Added: hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out (added)
+++ hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out Wed Mar  5 00:20:53 2014
@@ -0,0 +1,11 @@
+PREHOOK: query: drop database if exists drop_nodblock
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: drop database if exists drop_nodblock
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: create database drop_nodblock
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: create database drop_nodblock
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: lock database drop_nodblock shared
+PREHOOK: type: LOCKDATABASE
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager

Added: hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out (added)
+++ hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out Wed Mar  5 00:20:53 2014
@@ -0,0 +1,11 @@
+PREHOOK: query: drop database if exists drop_nodbunlock
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: drop database if exists drop_nodbunlock
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: create database drop_nodbunlock
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: create database drop_nodbunlock
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: unlock database drop_nodbunlock
+PREHOOK: type: UNLOCKDATABASE
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager

Added: hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out (added)
+++ hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out Wed Mar  5 00:20:53 2014
@@ -0,0 +1,14 @@
+PREHOOK: query: drop table if exists drop_notablelock
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists drop_notablelock
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table drop_notablelock (c int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table drop_notablelock (c int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@drop_notablelock
+PREHOOK: query: lock table drop_notablelock shared
+PREHOOK: type: LOCKTABLE
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager

Added: hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out (added)
+++ hive/trunk/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out Wed Mar  5 00:20:53 2014
@@ -0,0 +1,14 @@
+PREHOOK: query: drop table if exists drop_notableunlock
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists drop_notableunlock
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table drop_notableunlock (c int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table drop_notableunlock (c int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@drop_notableunlock
+PREHOOK: query: unlock table drop_notableunlock
+PREHOOK: type: UNLOCKTABLE
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager

Added: hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query1.q.out?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query1.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query1.q.out Wed Mar  5 00:20:53 2014
@@ -0,0 +1,82 @@
+PREHOOK: query: create table T1(key string, val string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T1(key string, val string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@t1
+PREHOOK: query: select * from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1	11
+2	12
+3	13
+7	17
+8	18
+8	28
+PREHOOK: query: create table T2(key string, val string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T2(key string, val string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T2
+PREHOOK: query: insert into table T2 select * from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t2
+POSTHOOK: query: insert into table T2 select * from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: select * from T2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+1	11
+2	12
+3	13
+7	17
+8	18
+8	28
+PREHOOK: query: drop table T1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
+POSTHOOK: query: drop table T1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: drop table T2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@t2
+PREHOOK: Output: default@t2
+POSTHOOK: query: drop table T2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]

Added: hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query2.q.out?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query2.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query2.q.out Wed Mar  5 00:20:53 2014
@@ -0,0 +1,82 @@
+PREHOOK: query: create table T1(key string, val string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T1(key string, val string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@t1
+PREHOOK: query: select * from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1	11
+2	12
+3	13
+7	17
+8	18
+8	28
+PREHOOK: query: create table T2(key string, val string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T2(key string, val string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T2
+PREHOOK: query: insert overwrite table T2 select * from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t2
+POSTHOOK: query: insert overwrite table T2 select * from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: select * from T2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+1	11
+2	12
+3	13
+7	17
+8	18
+8	28
+PREHOOK: query: drop table T1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
+POSTHOOK: query: drop table T1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: drop table T2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@t2
+PREHOOK: Output: default@t2
+POSTHOOK: query: drop table T2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]

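The driver script behind this golden file can be read back from the PREHOOK: query lines above. A minimal sketch of what dbtxnmgr_query2.q likely contains follows; the hive.txn.manager setting is an assumption inferred from the test name (it does not appear in the .q.out itself), and DbTxnManager is the transaction manager this commit introduces.

-- Hypothetical reconstruction of dbtxnmgr_query2.q, inferred from the
-- PREHOOK: query lines in the golden output above.
-- The set statement is assumed from the test name, not shown in the output.
set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

create table T1(key string, val string) stored as textfile;
LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
select * from T1;
create table T2(key string, val string) stored as textfile;
insert overwrite table T2 select * from T1;
select * from T2;
drop table T1;
drop table T2;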
Added: hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query3.q.out?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query3.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query3.q.out Wed Mar  5 00:20:53 2014
@@ -0,0 +1,120 @@
+PREHOOK: query: create table T1(key string, val string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T1(key string, val string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@t1
+PREHOOK: query: select * from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1	11
+2	12
+3	13
+7	17
+8	18
+8	28
+PREHOOK: query: create table T2(key string, val string) partitioned by (pval string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T2(key string, val string) partitioned by (pval string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T2
+PREHOOK: query: insert into table T2 partition (pval = '1') select * from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t2@pval=1
+POSTHOOK: query: insert into table T2 partition (pval = '1') select * from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t2@pval=1
+POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: select * from T2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@pval=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@pval=1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+1	11	1
+2	12	1
+3	13	1
+7	17	1
+8	18	1
+8	28	1
+PREHOOK: query: insert overwrite table T2 partition (pval = '1') select * from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t2@pval=1
+POSTHOOK: query: insert overwrite table T2 partition (pval = '1') select * from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t2@pval=1
+POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: select * from T2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@pval=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@pval=1
+#### A masked pattern was here ####
+POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+1	11	1
+2	12	1
+3	13	1
+7	17	1
+8	18	1
+8	28	1
+PREHOOK: query: drop table T1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
+POSTHOOK: query: drop table T1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: drop table T2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@t2
+PREHOOK: Output: default@t2
+POSTHOOK: query: drop table T2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]

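Read back the same way, dbtxnmgr_query3.q exercises static-partition inserts (both INSERT INTO and INSERT OVERWRITE against a fixed partition) under the new transaction manager. A sketch, with the txn-manager setting again assumed rather than shown:

-- Hypothetical reconstruction of dbtxnmgr_query3.q, from the PREHOOK: query
-- lines above; the set statement is an assumption based on the test name.
set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

create table T1(key string, val string) stored as textfile;
LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
select * from T1;
create table T2(key string, val string) partitioned by (pval string) stored as textfile;
insert into table T2 partition (pval = '1') select * from T1;
select * from T2;
insert overwrite table T2 partition (pval = '1') select * from T1;
select * from T2;
drop table T1;
drop table T2;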
Added: hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query4.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query4.q.out?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query4.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query4.q.out Wed Mar  5 00:20:53 2014
@@ -0,0 +1,115 @@
+PREHOOK: query: create table T1(key string, val string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T1(key string, val string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@t1
+PREHOOK: query: select * from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1	11
+2	12
+3	13
+7	17
+8	18
+8	28
+PREHOOK: query: create table T2(key string) partitioned by (val string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table T2(key string) partitioned by (val string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T2
+PREHOOK: query: insert overwrite table T2 partition (val) select key, val from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t2
+POSTHOOK: query: insert overwrite table T2 partition (val) select key, val from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t2@val=11
+POSTHOOK: Output: default@t2@val=12
+POSTHOOK: Output: default@t2@val=13
+POSTHOOK: Output: default@t2@val=17
+POSTHOOK: Output: default@t2@val=18
+POSTHOOK: Output: default@t2@val=28
+POSTHOOK: Lineage: t2 PARTITION(val=11).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=12).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=13).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=17).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=18).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=28).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: select * from T2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@val=11
+PREHOOK: Input: default@t2@val=12
+PREHOOK: Input: default@t2@val=13
+PREHOOK: Input: default@t2@val=17
+PREHOOK: Input: default@t2@val=18
+PREHOOK: Input: default@t2@val=28
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@val=11
+POSTHOOK: Input: default@t2@val=12
+POSTHOOK: Input: default@t2@val=13
+POSTHOOK: Input: default@t2@val=17
+POSTHOOK: Input: default@t2@val=18
+POSTHOOK: Input: default@t2@val=28
+#### A masked pattern was here ####
+POSTHOOK: Lineage: t2 PARTITION(val=11).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=12).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=13).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=17).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=18).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=28).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+1	11
+2	12
+3	13
+7	17
+8	18
+8	28
+PREHOOK: query: drop table T1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
+POSTHOOK: query: drop table T1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t2 PARTITION(val=11).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=12).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=13).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=17).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=18).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=28).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: drop table T2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@t2
+PREHOOK: Output: default@t2
+POSTHOOK: query: drop table T2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2 PARTITION(val=11).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=12).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=13).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=17).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=18).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2 PARTITION(val=28).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]

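This file covers the dynamic-partition case: the partition column val is left unspecified in the INSERT and one partition is created per distinct value, as the six POSTHOOK: Output partitions above show. A sketch of dbtxnmgr_query4.q; both set statements are assumptions (dynamic-partition inserts of this form typically require nonstrict mode, and neither setting is visible in the output):

-- Hypothetical reconstruction of dbtxnmgr_query4.q, from the PREHOOK: query
-- lines above; both set statements are assumed, not shown in the output.
set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
set hive.exec.dynamic.partition.mode=nonstrict;

create table T1(key string, val string) stored as textfile;
LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
select * from T1;
create table T2(key string) partitioned by (val string) stored as textfile;
insert overwrite table T2 partition (val) select key, val from T1;
select * from T2;
drop table T1;
drop table T2;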
Added: hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out?rev=1574266&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out Wed Mar  5 00:20:53 2014
@@ -0,0 +1,77 @@
+PREHOOK: query: create database foo
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: create database foo
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: use foo
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: use foo
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: create table T1(key string, val string) partitioned by (ds string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:foo
+POSTHOOK: query: create table T1(key string, val string) partitioned by (ds string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:foo
+POSTHOOK: Output: foo@T1
+PREHOOK: query: alter table T1 add partition (ds='today')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: foo@t1
+POSTHOOK: query: alter table T1 add partition (ds='today')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: foo@t1
+POSTHOOK: Output: foo@t1@ds=today
+PREHOOK: query: create view V1 as select key from T1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: foo@t1
+POSTHOOK: query: create view V1 as select key from T1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: foo@t1
+POSTHOOK: Output: foo@V1
+PREHOOK: query: show tables
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: show tables
+POSTHOOK: type: SHOWTABLES
+t1
+v1
+PREHOOK: query: describe T1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe T1
+POSTHOOK: type: DESCTABLE
+key                 	string              	None                
+val                 	string              	None                
+ds                  	string              	None                
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	None                
+PREHOOK: query: drop view V1
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: foo@v1
+PREHOOK: Output: foo@v1
+POSTHOOK: query: drop view V1
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: foo@v1
+POSTHOOK: Output: foo@v1
+PREHOOK: query: drop table T1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: foo@t1
+PREHOOK: Output: foo@t1
+POSTHOOK: query: drop table T1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: foo@t1
+POSTHOOK: Output: foo@t1
+PREHOOK: query: show databases
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: show databases
+POSTHOOK: type: SHOWDATABASES
+default
+foo
+PREHOOK: query: drop database foo
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:foo
+PREHOOK: Output: database:foo
+POSTHOOK: query: drop database foo
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:foo
+POSTHOOK: Output: database:foo

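The last golden file runs pure DDL through the transaction manager: database, partitioned table, partition, and view lifecycle, plus the SHOW/DESCRIBE metadata reads. A sketch of dbtxnmgr_query5.q, with the txn-manager setting once more assumed from the test name:

-- Hypothetical reconstruction of dbtxnmgr_query5.q, from the PREHOOK: query
-- lines above; the set statement is an assumption based on the test name.
set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

create database foo;
use foo;
create table T1(key string, val string) partitioned by (ds string) stored as textfile;
alter table T1 add partition (ds='today');
create view V1 as select key from T1;
show tables;
describe T1;
drop view V1;
drop table T1;
show databases;
drop database foo;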

