hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From w...@apache.org
Subject hive git commit: HIVE-11793 : SHOW LOCKS with DbTxnManager ignores filter options (Wei Zheng, reviewed by Eugene Koifman)
Date Wed, 11 May 2016 21:31:24 GMT
Repository: hive
Updated Branches:
  refs/heads/branch-1 763c41333 -> 6c160bc1c


HIVE-11793 : SHOW LOCKS with DbTxnManager ignores filter options (Wei Zheng, reviewed by Eugene
Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6c160bc1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6c160bc1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6c160bc1

Branch: refs/heads/branch-1
Commit: 6c160bc1cdd2290861623e6437784ee39ca4eb91
Parents: 763c413
Author: Wei Zheng <weiz@apache.org>
Authored: Wed May 11 14:30:55 2016 -0700
Committer: Wei Zheng <weiz@apache.org>
Committed: Wed May 11 14:30:55 2016 -0700

----------------------------------------------------------------------
 .../hive/hcatalog/streaming/TestStreaming.java  |   8 +-
 .../hive/metastore/HiveMetaStoreClient.java     |   6 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |  10 ++
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  28 +++++
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  25 +++-
 .../hadoop/hive/ql/lockmgr/DbLockManager.java   |   6 +-
 .../hadoop/hive/ql/plan/ShowLocksDesc.java      |   4 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java      | 126 +++++++++++++++++++
 .../queries/clientpositive/dbtxnmgr_showlocks.q |  14 +++
 .../clientpositive/dbtxnmgr_showlocks.q.out     |  47 ++++++-
 10 files changed, 265 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index f4ee208..6016425 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
@@ -647,13 +648,16 @@ public class TestStreaming {
     //todo: this should ideally check Transaction heartbeat as well, but heartbeat
     //timestamp is not reported yet
     //GetOpenTxnsInfoResponse txnresp = msClient.showTxns();
-    ShowLocksResponse response = msClient.showLocks();
+    ShowLocksRequest request = new ShowLocksRequest();
+    request.setDbname(dbName2);
+    request.setTablename(tblName2);
+    ShowLocksResponse response = msClient.showLocks(request);
     Assert.assertEquals("Wrong number of locks: " + response, 1, response.getLocks().size());
     ShowLocksResponseElement lock = response.getLocks().get(0);
     long acquiredAt = lock.getAcquiredat();
     long heartbeatAt = lock.getLastheartbeat();
     txnBatch.heartbeat();
-    response = msClient.showLocks();
+    response = msClient.showLocks(request);
     Assert.assertEquals("Wrong number of locks2: " + response, 1, response.getLocks().size());
     lock = response.getLocks().get(0);
     Assert.assertEquals("Acquired timestamp didn't match", acquiredAt, lock.getAcquiredat());

http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 6bef3f5..94d5d86 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1914,11 +1914,17 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
   }
 
   @Override
+  @Deprecated
   public ShowLocksResponse showLocks() throws TException {
     return client.show_locks(new ShowLocksRequest());
   }
 
   @Override
+  public ShowLocksResponse showLocks(ShowLocksRequest request) throws TException {
+    return client.show_locks(request);
+  }
+
+  @Override
   public void heartbeat(long txnid, long lockid)
       throws NoSuchLockException, NoSuchTxnException, TxnAbortedException,
       TException {

http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index da3d7bc..da693f7 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
 import org.apache.hadoop.hive.metastore.api.TxnOpenException;
@@ -1329,9 +1330,18 @@ public interface IMetaStoreClient {
    * @return List of currently held and waiting locks.
    * @throws TException
    */
+  @Deprecated
   ShowLocksResponse showLocks() throws TException;
 
   /**
+   * Show all currently held and waiting locks.
+   * @param showLocksRequest SHOW LOCK request
+   * @return List of currently held and waiting locks.
+   * @throws TException
+   */
+  ShowLocksResponse showLocks(ShowLocksRequest showLocksRequest) throws TException;
+
+  /**
    * Send a heartbeat to indicate that the client holding these locks (if
    * any) and that opened this transaction (if one exists) is still alive.
    * The default timeout for transactions and locks is 300 seconds,

http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index b1d330d..b1dea19 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -1115,6 +1115,34 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
         String s = "select hl_lock_ext_id, hl_txnid, hl_db, hl_table, hl_partition, hl_lock_state,
" +
           "hl_lock_type, hl_last_heartbeat, hl_acquired_at, hl_user, hl_host, hl_lock_int_id,"
+
           "hl_blockedby_ext_id, hl_blockedby_int_id from HIVE_LOCKS";
+
+        // Some filters may have been specified in the SHOW LOCKS statement. Add them to
the query.
+        String dbName = rqst.getDbname();
+        String tableName = rqst.getTablename();
+        String partName = rqst.getPartname();
+
+        StringBuilder filter = new StringBuilder();
+        if (dbName != null && !dbName.isEmpty()) {
+          filter.append("hl_db=").append(quoteString(dbName));
+        }
+        if (tableName != null && !tableName.isEmpty()) {
+          if (filter.length() > 0) {
+            filter.append(" and ");
+          }
+          filter.append("hl_table=").append(quoteString(tableName));
+        }
+        if (partName != null && !partName.isEmpty()) {
+          if (filter.length() > 0) {
+            filter.append(" and ");
+          }
+          filter.append("hl_partition=").append(quoteString(partName));
+        }
+        String whereClause = filter.toString();
+
+        if (!whereClause.isEmpty()) {
+          s = s + " where " + whereClause;
+        }
+
         LOG.debug("Going to execute query <" + s + ">");
         ResultSet rs = stmt.executeQuery(s);
         while (rs.next()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 7818043..be4753f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
@@ -2583,7 +2584,29 @@ public class DDLTask extends Task<DDLWork> implements Serializable
{
     }
     lockMgr = (DbLockManager)lm;
 
-    ShowLocksResponse rsp = lockMgr.getLocks();
+    String dbName = showLocks.getDbName();
+    String tblName = showLocks.getTableName();
+    Map<String, String> partSpec = showLocks.getPartSpec();
+    if (dbName == null && tblName != null) {
+      dbName = SessionState.get().getCurrentDatabase();
+    }
+
+    ShowLocksRequest rqst = new ShowLocksRequest();
+    rqst.setDbname(dbName);
+    rqst.setTablename(tblName);
+    if (partSpec != null) {
+      List<String> keyList = new ArrayList<String>();
+      List<String> valList = new ArrayList<String>();
+      for (String partKey : partSpec.keySet()) {
+        String partVal = partSpec.remove(partKey);
+        keyList.add(partKey);
+        valList.add(partVal);
+      }
+      String partName = FileUtils.makePartName(keyList, valList);
+      rqst.setPartname(partName);
+    }
+
+    ShowLocksResponse rsp = lockMgr.getLocks(rqst);
 
     // write the results in the file
     DataOutputStream os = null;

http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
index f12024f..ad4bd4c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
@@ -256,8 +256,12 @@ public class DbLockManager implements HiveLockManager{
   }
 
   public ShowLocksResponse getLocks() throws LockException {
+    return getLocks(new ShowLocksRequest());
+  }
+
+  public ShowLocksResponse getLocks(ShowLocksRequest showLocksRequest) throws LockException
{
     try {
-      return client.showLocks();
+      return client.showLocks(showLocksRequest);
     } catch (TException e) {
       throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java
index 9e93254..45a86f6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java
@@ -53,8 +53,8 @@ public class ShowLocksDesc extends DDLDesc implements Serializable {
    * Schema for use with db txn manager.
    */
   private static final String newFormatSchema = "lockid,database,table,partition,lock_state,"
+
-      "lock_type,transaction_id,last_heartbeat,acquired_at,user," +
-      "hostname#string:string:string:string:string:string:string:string:string:string:string";
+      "blocked_by,lock_type,transaction_id,last_heartbeat,acquired_at,user,hostname#" +
+      "string:string:string:string:string:string:string:string:string:string:string:string";
 
   public String getDatabase() {
     return dbName;

http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 15e1ee8..04e556b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -24,9 +24,11 @@ import org.apache.hadoop.hive.ql.TestTxnCommands2;
 import org.apache.hadoop.hive.ql.txn.AcidWriteSetService;
 import org.junit.After;
 import org.junit.Assert;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.LockState;
 import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
@@ -42,7 +44,9 @@ import org.junit.Test;
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 /**
  * See additional tests in {@link org.apache.hadoop.hive.ql.lockmgr.TestDbTxnManager}
@@ -621,6 +625,105 @@ public class TestDbTxnManager2 {
     Assert.assertEquals(actual.toString(), normalizeCase(expectedTable), normalizeCase(actual.getTablename()));
     Assert.assertEquals(actual.toString(), normalizeCase(expectedPartition), normalizeCase(actual.getPartname()));
   }
+
+  @Test
+  public void testShowLocksFilterOptions() throws Exception {
+    CommandProcessorResponse cpr = driver.run("drop table if exists db1.t14");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("drop table if exists db2.t14"); // Note that db1 and db2 have a table
with common name
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("drop table if exists db2.t15");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("drop table if exists db2.t16");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("drop database if exists db1");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("drop database if exists db2");
+    checkCmdOnDriver(cpr);
+
+    cpr = driver.run("create database if not exists db1");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("create database if not exists db2");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("create table if not exists db1.t14 (a int, b int) partitioned by (ds
string) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("create table if not exists db2.t14 (a int, b int) clustered by (b)
into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("create table if not exists db2.t15 (a int, b int) clustered by (b)
into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("create table if not exists db2.t16 (a int, b int) clustered by (b)
into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
+    checkCmdOnDriver(cpr);
+
+    // Acquire different locks at different levels
+
+    cpr = driver.compileAndRespond("insert into table db1.t14 partition (ds='today') values
(1, 2)");
+    checkCmdOnDriver(cpr);
+    txnMgr.acquireLocks(driver.getPlan(), ctx, "Tom");
+
+    HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
+    cpr = driver.compileAndRespond("insert into table db1.t14 partition (ds='tomorrow') values
(3, 4)");
+    checkCmdOnDriver(cpr);
+    txnMgr2.acquireLocks(driver.getPlan(), ctx, "Jerry");
+
+    HiveTxnManager txnMgr3 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
+    cpr = driver.compileAndRespond("select * from db2.t15");
+    checkCmdOnDriver(cpr);
+    txnMgr3.acquireLocks(driver.getPlan(), ctx, "Donald");
+
+    HiveTxnManager txnMgr4 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
+    cpr = driver.compileAndRespond("select * from db2.t16");
+    checkCmdOnDriver(cpr);
+    txnMgr4.acquireLocks(driver.getPlan(), ctx, "Hillary");
+
+    HiveTxnManager txnMgr5 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
+    cpr = driver.compileAndRespond("select * from db2.t14");
+    checkCmdOnDriver(cpr);
+    txnMgr5.acquireLocks(driver.getPlan(), ctx, "Obama");
+
+    // Simulate SHOW LOCKS with different filter options
+
+    // SHOW LOCKS (no filter)
+    List<ShowLocksResponseElement> locks = getLocks();
+    Assert.assertEquals("Unexpected lock count", 7, locks.size());
+    // locks.get(0) is a lock on tmp table in default database used for insert
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=today", locks.get(1));
+    // locks.get(2) is a lock on tmp table in default database used for insert
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=tomorrow", locks.get(3));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, locks.get(4));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t16", null, locks.get(5));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t14", null, locks.get(6));
+
+    // SHOW LOCKS db2
+    locks = getLocksWithFilterOptions(txnMgr3, "db2", null, null);
+    Assert.assertEquals("Unexpected lock count", 3, locks.size());
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, locks.get(0));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t16", null, locks.get(1));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t14", null, locks.get(2));
+
+    // SHOW LOCKS t14
+    cpr = driver.run("use db1");
+    checkCmdOnDriver(cpr);
+    locks = getLocksWithFilterOptions(txnMgr, null, "t14", null);
+    Assert.assertEquals("Unexpected lock count", 2, locks.size());
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=today", locks.get(0));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=tomorrow", locks.get(1));
+    // Note that it shouldn't show t14 from db2
+
+    // SHOW LOCKS t14 PARTITION ds='today'
+    Map<String, String> partSpec = new HashMap<String, String>();
+    partSpec.put("ds", "today");
+    locks = getLocksWithFilterOptions(txnMgr, null, "t14", partSpec);
+    Assert.assertEquals("Unexpected lock count", 1, locks.size());
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=today", locks.get(0));
+
+    // SHOW LOCKS t15
+    cpr = driver.run("use db2");
+    checkCmdOnDriver(cpr);
+    locks = getLocksWithFilterOptions(txnMgr3, null, "t15", null);
+    Assert.assertEquals("Unexpected lock count", 1, locks.size());
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, locks.get(0));
+  }
+
   private void checkCmdOnDriver(CommandProcessorResponse cpr) {
     Assert.assertTrue(cpr.toString(), cpr.getResponseCode() == 0);
   }
@@ -1171,4 +1274,27 @@ public class TestDbTxnManager2 {
     Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString("select
* from COMPLETED_TXN_COMPONENTS"),
       4, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1'
and ctc_partition is not null"));
   }
+
+  private List<ShowLocksResponseElement> getLocksWithFilterOptions(HiveTxnManager txnMgr,
+      String dbName, String tblName, Map<String, String> partSpec) throws Exception
{
+    if (dbName == null && tblName != null) {
+      dbName = SessionState.get().getCurrentDatabase();
+    }
+    ShowLocksRequest rqst = new ShowLocksRequest();
+    rqst.setDbname(dbName);
+    rqst.setTablename(tblName);
+    if (partSpec != null) {
+      List<String> keyList = new ArrayList<String>();
+      List<String> valList = new ArrayList<String>();
+      for (String partKey : partSpec.keySet()) {
+        String partVal = partSpec.remove(partKey);
+        keyList.add(partKey);
+        valList.add(partVal);
+      }
+      String partName = FileUtils.makePartName(keyList, valList);
+      rqst.setPartname(partName);
+    }
+    ShowLocksResponse rsp = ((DbLockManager)txnMgr.getLockManager()).getLocks(rqst);
+    return rsp.getLocks();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q b/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q
index 30b26f4..da8e448 100644
--- a/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q
+++ b/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q
@@ -8,3 +8,17 @@ show locks extended;
 show locks default;
 
 show transactions;
+
+create table partitioned_acid_table (a int, b int) partitioned by (p string) clustered by
(a) into 2 buckets stored as orc tblproperties ('transactional'='true');
+
+show locks database default;
+
+show locks partitioned_acid_table;
+
+show locks partitioned_acid_table extended;
+
+show locks partitioned_acid_table partition (p='abc');
+
+show locks partitioned_acid_table partition (p='abc') extended;
+
+drop table partitioned_acid_table;

http://git-wip-us.apache.org/repos/asf/hive/blob/6c160bc1/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
index 46d8ea1..c1adeb3 100644
--- a/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
+++ b/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
@@ -2,19 +2,60 @@ PREHOOK: query: show locks
 PREHOOK: type: SHOWLOCKS
 POSTHOOK: query: show locks
 POSTHOOK: type: SHOWLOCKS
-Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Hearbeat	Acquired
At	User
+Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Hearbeat	Acquired
At	User	Hostname
 PREHOOK: query: show locks extended
 PREHOOK: type: SHOWLOCKS
 POSTHOOK: query: show locks extended
 POSTHOOK: type: SHOWLOCKS
-Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Hearbeat	Acquired
At	User
+Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Hearbeat	Acquired
At	User	Hostname
 PREHOOK: query: show locks default
 PREHOOK: type: SHOWLOCKS
 POSTHOOK: query: show locks default
 POSTHOOK: type: SHOWLOCKS
-Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Hearbeat	Acquired
At	User
+Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Hearbeat	Acquired
At	User	Hostname
 PREHOOK: query: show transactions
 PREHOOK: type: SHOW TRANSACTIONS
 POSTHOOK: query: show transactions
 POSTHOOK: type: SHOW TRANSACTIONS
 Transaction ID	Transaction State	User	Hostname
+PREHOOK: query: create table partitioned_acid_table (a int, b int) partitioned by (p string)
clustered by (a) into 2 buckets stored as orc tblproperties ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@partitioned_acid_table
+POSTHOOK: query: create table partitioned_acid_table (a int, b int) partitioned by (p string)
clustered by (a) into 2 buckets stored as orc tblproperties ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@partitioned_acid_table
+PREHOOK: query: show locks database default
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: show locks database default
+POSTHOOK: type: SHOWLOCKS
+Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Hearbeat	Acquired
At	User	Hostname
+PREHOOK: query: show locks partitioned_acid_table
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: show locks partitioned_acid_table
+POSTHOOK: type: SHOWLOCKS
+Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Hearbeat	Acquired
At	User	Hostname
+PREHOOK: query: show locks partitioned_acid_table extended
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: show locks partitioned_acid_table extended
+POSTHOOK: type: SHOWLOCKS
+Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Hearbeat	Acquired
At	User	Hostname
+PREHOOK: query: show locks partitioned_acid_table partition (p='abc')
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: show locks partitioned_acid_table partition (p='abc')
+POSTHOOK: type: SHOWLOCKS
+Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Hearbeat	Acquired
At	User	Hostname
+PREHOOK: query: show locks partitioned_acid_table partition (p='abc') extended
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: show locks partitioned_acid_table partition (p='abc') extended
+POSTHOOK: type: SHOWLOCKS
+Lock ID	Database	Table	Partition	State	Blocked By	Type	Transaction ID	Last Hearbeat	Acquired
At	User	Hostname
+PREHOOK: query: drop table partitioned_acid_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@partitioned_acid_table
+PREHOOK: Output: default@partitioned_acid_table
+POSTHOOK: query: drop table partitioned_acid_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@partitioned_acid_table
+POSTHOOK: Output: default@partitioned_acid_table


Mime
View raw message