hive-commits mailing list archives

From gunt...@apache.org
Subject svn commit: r1622396 [2/8] - in /hive/branches/cbo: ./ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/ beeline/src/java/org/apache/hive/beeline/ beeline/src/test/org/apache/hive/beeline/ bin/ bin/ext/ checkstyle/ common/src/java/or...
Date Thu, 04 Sep 2014 02:49:50 GMT
Modified: hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java (original)
+++ hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java Thu Sep  4 02:49:46 2014
@@ -25,6 +25,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.concurrent.TimeUnit;
 
 import junit.framework.Assert;
 
@@ -116,7 +117,7 @@ public class TestHCatPartitionPublish {
         + msPort);
     hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
     hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
-    hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 120);
+    hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 120, TimeUnit.SECONDS);
     hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
         HCatSemanticAnalyzer.class.getName());
     hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");

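The setIntVar-to-setTimeVar change above recurs throughout this commit: timeout and interval settings now carry an explicit TimeUnit instead of a bare integer whose unit each call site had to remember. A minimal round-trip sketch, using only the HiveConf signatures that appear in these hunks:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hive.conf.HiveConf;

    public class TimeVarSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Store 120 seconds; the unit travels with the value.
        conf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 120, TimeUnit.SECONDS);
        // Read it back in whatever unit the call site needs (here milliseconds),
        // which is how HiveMetaStoreClient consumes it later in this commit.
        long timeoutMs = conf.getTimeVar(
            HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
        System.out.println(timeoutMs); // 120000
      }
    }
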
Modified: hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java (original)
+++ hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java Thu Sep  4 02:49:46 2014
@@ -199,7 +199,8 @@ public class HCatLoader extends HCatBase
     throws IOException {
     Table table = phutil.getTable(location,
       hcatServerUri != null ? hcatServerUri : PigHCatUtil.getHCatServerUri(job),
-      PigHCatUtil.getHCatServerPrincipal(job));
+      PigHCatUtil.getHCatServerPrincipal(job),
+      job);   // Pass job to initialize metastore conf overrides
     List<FieldSchema> tablePartitionKeys = table.getPartitionKeys();
     String[] partitionKeys = new String[tablePartitionKeys.size()];
     for (int i = 0; i < tablePartitionKeys.size(); i++) {
@@ -215,7 +216,11 @@ public class HCatLoader extends HCatBase
 
     Table table = phutil.getTable(location,
       hcatServerUri != null ? hcatServerUri : PigHCatUtil.getHCatServerUri(job),
-      PigHCatUtil.getHCatServerPrincipal(job));
+      PigHCatUtil.getHCatServerPrincipal(job),
+
+      // Pass job to initialize metastore conf overrides for embedded metastore case
+      // (hive.metastore.uris = "").
+      job);
     HCatSchema hcatTableSchema = HCatUtil.getTableSchemaWithPtnCols(table);
     try {
       PigHCatUtil.validateHCatTableSchemaFollowsPigRules(hcatTableSchema);

Modified: hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java (original)
+++ hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java Thu Sep  4 02:49:46 2014
@@ -142,8 +142,16 @@ class PigHCatUtil {
   }
 
   private static HiveMetaStoreClient getHiveMetaClient(String serverUri,
-                             String serverKerberosPrincipal, Class<?> clazz) throws Exception {
-    HiveConf hiveConf = new HiveConf(clazz);
+                             String serverKerberosPrincipal,
+                             Class<?> clazz,
+                             Job job) throws Exception {
+
+    // The job configuration is passed in so the configuration will be cloned
+    // from the pig job configuration. This is necessary for overriding
+    // metastore configuration arguments like the metastore jdbc connection string
+    // and password, in the case of an embedded metastore, which you get when
+    // hive.metastore.uris = "".
+    HiveConf hiveConf = new HiveConf(job.getConfiguration(), clazz);
 
     if (serverUri != null) {
       hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, serverUri.trim());
@@ -178,7 +186,13 @@ class PigHCatUtil {
     return new HCatSchema(fcols);
   }
 
-  public Table getTable(String location, String hcatServerUri, String hcatServerPrincipal) throws IOException {
+  /**
+   * The job argument is passed so that configuration overrides can be used to initialize
+   * the metastore configuration in the special case of an embedded metastore
+   * (hive.metastore.uris = "").
+   */
+  public Table getTable(String location, String hcatServerUri, String hcatServerPrincipal,
+      Job job) throws IOException {
     Pair<String, String> loc_server = new Pair<String, String>(location, hcatServerUri);
     Table hcatTable = hcatTableCache.get(loc_server);
     if (hcatTable != null) {
@@ -191,7 +205,7 @@ class PigHCatUtil {
     Table table = null;
     HiveMetaStoreClient client = null;
     try {
-      client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class);
+      client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class, job);
       table = HCatUtil.getTable(client, dbName, tableName);
     } catch (NoSuchObjectException nsoe) {
       throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE); // prettier error messages to frontend

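The comments added to PigHCatUtil explain the intent: building HiveConf from the job configuration lets Pig-side settings override metastore defaults when no remote metastore is configured. A hedged sketch of that flow; Job.getInstance is assumed (Hadoop 2 style), and the Derby connection string is only an illustrative override, not something taken from this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.mapreduce.Job;

    public class EmbeddedMetastoreOverrideSketch {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        // Empty metastore URIs selects the embedded metastore, per the comment above.
        job.getConfiguration().set("hive.metastore.uris", "");
        // Illustrative JDBC override (property name assumed).
        job.getConfiguration().set("javax.jdo.option.ConnectionURL",
            "jdbc:derby:;databaseName=metastore_db;create=true");
        // This mirrors the new HiveConf(job.getConfiguration(), clazz) call in
        // getHiveMetaClient: both overrides are now visible to the metastore client.
        HiveConf hiveConf = new HiveConf(job.getConfiguration(),
            EmbeddedMetastoreOverrideSketch.class);
        System.out.println(hiveConf.get("javax.jdo.option.ConnectionURL"));
      }
    }
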
Modified: hive/branches/cbo/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm (original)
+++ hive/branches/cbo/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm Thu Sep  4 02:49:46 2014
@@ -1422,7 +1422,7 @@ sub run
             $testStatuses->{$testName} = $failedStr;
 
           }
-          $msg= "$msg at " . time . "\n";
+          $msg .= "\nEnding test $testName at " . $endTime ."\n";
           #print $msg;
           print $log $msg;
           $duration = $endTime - $beginTime;
@@ -1435,6 +1435,7 @@ sub run
 
         if ($@) {
           $msg= "ERROR $subName at : ".__LINE__." Failed to run test $testName <$@>\n";
+          $msg .= "Ending test $testName at " . time ."\n";
           #print $msg;
           print $log $msg;
           $testStatuses->{$testName} = $abortedStr;

Modified: hive/branches/cbo/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java (original)
+++ hive/branches/cbo/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java Thu Sep  4 02:49:46 2014
@@ -107,7 +107,7 @@ public class HiveEndPoint {
   public StreamingConnection newConnection(final boolean createPartIfNotExists)
           throws ConnectionError, InvalidPartition, InvalidTable, PartitionCreationFailed
           , ImpersonationFailed , InterruptedException {
-    return newConnection(null, createPartIfNotExists, null);
+    return newConnection(createPartIfNotExists, null, null);
   }
 
   /**
@@ -126,67 +126,63 @@ public class HiveEndPoint {
   public StreamingConnection newConnection(final boolean createPartIfNotExists, HiveConf conf)
           throws ConnectionError, InvalidPartition, InvalidTable, PartitionCreationFailed
           , ImpersonationFailed , InterruptedException {
-    return newConnection(null, createPartIfNotExists, conf);
+    return newConnection(createPartIfNotExists, conf, null);
   }
 
   /**
    * Acquire a new connection to MetaStore for streaming
-   * @param proxyUser User on whose behalf all hdfs and hive operations will be
-   *                  performed on this connection. Set it to null or empty string
-   *                  to connect as user of current process without impersonation.
-   *                  Currently this argument is not supported and must be null
    * @param createPartIfNotExists If true, the partition specified in the endpoint
    *                              will be auto created if it does not exist
+   * @param authenticatedUser  UserGroupInformation object obtained from successful authentication.
+   *                           Uses insecure mode if this argument is null.
    * @return
-   * @throws ConnectionError if problem connecting
+   * @throws ConnectionError if there is a connection problem
    * @throws InvalidPartition  if specified partition is not valid (createPartIfNotExists = false)
-   * @throws ImpersonationFailed  if not able to impersonate 'proxyUser'
+   * @throws ImpersonationFailed  if not able to impersonate 'authenticatedUser'
    * @throws IOException  if there was an I/O error when acquiring connection
    * @throws PartitionCreationFailed if failed to create partition
    * @throws InterruptedException
    */
-  private StreamingConnection newConnection(final String proxyUser,
-                                            final boolean createPartIfNotExists, final HiveConf conf)
+  public StreamingConnection newConnection(final boolean createPartIfNotExists, final HiveConf conf,
+                                            final UserGroupInformation authenticatedUser)
           throws ConnectionError, InvalidPartition,
                InvalidTable, PartitionCreationFailed, ImpersonationFailed , InterruptedException {
-    if (proxyUser ==null || proxyUser.trim().isEmpty() ) {
-      return newConnectionImpl(System.getProperty("user.name"), null, createPartIfNotExists, conf);
+
+    if( authenticatedUser==null ) {
+      return newConnectionImpl(authenticatedUser, createPartIfNotExists, conf);
     }
-    final UserGroupInformation ugi = getUserGroupInfo(proxyUser);
+
     try {
-      return ugi.doAs (
-              new PrivilegedExceptionAction<StreamingConnection>() {
+      return authenticatedUser.doAs (
+             new PrivilegedExceptionAction<StreamingConnection>() {
                 @Override
                 public StreamingConnection run()
                         throws ConnectionError, InvalidPartition, InvalidTable
                         , PartitionCreationFailed {
-                  return newConnectionImpl(proxyUser, ugi, createPartIfNotExists, conf);
+                  return newConnectionImpl(authenticatedUser, createPartIfNotExists, conf);
                 }
-              }
+             }
       );
     } catch (IOException e) {
-      throw new ImpersonationFailed("Failed to impersonate '" + proxyUser +
-              "' when acquiring connection", e);
+      throw new ConnectionError("Failed to connect as : " + authenticatedUser.getShortUserName(), e);
     }
   }
 
-
-
-  private StreamingConnection newConnectionImpl(String proxyUser, UserGroupInformation ugi,
+  private StreamingConnection newConnectionImpl(UserGroupInformation ugi,
                                                boolean createPartIfNotExists, HiveConf conf)
           throws ConnectionError, InvalidPartition, InvalidTable
           , PartitionCreationFailed {
-    return new ConnectionImpl(this, proxyUser, ugi, conf, createPartIfNotExists);
+    return new ConnectionImpl(this, ugi, conf, createPartIfNotExists);
   }
 
-  private static UserGroupInformation getUserGroupInfo(String proxyUser)
+  private static UserGroupInformation getUserGroupInfo(String user)
           throws ImpersonationFailed {
     try {
       return UserGroupInformation.createProxyUser(
-              proxyUser, UserGroupInformation.getLoginUser());
+              user, UserGroupInformation.getLoginUser());
     } catch (IOException e) {
-      LOG.error("Unable to login as proxy user. Exception follows.", e);
-      throw new ImpersonationFailed(proxyUser,e);
+      LOG.error("Unable to get UserGroupInfo for user : " + user, e);
+      throw new ImpersonationFailed(user,e);
     }
   }
 
@@ -242,14 +238,12 @@ public class HiveEndPoint {
   private static class ConnectionImpl implements StreamingConnection {
     private final IMetaStoreClient msClient;
     private final HiveEndPoint endPt;
-    private final String proxyUser;
     private final UserGroupInformation ugi;
+    private final String username;
 
     /**
-     *
      * @param endPoint end point to connect to
-     * @param proxyUser  can be null
-     * @param ugi of prody user. If ugi is null, impersonation of proxy user will be disabled
+     * @param ugi on whose behalf streaming is done; if null, the process user is used
      * @param conf HiveConf object
      * @param createPart create the partition if it does not exist
      * @throws ConnectionError if there is trouble connecting
@@ -257,15 +251,15 @@ public class HiveEndPoint {
      * @throws InvalidTable if specified table does not exist
      * @throws PartitionCreationFailed if createPart=true and not able to create partition
      */
-    private ConnectionImpl(HiveEndPoint endPoint, String proxyUser, UserGroupInformation ugi,
+    private ConnectionImpl(HiveEndPoint endPoint, UserGroupInformation ugi,
                            HiveConf conf, boolean createPart)
             throws ConnectionError, InvalidPartition, InvalidTable
                    , PartitionCreationFailed {
-      this.proxyUser = proxyUser;
       this.endPt = endPoint;
       this.ugi = ugi;
+      this.username = ugi==null ? System.getProperty("user.name") : ugi.getShortUserName();
       if (conf==null) {
-        conf = HiveEndPoint.createHiveConf(this.getClass(),endPoint.metaStoreUri);
+        conf = HiveEndPoint.createHiveConf(this.getClass(), endPoint.metaStoreUri);
       }
       this.msClient = getMetaStoreClient(endPoint, conf);
       if (createPart  &&  !endPoint.partitionVals.isEmpty()) {
@@ -324,21 +318,21 @@ public class HiveEndPoint {
         return ugi.doAs (
                 new PrivilegedExceptionAction<TransactionBatch>() {
                   @Override
-                  public TransactionBatch run() throws StreamingException {
+                  public TransactionBatch run() throws StreamingException, InterruptedException {
                     return fetchTransactionBatchImpl(numTransactions, recordWriter);
                   }
                 }
         );
       } catch (IOException e) {
-        throw new ImpersonationFailed("Failed impersonating proxy user '" + proxyUser +
-                "' when acquiring Transaction Batch on endPoint " + endPt, e);
+        throw new ImpersonationFailed("Failed to fetch Txn Batch as user '" + ugi.getShortUserName()
+                + "' when acquiring Transaction Batch on endPoint " + endPt, e);
       }
     }
 
     private TransactionBatch fetchTransactionBatchImpl(int numTransactions,
                                                   RecordWriter recordWriter)
-            throws StreamingException, TransactionBatchUnAvailable {
-      return new TransactionBatchImpl(proxyUser, ugi, endPt, numTransactions, msClient
+            throws StreamingException, TransactionBatchUnAvailable, InterruptedException {
+      return new TransactionBatchImpl(username, ugi, endPt, numTransactions, msClient
               , recordWriter);
     }
 
@@ -445,7 +439,7 @@ public class HiveEndPoint {
   } // class ConnectionImpl
 
   private static class TransactionBatchImpl implements TransactionBatch {
-    private final String proxyUser;
+    private final String username;
     private final UserGroupInformation ugi;
     private final HiveEndPoint endPt;
     private final IMetaStoreClient msClient;
@@ -461,7 +455,7 @@ public class HiveEndPoint {
     /**
      * Represents a batch of transactions acquired from MetaStore
      *
-     * @param proxyUser
+     * @param user
      * @param ugi
      * @param endPt
      * @param numTxns
@@ -470,9 +464,9 @@ public class HiveEndPoint {
      * @throws StreamingException if failed to create new RecordUpdater for batch
      * @throws TransactionBatchUnAvailable if failed to acquire a new Transaction batch
      */
-    private TransactionBatchImpl(String proxyUser, UserGroupInformation ugi, HiveEndPoint endPt
-              , int numTxns, IMetaStoreClient msClient, RecordWriter recordWriter)
-            throws StreamingException, TransactionBatchUnAvailable {
+    private TransactionBatchImpl(final String user, UserGroupInformation ugi, HiveEndPoint endPt
+              , final int numTxns, final IMetaStoreClient msClient, RecordWriter recordWriter)
+            throws StreamingException, TransactionBatchUnAvailable, InterruptedException {
       try {
         if ( endPt.partitionVals!=null   &&   !endPt.partitionVals.isEmpty() ) {
           Table tableObj = msClient.getTable(endPt.database, endPt.table);
@@ -481,20 +475,38 @@ public class HiveEndPoint {
         } else {
           partNameForLock = null;
         }
-        this.proxyUser = proxyUser;
+        this.username = user;
         this.ugi = ugi;
         this.endPt = endPt;
         this.msClient = msClient;
         this.recordWriter = recordWriter;
-        this.txnIds = msClient.openTxns(proxyUser, numTxns).getTxn_ids();
+
+        txnIds = openTxnImpl(msClient, user, numTxns, ugi);
+
+
         this.currentTxnIndex = -1;
         this.state = TxnState.INACTIVE;
         recordWriter.newBatch(txnIds.get(0), txnIds.get(txnIds.size()-1));
       } catch (TException e) {
         throw new TransactionBatchUnAvailable(endPt, e);
+      } catch (IOException e) {
+        throw new TransactionBatchUnAvailable(endPt, e);
       }
     }
 
+    private List<Long> openTxnImpl(final IMetaStoreClient msClient, final String user, final int numTxns, UserGroupInformation ugi)
+            throws IOException, TException,  InterruptedException {
+      if(ugi==null) {
+        return  msClient.openTxns(user, numTxns).getTxn_ids();
+      }
+      return (List<Long>) ugi.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          return msClient.openTxns(user, numTxns).getTxn_ids();
+        }
+      }) ;
+    }
+
     @Override
     public String toString() {
       if (txnIds==null || txnIds.isEmpty()) {
@@ -526,8 +538,8 @@ public class HiveEndPoint {
               }
         );
       } catch (IOException e) {
-        throw new ImpersonationFailed("Failed impersonating proxyUser '" + proxyUser +
-                "' when switch to next Transaction for endPoint :" + endPt, e);
+        throw new ImpersonationFailed("Failed switching to next Txn as user '" + username +
+                "' in Txn batch :" + this, e);
       }
     }
 
@@ -536,7 +548,7 @@ public class HiveEndPoint {
         throw new InvalidTrasactionState("No more transactions available in" +
                 " current batch for end point : " + endPt);
       ++currentTxnIndex;
-      lockRequest = createLockRequest(endPt, partNameForLock, proxyUser, getCurrentTxnId());
+      lockRequest = createLockRequest(endPt, partNameForLock, username, getCurrentTxnId());
       try {
         LockResponse res = msClient.lock(lockRequest);
         if (res.getState() != LockState.ACQUIRED) {
@@ -608,8 +620,8 @@ public class HiveEndPoint {
             }
         );
       } catch (IOException e) {
-        throw new ImpersonationFailed("Failed impersonating proxy user '" + proxyUser +
-                "' when writing to endPoint :" + endPt + ". Transaction Id: "
+        throw new ImpersonationFailed("Failed wirting as user '" + username +
+                "' to endPoint :" + endPt + ". Transaction Id: "
                 + getCurrentTxnId(), e);
       }
     }
@@ -641,8 +653,8 @@ public class HiveEndPoint {
                 }
         );
       } catch (IOException e) {
-        throw new ImpersonationFailed("Failed impersonating proxyUser '" + proxyUser +
-                "' when writing to endPoint :" + endPt + ". Transaction Id: "
+        throw new ImpersonationFailed("Failed writing as user '" + username +
+                "' to endPoint :" + endPt + ". Transaction Id: "
                 + getCurrentTxnId(), e);
       }
     }
@@ -680,9 +692,8 @@ public class HiveEndPoint {
               }
         );
       } catch (IOException e) {
-        throw new ImpersonationFailed("Failed impersonating proxy user '" + proxyUser +
-                "' when committing Txn on endPoint :" + endPt + ". Transaction Id: "
-                + getCurrentTxnId(), e);
+        throw new ImpersonationFailed("Failed committing Txn ID " + getCurrentTxnId() + " as user '"
+                + username + "'on endPoint :" + endPt + ". Transaction Id: ", e);
       }
 
     }
@@ -726,9 +737,8 @@ public class HiveEndPoint {
                 }
         );
       } catch (IOException e) {
-        throw new ImpersonationFailed("Failed impersonating proxy user '" + proxyUser +
-                "' when aborting Txn on endPoint :" + endPt + ". Transaction Id: "
-                + getCurrentTxnId(), e);
+        throw new ImpersonationFailed("Failed aborting Txn " + getCurrentTxnId()  + " as user '"
+                + username + "' on endPoint :" + endPt, e);
       }
     }
 
@@ -784,8 +794,8 @@ public class HiveEndPoint {
                 }
         );
       } catch (IOException e) {
-        throw new ImpersonationFailed("Failed impersonating proxy user '" + proxyUser +
-                "' when closing Txn Batch on  endPoint :" + endPt, e);
+        throw new ImpersonationFailed("Failed closing Txn Batch as user '" + username +
+                "' on  endPoint :" + endPt, e);
       }
     }
 

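Taken together, the HiveEndPoint changes replace the never-supported proxyUser string with a caller-supplied, already-authenticated UserGroupInformation, and make the three-argument newConnection public. A usage sketch under stated assumptions: the endpoint values, principal, and keytab path are placeholders, and the four-argument HiveEndPoint constructor is the streaming API's existing form rather than something shown in this diff:

    import java.util.Arrays;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hive.hcatalog.streaming.HiveEndPoint;
    import org.apache.hive.hcatalog.streaming.StreamingConnection;

    public class StreamingConnectionSketch {
      public static void main(String[] args) throws Exception {
        HiveEndPoint endPt = new HiveEndPoint("thrift://metastore-host:9083",
            "default", "alerts", Arrays.asList("2014", "09")); // placeholder endpoint

        // Secure cluster: authenticate first, then hand the UGI to newConnection.
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            "streaming/host@EXAMPLE.COM", "/etc/security/streaming.keytab"); // placeholders
        StreamingConnection secure = endPt.newConnection(true, null, ugi); // null conf -> defaults

        // Insecure mode: a null UGI means the process user is used directly.
        StreamingConnection insecure = endPt.newConnection(true, null, null);
      }
    }
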
Modified: hive/branches/cbo/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java (original)
+++ hive/branches/cbo/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java Thu Sep  4 02:49:46 2014
@@ -653,7 +653,7 @@ public class Server {
     verifyParam(inputs, "input");
     verifyParam(mapper, "mapper");
     verifyParam(reducer, "reducer");
-    
+
     Map<String, Object> userArgs = new HashMap<String, Object>();
     userArgs.put("user.name", getDoAsUser());
     userArgs.put("input", inputs);
@@ -680,8 +680,8 @@ public class Server {
   /**
    * Run a MapReduce Jar job.
    * Params correspond to the REST api params
-   * @param  usesHcatalog if {@code true}, means the Jar uses HCat and thus needs to access 
-   *    metastore, which requires additional steps for WebHCat to perform in a secure cluster.  
+   * @param  usesHcatalog if {@code true}, means the Jar uses HCat and thus needs to access
+   *    metastore, which requires additional steps for WebHCat to perform in a secure cluster.
    * @param callback URL which WebHCat will call when the hive job finishes
    * @see org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob
    */
@@ -703,7 +703,7 @@ public class Server {
     verifyUser();
     verifyParam(jar, "jar");
     verifyParam(mainClass, "class");
-    
+
     Map<String, Object> userArgs = new HashMap<String, Object>();
     userArgs.put("user.name", getDoAsUser());
     userArgs.put("jar", jar);
@@ -729,7 +729,7 @@ public class Server {
    * Run a Pig job.
   * Params correspond to the REST api params.  If '-useHCatalog' is in {@code pigArgs}, {@code usesHcatalog}
   * is interpreted as true.
-   * @param  usesHcatalog if {@code true}, means the Pig script uses HCat and thus needs to access 
+   * @param  usesHcatalog if {@code true}, means the Pig script uses HCat and thus needs to access
    *    metastore, which requires additional steps for WebHCat to perform in a secure cluster.
    *    This does nothing to ensure that Pig is installed on target node in the cluster.
    * @param callback URL which WebHCat will call when the hive job finishes
@@ -752,7 +752,7 @@ public class Server {
     if (execute == null && srcFile == null) {
       throw new BadParam("Either execute or file parameter required");
     }
-    
+
     //add all function arguments to a map
     Map<String, Object> userArgs = new HashMap<String, Object>();
     userArgs.put("user.name", getDoAsUser());
@@ -819,7 +819,7 @@ public class Server {
    * @param execute    SQL statement to run, equivalent to "-e" from hive command line
    * @param srcFile    name of hive script file to run, equivalent to "-f" from hive
    *                   command line
-   * @param hiveArgs   additional command line argument passed to the hive command line. 
+   * @param hiveArgs   additional command line argument passed to the hive command line.
    *                   Please check https://cwiki.apache.org/Hive/languagemanual-cli.html
    *                   for detailed explanation of command line arguments
    * @param otherFiles additional files to be shipped to the launcher, such as the jars
@@ -846,7 +846,7 @@ public class Server {
     if (execute == null && srcFile == null) {
       throw new BadParam("Either execute or file parameter required");
     }
-    
+
     //add all function arguments to a map
     Map<String, Object> userArgs = new HashMap<String, Object>();
     userArgs.put("user.name", getDoAsUser());
@@ -903,42 +903,42 @@ public class Server {
    * Example usages:
    * 1. curl -s 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan'
    * Return all the Job IDs submitted by hsubramaniyan
-   * 2. curl -s 
+   * 2. curl -s
    * 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&showall=true'
    * Return all the Job IDs that are visible to hsubramaniyan
    * 3. curl -s
    * 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&jobid=job_201312091733_0003'
    * Return all the Job IDs for hsubramaniyan after job_201312091733_0003.
-   * 4. curl -s 'http://localhost:50111/templeton/v1/jobs? 
+   * 4. curl -s 'http://localhost:50111/templeton/v1/jobs?
    * user.name=hsubramaniyan&jobid=job_201312091733_0003&numrecords=5'
+   * Return the first 5 (at most) Job IDs submitted by hsubramaniyan after job_201312091733_0003.
-   * 5.  curl -s 
+   * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after job_201312091733_0003.
+   * 5.  curl -s
    * 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&numrecords=5'
+   * Return the first 5 (at most) Job IDs submitted by hsubramaniyan after sorting the Job ID list
+   * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after sorting the Job ID list
    * lexicographically.
    * </p>
    * <p>
    * Supporting pagination using "jobid" and "numrecords" parameters:
    * Step 1: Get the start "jobid" = job_xxx_000, "numrecords" = n
-   * Step 2: Issue a curl command by specifying the user-defined "numrecords" and "jobid" 
-   * Step 3: If list obtained from Step 2 has size equal to "numrecords", retrieve the list's 
+   * Step 2: Issue a curl command by specifying the user-defined "numrecords" and "jobid"
+   * Step 3: If list obtained from Step 2 has size equal to "numrecords", retrieve the list's
    * last record and get the Job Id of the last record as job_yyy_k, else quit.
    * Step 4: set "jobid"=job_yyy_k and go to step 2.
-   * </p> 
+   * </p>
    * @param fields If "fields" set to "*", the request will return full details of the job.
    * If "fields" is missing, will only return the job ID. Currently the value can only
    * be "*", other values are not allowed and will throw exception.
    * @param showall If "showall" is set to "true", the request will return all jobs the user
    * has permission to view, not only the jobs belonging to the user.
-   * @param jobid If "jobid" is present, the records whose Job Id is lexicographically greater 
-   * than "jobid" are only returned. For example, if "jobid" = "job_201312091733_0001", 
-   * the jobs whose Job ID is greater than "job_201312091733_0001" are returned. The number of 
+   * @param jobid If "jobid" is present, the records whose Job Id is lexicographically greater
+   * than "jobid" are only returned. For example, if "jobid" = "job_201312091733_0001",
+   * the jobs whose Job ID is greater than "job_201312091733_0001" are returned. The number of
    * records returned depends on the value of "numrecords".
-   * @param numrecords If the "jobid" and "numrecords" parameters are present, the top #numrecords 
-   * records appearing after "jobid" will be returned after sorting the Job Id list 
-   * lexicographically. 
-   * If "jobid" parameter is missing and "numrecords" is present, the top #numrecords will 
-   * be returned after lexicographically sorting the Job Id list. If "jobid" parameter is present 
+   * @param numrecords If the "jobid" and "numrecords" parameters are present, the top #numrecords
+   * records appearing after "jobid" will be returned after sorting the Job Id list
+   * lexicographically.
+   * If "jobid" parameter is missing and "numrecords" is present, the top #numrecords will
+   * be returned after lexicographically sorting the Job Id list. If "jobid" parameter is present
    * and "numrecords" is missing, all the records whose Job Id is greater than "jobid" are returned.
    * @return list of job items based on the filter conditions specified by the user.
    */
@@ -950,7 +950,7 @@ public class Server {
                                        @QueryParam("jobid") String jobid,
                                        @QueryParam("numrecords") String numrecords)
     throws NotAuthorizedException, BadParam, IOException, InterruptedException {
-    
+
     verifyUser();
 
     boolean showDetails = false;
@@ -971,9 +971,9 @@ public class Server {
     try {
       if (numrecords != null) {
         numRecords = Integer.parseInt(numrecords);
-	if (numRecords <= 0) {
-	  throw new BadParam("numrecords should be an integer > 0");
-	}    
+  if (numRecords <= 0) {
+    throw new BadParam("numrecords should be an integer > 0");
+  }
       }
       else {
         numRecords = -1;
@@ -983,18 +983,18 @@ public class Server {
       throw new BadParam("Invalid numrecords format: numrecords should be an integer > 0");
     }
 
-    // Sort the list lexicographically            
+    // Sort the list lexicographically
     Collections.sort(list);
 
     for (String job : list) {
       // If numRecords = -1, fetch all records.
       // Hence skip all the below checks when numRecords = -1.
       if (numRecords != -1) {
-        // If currRecord >= numRecords, we have already fetched the top #numRecords                                                                                                              
+        // If currRecord >= numRecords, we have already fetched the top #numRecords
         if (currRecord >= numRecords) {
           break;
-        } 
-        // If the current record needs to be returned based on the 
+        }
+        // If the current record needs to be returned based on the
         // filter conditions specified by the user, increment the counter
         else if ((jobid != null && job.compareTo(jobid) > 0) || jobid == null) {
           currRecord++;
@@ -1101,7 +1101,7 @@ public class Server {
    * value of user.name query param, in kerberos mode it's the kinit'ed user.
    */
   private String getRequestingUser() {
-    if (theSecurityContext == null) { 
+    if (theSecurityContext == null) {
       return null;
     }
     String userName = null;
@@ -1114,7 +1114,7 @@ public class Server {
     if(userName == null) {
       return null;
     }
-    //map hue/foo.bar@something.com->hue since user group checks 
+    //map hue/foo.bar@something.com->hue since user group checks
     // and config files are in terms of short name
     return UserGroupInformation.createRemoteUser(userName).getShortUserName();
   }
@@ -1161,7 +1161,7 @@ public class Server {
       return unkHost;
     }
   }
-  
+
   private void checkEnableLogPrerequisite(boolean enablelog, String statusdir) throws BadParam {
     if (enablelog && !TempletonUtils.isset(statusdir))
       throw new BadParam("enablelog is only applicable when statusdir is set");

Modified: hive/branches/cbo/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java (original)
+++ hive/branches/cbo/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java Thu Sep  4 02:49:46 2014
@@ -169,9 +169,9 @@ public class JobState {
     String childJobIDs = getField("children");
     if (childJobIDs != null) {
       for (String jobid : childJobIDs.split(",")) {
-    	children.add(new JobState(jobid, config));
+        children.add(new JobState(jobid, config));
       }
-    }    
+    }
     return children;
   }
 

Modified: hive/branches/cbo/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java (original)
+++ hive/branches/cbo/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java Thu Sep  4 02:49:46 2014
@@ -28,6 +28,7 @@ import java.util.concurrent.atomic.Atomi
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.HiveMetaStore;
@@ -49,6 +50,7 @@ public class MiniHS2 extends AbstractHiv
   public static final String HS2_BINARY_MODE = "binary";
   public static final String HS2_HTTP_MODE = "http";
   private static final String driverName = "org.apache.hive.jdbc.HiveDriver";
+  private static final FsPermission FULL_PERM = new FsPermission((short)00777);
   private HiveServer2 hiveServer2 = null;
   private final File baseDir;
   private final Path baseDfsDir;
@@ -59,6 +61,7 @@ public class MiniHS2 extends AbstractHiv
   private boolean useMiniKdc = false;
   private final String serverPrincipal;
   private final String serverKeytab;
+  private final boolean isMetastoreRemote;
 
   public static class Builder {
     private HiveConf hiveConf = new HiveConf();
@@ -67,6 +70,7 @@ public class MiniHS2 extends AbstractHiv
     private String serverPrincipal;
     private String serverKeytab;
     private boolean isHTTPTransMode = false;
+    private boolean isMetastoreRemote;
 
     public Builder() {
     }
@@ -83,6 +87,11 @@ public class MiniHS2 extends AbstractHiv
       return this;
     }
 
+    public Builder withRemoteMetastore() {
+      this.isMetastoreRemote = true;
+      return this;
+    }
+
     public Builder withConf(HiveConf hiveConf) {
       this.hiveConf = hiveConf;
       return this;
@@ -107,7 +116,8 @@ public class MiniHS2 extends AbstractHiv
       } else {
         hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, HS2_BINARY_MODE);
       }
-      return new MiniHS2(hiveConf, useMiniMR, useMiniKdc, serverPrincipal, serverKeytab);
+      return new MiniHS2(hiveConf, useMiniMR, useMiniKdc, serverPrincipal, serverKeytab,
+          isMetastoreRemote);
     }
   }
 
@@ -139,12 +149,14 @@ public class MiniHS2 extends AbstractHiv
     return useMiniKdc;
   }
 
-  private MiniHS2(HiveConf hiveConf, boolean useMiniMR, boolean useMiniKdc, String serverPrincipal, String serverKeytab) throws Exception {
+  private MiniHS2(HiveConf hiveConf, boolean useMiniMR, boolean useMiniKdc,
+      String serverPrincipal, String serverKeytab, boolean isMetastoreRemote) throws Exception {
     super(hiveConf, "localhost", MetaStoreUtils.findFreePort(), MetaStoreUtils.findFreePort());
     this.useMiniMR = useMiniMR;
     this.useMiniKdc = useMiniKdc;
     this.serverPrincipal = serverPrincipal;
     this.serverKeytab = serverKeytab;
+    this.isMetastoreRemote = isMetastoreRemote;
     baseDir =  Files.createTempDir();
     FileSystem fs;
     if (useMiniMR) {
@@ -169,6 +181,9 @@ public class MiniHS2 extends AbstractHiv
 
     fs.mkdirs(baseDfsDir);
     Path wareHouseDir = new Path(baseDfsDir, "warehouse");
+    // Create warehouse with 777, so that user impersonation has no issues.
+    FileSystem.mkdirs(fs, wareHouseDir, FULL_PERM);
+
     fs.mkdirs(wareHouseDir);
     setWareHouseDir(wareHouseDir.toString());
     System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);
@@ -180,10 +195,15 @@ public class MiniHS2 extends AbstractHiv
     hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT, getHttpPort());
 
     Path scratchDir = new Path(baseDfsDir, "scratch");
-    fs.mkdirs(scratchDir);
+
+    // Create scratchdir with 777, so that user impersonation has no issues.
+    FileSystem.mkdirs(fs, scratchDir, FULL_PERM);
     System.setProperty(HiveConf.ConfVars.SCRATCHDIR.varname, scratchDir.toString());
-    System.setProperty(HiveConf.ConfVars.LOCALSCRATCHDIR.varname,
-        baseDir.getPath() + File.separator + "scratch");
+    hiveConf.setVar(ConfVars.SCRATCHDIR, scratchDir.toString());
+
+    String localScratchDir = baseDir.getPath() + File.separator + "scratch";
+    System.setProperty(HiveConf.ConfVars.LOCALSCRATCHDIR.varname, localScratchDir);
+    hiveConf.setVar(ConfVars.LOCALSCRATCHDIR, localScratchDir);
   }
 
   public MiniHS2(HiveConf hiveConf) throws Exception {
@@ -191,10 +211,17 @@ public class MiniHS2 extends AbstractHiv
   }
 
   public MiniHS2(HiveConf hiveConf, boolean useMiniMR) throws Exception {
-    this(hiveConf, useMiniMR, false, null, null);
+    this(hiveConf, useMiniMR, false, null, null, false);
   }
 
   public void start(Map<String, String> confOverlay) throws Exception {
+    if (isMetastoreRemote) {
+      int metaStorePort = MetaStoreUtils.findFreePort();
+      getHiveConf().setVar(ConfVars.METASTOREURIS, "thrift://localhost:" + metaStorePort);
+      MetaStoreUtils.startMetaStore(metaStorePort,
+          ShimLoader.getHadoopThriftAuthBridge(), getHiveConf());
+    }
+
     hiveServer2 = new HiveServer2();
     // Set confOverlay parameters
     for (Map.Entry<String, String> entry : confOverlay.entrySet()) {
@@ -208,6 +235,9 @@ public class MiniHS2 extends AbstractHiv
 
   public void stop() {
     verifyStarted();
+    // Currently there is no way to stop the MetaStore service. It will be stopped when the
+    // test JVM exits. This is how other tests are also using MetaStore server.
+
     hiveServer2.stop();
     setStarted(false);
     try {

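The MiniHS2 additions wire a withRemoteMetastore() flag through the Builder so that start() brings up a standalone Thrift metastore on a free port before HiveServer2. A usage sketch built only from the methods visible in this diff:

    import java.util.HashMap;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hive.jdbc.miniHS2.MiniHS2;

    public class MiniHS2RemoteMetastoreSketch {
      public static void main(String[] args) throws Exception {
        MiniHS2 miniHS2 = new MiniHS2.Builder()
            .withConf(new HiveConf())
            .withRemoteMetastore() // start() will launch a Thrift metastore on a free port
            .build();
        miniHS2.start(new HashMap<String, String>()); // empty conf overlay
        // ... run JDBC tests against miniHS2 ...
        miniHS2.stop(); // per the comment in stop(), the metastore keeps running until JVM exit
      }
    }
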
Modified: hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java (original)
+++ hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java Thu Sep  4 02:49:46 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore
 
 import java.io.IOException;
 import java.net.ServerSocket;
+import java.util.concurrent.TimeUnit;
 
 import junit.framework.TestCase;
 
@@ -45,7 +46,7 @@ public class TestMetaStoreAuthorization 
         "true");
     conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
     conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    conf.setIntVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, 60);
+    conf.setTimeVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, 60, TimeUnit.SECONDS);
   }
 
   public void testIsWritable() throws Exception {

Modified: hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java (original)
+++ hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java Thu Sep  4 02:49:46 2014
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.metastore
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import junit.framework.Assert;
 import junit.framework.TestCase;
@@ -58,7 +59,7 @@ public class TestRetryingHMSHandler exte
     hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
     hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
     hiveConf.setIntVar(HiveConf.ConfVars.HMSHANDLERATTEMPTS, 2);
-    hiveConf.setIntVar(HiveConf.ConfVars.HMSHANDLERINTERVAL, 0);
+    hiveConf.setTimeVar(HiveConf.ConfVars.HMSHANDLERINTERVAL, 0, TimeUnit.MILLISECONDS);
     hiveConf.setBoolVar(HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF, false);
     msc = new HiveMetaStoreClient(hiveConf, null);
   }

Modified: hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java (original)
+++ hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java Thu Sep  4 02:49:46 2014
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql.security;
 
 import java.net.URI;
-import java.security.AccessControlException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -48,7 +47,7 @@ public class TestStorageBasedMetastoreAu
   @Override
   protected void allowCreateInDb(String dbName, String userName, String location)
       throws Exception {
-    setPermissions(location,"-rwxr--r--");
+    setPermissions(location,"-rwxr--r-t");
   }
 
   @Override
@@ -79,7 +78,7 @@ public class TestStorageBasedMetastoreAu
   @Override
   protected void allowDropOnDb(String dbName, String userName, String location)
       throws Exception {
-    setPermissions(location,"-rwxr--r--");
+    setPermissions(location,"-rwxr--r-t");
   }
 
   protected void setPermissions(String locn, String permissions) throws Exception {

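The only change in these two test hooks is the final permission character, '-' becomes 't': the location keeps the same owner/group bits but now carries the sticky bit (a lowercase 't' also grants "other" execute; 'T' would set sticky alone). A small sketch of what the string means, assuming the test's setPermissions helper relies on Hadoop's FsPermission symbolic parsing:

    import org.apache.hadoop.fs.permission.FsPermission;

    public class StickyBitSketch {
      public static void main(String[] args) {
        FsPermission before = FsPermission.valueOf("-rwxr--r--");
        FsPermission after = FsPermission.valueOf("-rwxr--r-t");
        // The trailing 't' adds the sticky bit on top of the "other" bits.
        System.out.println(before.getStickyBit()); // false
        System.out.println(after.getStickyBit());  // true
      }
    }
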
Modified: hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hive/service/auth/TestCustomAuthentication.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hive/service/auth/TestCustomAuthentication.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hive/service/auth/TestCustomAuthentication.java (original)
+++ hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hive/service/auth/TestCustomAuthentication.java Thu Sep  4 02:49:46 2014
@@ -18,7 +18,6 @@
 package org.apache.hive.service.auth;
 
 import junit.framework.Assert;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hive.service.server.HiveServer2;
 import org.junit.AfterClass;

Modified: hive/branches/cbo/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/src/test/resources/testconfiguration.properties?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/cbo/itests/src/test/resources/testconfiguration.properties Thu Sep  4 02:49:46 2014
@@ -101,6 +101,7 @@ minitez.query.files.shared=alter_merge_2
   script_env_var2.q,\
   script_pipe.q,\
   scriptfile1.q,\
+  select_dummy_source.q,\
   stats_counter.q,\
   stats_counter_partitioned.q,\
   stats_noscan_1.q,\

Modified: hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerForTest.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerForTest.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerForTest.java (original)
+++ hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerForTest.java Thu Sep  4 02:49:46 2014
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.securit
  * To be used for testing purposes only!
  */
 @Private
-public class SQLStdHiveAccessControllerForTest extends SQLStdHiveAccessController {
+public class SQLStdHiveAccessControllerForTest extends SQLStdHiveAccessControllerWrapper {
 
   SQLStdHiveAccessControllerForTest(HiveMetastoreClientFactory metastoreClientFactory, HiveConf conf,
       HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) throws HiveAuthzPluginException {

Modified: hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java (original)
+++ hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java Thu Sep  4 02:49:46 2014
@@ -38,7 +38,7 @@ public class SQLStdHiveAuthorizationVali
 
   public SQLStdHiveAuthorizationValidatorForTest(HiveMetastoreClientFactory metastoreClientFactory,
       HiveConf conf, HiveAuthenticationProvider authenticator,
-      SQLStdHiveAccessController privController) {
+      SQLStdHiveAccessControllerWrapper privController) {
     super(metastoreClientFactory, conf, authenticator, privController);
   }
 

Modified: hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java (original)
+++ hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java Thu Sep  4 02:49:46 2014
@@ -32,7 +32,7 @@ public class SQLStdHiveAuthorizerFactory
   @Override
   public HiveAuthorizer createHiveAuthorizer(HiveMetastoreClientFactory metastoreClientFactory,
       HiveConf conf, HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) throws HiveAuthzPluginException {
-    SQLStdHiveAccessController privilegeManager =
+    SQLStdHiveAccessControllerWrapper privilegeManager =
         new SQLStdHiveAccessControllerForTest(metastoreClientFactory, conf, authenticator, ctx);
     return new HiveAuthorizerImpl(
         privilegeManager,

Modified: hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java (original)
+++ hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java Thu Sep  4 02:49:46 2014
@@ -160,7 +160,7 @@ public abstract class HiveBaseResultSet 
   }
 
   public InputStream getBinaryStream(String columnName) throws SQLException {
-	return getBinaryStream(findColumn(columnName));
+    return getBinaryStream(findColumn(columnName));
   }
 
   public Blob getBlob(int i) throws SQLException {

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Thu Sep  4 02:49:46 2014
@@ -42,6 +42,7 @@ import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
 import java.util.Timer;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -445,7 +446,7 @@ public class HiveMetaStore extends Thrif
         partitionValidationPattern = null;
       }
 
-      long cleanFreq = hiveConf.getLongVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ) * 1000L;
+      long cleanFreq = hiveConf.getTimeVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ, TimeUnit.MILLISECONDS);
       if (cleanFreq > 0) {
         // In default config, there is no timer.
         Timer cleaner = new Timer("Metastore Events Cleaner Thread", true);
@@ -3719,7 +3720,7 @@ public class HiveMetaStore extends Thrif
       } finally {
         endFunction("write_partition_column_statistics: ", ret != false, null, tableName);
       }
-    }
+    } 
 
     @Override
     public boolean delete_partition_column_statistics(String dbName, String tableName,
@@ -5044,17 +5045,17 @@ public class HiveMetaStore extends Thrif
 
     @Override
     public boolean set_aggr_stats_for(SetPartitionsStatsRequest request)
-        throws NoSuchObjectException, InvalidObjectException, MetaException,
-        InvalidInputException, TException {
+        throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException,
+        TException {
       boolean ret = true;
       for (ColumnStatistics colStats : request.getColStats()) {
         ret = ret && update_partition_column_statistics(colStats);
       }
       return ret;
     }
-
   }
 
+  
   public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf) throws MetaException {
     return newHMSHandler(name, hiveConf, false);
   }

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Thu Sep  4 02:49:46 2014
@@ -39,6 +39,7 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.concurrent.TimeUnit;
 
 import javax.security.auth.login.LoginException;
 
@@ -151,7 +152,7 @@ public class HiveMetaStoreClient impleme
 
   // for thrift connects
   private int retries = 5;
-  private int retryDelaySeconds = 0;
+  private long retryDelaySeconds = 0;
 
   static final protected Log LOG = LogFactory.getLog("hive.metastore");
 
@@ -182,7 +183,8 @@ public class HiveMetaStoreClient impleme
 
     // get the number retries
     retries = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES);
-    retryDelaySeconds = conf.getIntVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY);
+    retryDelaySeconds = conf.getTimeVar(
+        ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
 
     // user wants file store based configuration
     if (conf.getVar(HiveConf.ConfVars.METASTOREURIS) != null) {
@@ -317,13 +319,14 @@ public class HiveMetaStoreClient impleme
     HadoopShims shim = ShimLoader.getHadoopShims();
     boolean useSasl = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL);
     boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);
-    int clientSocketTimeout = conf.getIntVar(ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT);
+    int clientSocketTimeout = (int) conf.getTimeVar(
+        ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
 
     for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
       for (URI store : metastoreUris) {
         LOG.info("Trying to connect to metastore with URI " + store);
         try {
-          transport = new TSocket(store.getHost(), store.getPort(), 1000 * clientSocketTimeout);
+          transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
           if (useSasl) {
             // Wrap thrift connection with SASL for secure connection.
             try {

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java Thu Sep  4 02:49:46 2014
@@ -42,6 +42,7 @@ import javax.jdo.datastore.JDOConnection
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.math3.stat.StatUtils;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
@@ -918,11 +919,11 @@ class MetaStoreDirectSql {
     long end = doTrace ? System.nanoTime() : 0;
     timingTrace(doTrace, qText, start, end);
     ForwardQueryResult fqr = (ForwardQueryResult) qResult;
-    List<Integer> colnumbers = new ArrayList<Integer>();
-    colnumbers.addAll(fqr);
-    for (Integer colnumber : colnumbers) {
-      if (colnumber == colNames.size())
+    Iterator<?> iter = fqr.iterator();
+    while (iter.hasNext()) {
+      if (StatObjectConverter.extractSqlLong(iter.next()) == colNames.size()) {
         partsFound++;
+      }
     }
     return partsFound;
   }
@@ -995,15 +996,15 @@ class MetaStoreDirectSql {
       for (Object[] row : list) {
         String colName = (String) row[0];
         String colType = (String) row[1];
-        if ((Integer) row[2] == partNames.size() || (Integer) row[2] < 2) {
-          // Extrapolation is not needed for this column if
-          // count(\"PARTITION_NAME\")==partNames.size()
-          // Or, extrapolation is not possible for this column if
-          // count(\"PARTITION_NAME\")<2
+        // Extrapolation is not needed for this column if
+        // count(\"PARTITION_NAME\")==partNames.size()
+        // Or, extrapolation is not possible for this column if
+        // count(\"PARTITION_NAME\")<2
+        Long count = StatObjectConverter.extractSqlLong(row[2]);
+        if (count == partNames.size() || count < 2) {
           noExtraColumnNames.add(colName);
         } else {
-          extraColumnNameTypeParts.put(colName,
-              new String[] { colType, String.valueOf((Integer) row[2]) });
+          extraColumnNameTypeParts.put(colName, new String[] { colType, String.valueOf(count) });
         }
       }
       query.closeAll();
@@ -1090,12 +1091,12 @@ class MetaStoreDirectSql {
             String colStatName = IExtrapolatePartStatus.colStatNames[colStatIndex];
             // if the aggregation type is sum, we do a scale-up
             if (IExtrapolatePartStatus.aggrTypes[colStatIndex] == IExtrapolatePartStatus.AggrType.Sum) {
-              Long val = (Long) sumMap.get(colName).get(colStatIndex);
-              if (val == null) {
+              Object o = sumMap.get(colName).get(colStatIndex);
+              if (o == null) {
                 row[2 + colStatIndex] = null;
               } else {
-                row[2 + colStatIndex] = (Long) (val / sumVal * (partNames
-                    .size()));
+                Long val = StatObjectConverter.extractSqlLong(o);
+                row[2 + colStatIndex] = (Long) (val / sumVal * (partNames.size()));
               }
             } else {
               // if the aggregation type is min/max, we extrapolate from the
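
All three hunks in this file replace hard (Integer)/(Long) casts on
direct-SQL results with StatObjectConverter.extractSqlLong. The motivation:
JDBC drivers disagree on the numeric type returned for COUNT()-style
aggregates (variously Integer, Long, or BigDecimal, as with Oracle's
NUMBER), so a hard cast can throw ClassCastException on some backends. A
self-contained sketch of the idea; the real method lives in
StatObjectConverter and throws MetaException rather than
IllegalArgumentException:

    public final class ExtractSqlLongSketch {
      // Normalize any SQL numeric result to a Long instead of casting it.
      static Long extractSqlLong(Object obj) {
        if (obj == null) {
          return null;
        }
        if (!(obj instanceof Number)) {
          throw new IllegalArgumentException(
              "Expected numeric value but got " + obj.getClass().getName());
        }
        return ((Number) obj).longValue();
      }

      public static void main(String[] args) {
        // Both normalize to 5, whatever the driver handed back.
        System.out.println(extractSqlLong(Integer.valueOf(5)));
        System.out.println(extractSqlLong(new java.math.BigDecimal("5")));
      }
    }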

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Thu Sep  4 02:49:46 2014
@@ -45,9 +45,11 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
@@ -163,19 +165,25 @@ public class MetaStoreUtils {
     return updateUnpartitionedTableStatsFast(db, tbl, wh, madeDir, false);
   }
 
+  public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, Warehouse wh,
+      boolean madeDir, boolean forceRecompute) throws MetaException {
+    return updateUnpartitionedTableStatsFast(tbl,
+        wh.getFileStatusesForUnpartitionedTable(db, tbl), madeDir, forceRecompute);
+  }
+
   /**
    * Updates the numFiles and totalSize parameters for the passed unpartitioned Table by querying
    * the warehouse if the passed Table does not already have values for these parameters.
-   * @param db
    * @param tbl
-   * @param wh
+   * @param fileStatus
    * @param newDir if true, the directory was just created and can be assumed to be empty
    * @param forceRecompute Recompute stats even if the passed Table already has
    * these parameters set
    * @return true if the stats were updated, false otherwise
    */
-  public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, Warehouse wh,
-      boolean newDir, boolean forceRecompute) throws MetaException {
+  public static boolean updateUnpartitionedTableStatsFast(Table tbl,
+      FileStatus[] fileStatus, boolean newDir, boolean forceRecompute) throws MetaException {
+
     Map<String,String> params = tbl.getParameters();
     boolean updated = false;
     if (forceRecompute ||
@@ -188,7 +196,6 @@ public class MetaStoreUtils {
         // The table location already exists and may contain data.
         // Let's try to populate those stats that don't require full scan.
         LOG.info("Updating table stats fast for " + tbl.getTableName());
-        FileStatus[] fileStatus = wh.getFileStatusesForUnpartitionedTable(db, tbl);
         populateQuickStats(fileStatus, params);
         LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE));
         if(!params.containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) {
@@ -1043,11 +1050,17 @@ public class MetaStoreUtils {
 
   public static void startMetaStore(final int port,
       final HadoopThriftAuthBridge bridge) throws Exception {
+    startMetaStore(port, bridge, new HiveConf(HMSHandler.class));
+  }
+
+  public static void startMetaStore(final int port,
+      final HadoopThriftAuthBridge bridge, final HiveConf hiveConf)
+      throws Exception{
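SKIP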
     Thread thread = new Thread(new Runnable() {
       @Override
       public void run() {
         try {
-          HiveMetaStore.startMetaStore(port, bridge);
+          HiveMetaStore.startMetaStore(port, bridge, hiveConf);
         } catch (Throwable e) {
           LOG.error("Metastore Thrift Server threw an exception...",e);
         }
@@ -1057,6 +1070,7 @@ public class MetaStoreUtils {
     thread.start();
     loopUntilHMSReady(port);
   }
+
   /**
    * A simple connect test to make sure that the metastore is up
    * @throws Exception
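
The new startMetaStore overload lets callers supply their own HiveConf to
the embedded Thrift metastore instead of the fresh HiveConf(HMSHandler.class)
the old entry point builds. A hedged usage sketch; the port and the setting
are illustrative, and the auth bridge is obtained from the shims the way
existing callers do:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.MetaStoreUtils;
    import org.apache.hadoop.hive.shims.ShimLoader;

    public class StartMetaStoreSketch {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        conf.setTimeVar(HiveConf.ConfVars.HMSHANDLERINTERVAL,
            2, TimeUnit.SECONDS);
        // Spawns the metastore thread and blocks until the port answers
        // (loopUntilHMSReady above).
        MetaStoreUtils.startMetaStore(9083,
            ShimLoader.getHadoopThriftAuthBridge(), conf);
      }
    }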

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Thu Sep  4 02:49:46 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore
 
 import static org.apache.commons.lang.StringUtils.join;
 
+import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
@@ -34,6 +35,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -88,6 +90,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.ResourceUri;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -129,6 +132,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
 import org.apache.hadoop.hive.metastore.parser.FilterLexer;
 import org.apache.hadoop.hive.metastore.parser.FilterParser;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
 import org.datanucleus.store.rdbms.exceptions.MissingTableException;
@@ -157,7 +161,7 @@ public class ObjectStore implements RawS
 
   private static final Map<String, Class> PINCLASSMAP;
   static {
-    Map<String, Class> map = new HashMap();
+    Map<String, Class> map = new HashMap<String, Class>();
     map.put("table", MTable.class);
     map.put("storagedescriptor", MStorageDescriptor.class);
     map.put("serdeinfo", MSerDeInfo.class);
@@ -297,6 +301,16 @@ public class ObjectStore implements RawS
         }
       }
     }
+    // Password may no longer be in the conf, use getPassword()
+    try {
+      String passwd =
+          ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.METASTOREPWD.varname);
+      if (passwd != null && !passwd.isEmpty()) {
+        prop.setProperty(HiveConf.ConfVars.METASTOREPWD.varname, passwd);
+      }
+    } catch (IOException err) {
+      throw new RuntimeException("Error getting metastore password: " + err.getMessage(), err);
+    }
 
     if (LOG.isDebugEnabled()) {
       for (Entry<Object, Object> e : prop.entrySet()) {
@@ -1068,14 +1082,14 @@ public class ObjectStore implements RawS
     return keys;
   }
 
-  private SerDeInfo converToSerDeInfo(MSerDeInfo ms) throws MetaException {
+  private SerDeInfo convertToSerDeInfo(MSerDeInfo ms) throws MetaException {
     if (ms == null) {
       throw new MetaException("Invalid SerDeInfo object");
     }
     return new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters()));
   }
 
-  private MSerDeInfo converToMSerDeInfo(SerDeInfo ms) throws MetaException {
+  private MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException {
     if (ms == null) {
       throw new MetaException("Invalid SerDeInfo object");
     }
@@ -1107,7 +1121,7 @@ public class ObjectStore implements RawS
 
     StorageDescriptor sd = new StorageDescriptor(noFS ? null : convertToFieldSchemas(mFieldSchemas),
         msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd
-        .isCompressed(), msd.getNumBuckets(), converToSerDeInfo(msd
+        .isCompressed(), msd.getNumBuckets(), convertToSerDeInfo(msd
         .getSerDeInfo()), convertList(msd.getBucketCols()), convertToOrders(msd
         .getSortCols()), convertMap(msd.getParameters()));
     SkewedInfo skewedInfo = new SkewedInfo(convertList(msd.getSkewedColNames()),
@@ -1219,7 +1233,7 @@ public class ObjectStore implements RawS
     }
     return new MStorageDescriptor(mcd, sd
         .getLocation(), sd.getInputFormat(), sd.getOutputFormat(), sd
-        .isCompressed(), sd.getNumBuckets(), converToMSerDeInfo(sd
+        .isCompressed(), sd.getNumBuckets(), convertToMSerDeInfo(sd
         .getSerdeInfo()), sd.getBucketCols(),
         convertToMOrders(sd.getSortCols()), sd.getParameters(),
         (null == sd.getSkewedInfo()) ? null
@@ -2382,7 +2396,7 @@ public class ObjectStore implements RawS
    * Makes a JDO query filter string.
    * Makes a JDO query filter string for tables or partitions.
    * @param dbName Database name.
-   * @param table Table. If null, the query returned is over tables in a database.
+   * @param mtable Table. If null, the query returned is over tables in a database.
    *   If not null, the query returned is over partitions in a table.
    * @param filter The filter from which JDOQL filter will be made.
    * @param params Parameters for the filter. Some parameters may be added here.
@@ -5704,7 +5718,7 @@ public class ObjectStore implements RawS
       pm.makePersistent(mStatsObj);
     }
   }
-
+  
   @Override
   public boolean updateTableColumnStatistics(ColumnStatistics colStats)
     throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
@@ -6166,7 +6180,7 @@ public class ObjectStore implements RawS
     boolean commited = false;
     long delCnt;
     LOG.debug("Begin executing cleanupEvents");
-    Long expiryTime = HiveConf.getLongVar(getConf(), ConfVars.METASTORE_EVENT_EXPIRY_DURATION) * 1000L;
+    Long expiryTime = HiveConf.getTimeVar(getConf(), ConfVars.METASTORE_EVENT_EXPIRY_DURATION, TimeUnit.MILLISECONDS);
     Long curTime = System.currentTimeMillis();
     try {
       openTransaction();
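
The new getPassword() block in this file is about moving secrets out of
plain configuration: on Hadoop versions that provide it,
Configuration.getPassword() consults any configured credential providers
before falling back to the config value, and the shim layer hides the
version difference. A minimal sketch of the lookup (illustrative class
name):

    import java.io.IOException;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.shims.ShimLoader;

    public class PasswordLookupSketch {
      public static void main(String[] args) throws IOException {
        HiveConf conf = new HiveConf();
        String passwd = ShimLoader.getHadoopShims()
            .getPassword(conf, HiveConf.ConfVars.METASTOREPWD.varname);
        // Avoid printing the secret itself.
        System.out.println(passwd == null || passwd.isEmpty()
            ? "metastore password unset" : "metastore password resolved");
      }
    }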

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Thu Sep  4 02:49:46 2014
@@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
@@ -551,4 +552,5 @@ public interface RawStore extends Config
 
   public AggrStats get_aggr_stats_for(String dbName, String tblName,
     List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
+  
 }

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java Thu Sep  4 02:49:46 2014
@@ -23,6 +23,7 @@ import java.lang.reflect.InvocationTarge
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.lang.reflect.UndeclaredThrowableException;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.commons.logging.Log;
@@ -80,8 +81,8 @@ public class RetryingHMSHandler implemen
     boolean gotNewConnectUrl = false;
     boolean reloadConf = HiveConf.getBoolVar(hiveConf,
         HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF);
-    int retryInterval = HiveConf.getIntVar(hiveConf,
-        HiveConf.ConfVars.HMSHANDLERINTERVAL);
+    long retryInterval = HiveConf.getTimeVar(hiveConf,
+        HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS);
     int retryLimit = HiveConf.getIntVar(hiveConf,
         HiveConf.ConfVars.HMSHANDLERATTEMPTS);
 

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java Thu Sep  4 02:49:46 2014
@@ -24,6 +24,7 @@ import java.lang.reflect.InvocationTarge
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.lang.reflect.UndeclaredThrowableException;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -48,18 +49,18 @@ public class RetryingMetaStoreClient imp
 
   private final IMetaStoreClient base;
   private final int retryLimit;
-  private final int retryDelaySeconds;
+  private final long retryDelaySeconds;
 
 
 
   protected RetryingMetaStoreClient(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
       Class<? extends IMetaStoreClient> msClientClass) throws MetaException {
     this.retryLimit = hiveConf.getIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES);
-    this.retryDelaySeconds =
-        hiveConf.getIntVar(HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY);
+    this.retryDelaySeconds = hiveConf.getTimeVar(
+        HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
 
     reloginExpiringKeytabUser();
-    this.base = (IMetaStoreClient) MetaStoreUtils.newInstance(msClientClass, new Class[] {
+    this.base = MetaStoreUtils.newInstance(msClientClass, new Class[] {
         HiveConf.class, HiveMetaHookLoader.class}, new Object[] {hiveConf, hookLoader});
   }
 

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java Thu Sep  4 02:49:46 2014
@@ -65,20 +65,20 @@ public class StatObjectConverter {
      if (statsObj.getStatsData().isSetBooleanStats()) {
        BooleanColumnStatsData boolStats = statsObj.getStatsData().getBooleanStats();
        mColStats.setBooleanStats(
-           boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null, 
+           boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null,
            boolStats.isSetNumFalses() ? boolStats.getNumFalses() : null,
            boolStats.isSetNumNulls() ? boolStats.getNumNulls() : null);
      } else if (statsObj.getStatsData().isSetLongStats()) {
        LongColumnStatsData longStats = statsObj.getStatsData().getLongStats();
        mColStats.setLongStats(
-           longStats.isSetNumNulls() ? longStats.getNumNulls() : null, 
+           longStats.isSetNumNulls() ? longStats.getNumNulls() : null,
            longStats.isSetNumDVs() ? longStats.getNumDVs() : null,
            longStats.isSetLowValue() ? longStats.getLowValue() : null,
            longStats.isSetHighValue() ? longStats.getHighValue() : null);
      } else if (statsObj.getStatsData().isSetDoubleStats()) {
        DoubleColumnStatsData doubleStats = statsObj.getStatsData().getDoubleStats();
        mColStats.setDoubleStats(
-           doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null, 
+           doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null,
            doubleStats.isSetNumDVs() ? doubleStats.getNumDVs() : null,
            doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null,
            doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null);
@@ -87,20 +87,20 @@ public class StatObjectConverter {
        String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats.getLowValue()) : null;
        String high = decimalStats.isSetHighValue() ? createJdoDecimalString(decimalStats.getHighValue()) : null;
        mColStats.setDecimalStats(
-           decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null, 
-           decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null, 
+           decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null,
+           decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null,
                low, high);
      } else if (statsObj.getStatsData().isSetStringStats()) {
        StringColumnStatsData stringStats = statsObj.getStatsData().getStringStats();
        mColStats.setStringStats(
-           stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null, 
+           stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null,
            stringStats.isSetNumDVs() ? stringStats.getNumDVs() : null,
-           stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null, 
+           stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null,
            stringStats.isSetAvgColLen() ? stringStats.getAvgColLen() : null);
      } else if (statsObj.getStatsData().isSetBinaryStats()) {
        BinaryColumnStatsData binaryStats = statsObj.getStatsData().getBinaryStats();
        mColStats.setBinaryStats(
-           binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null, 
+           binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null,
            binaryStats.isSetMaxColLen() ? binaryStats.getMaxColLen() : null,
            binaryStats.isSetAvgColLen() ? binaryStats.getAvgColLen() : null);
      }
@@ -109,9 +109,9 @@ public class StatObjectConverter {
 
   public static void setFieldsIntoOldStats(
       MTableColumnStatistics mStatsObj, MTableColumnStatistics oldStatsObj) {
-	  if (mStatsObj.getAvgColLen() != null) {
-	    oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
-	  }
+    if (mStatsObj.getAvgColLen() != null) {
+      oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
+    }
     if (mStatsObj.getLongHighValue() != null) {
       oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
     }
@@ -131,19 +131,19 @@ public class StatObjectConverter {
       oldStatsObj.setDecimalHighValue(mStatsObj.getDecimalHighValue());
     }
     if (mStatsObj.getMaxColLen() != null) {
-  	  oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen());
+      oldStatsObj.setMaxColLen(mStatsObj.getMaxColLen());
     }
     if (mStatsObj.getNumDVs() != null) {
-  	  oldStatsObj.setNumDVs(mStatsObj.getNumDVs());
+      oldStatsObj.setNumDVs(mStatsObj.getNumDVs());
     }
     if (mStatsObj.getNumFalses() != null) {
-  	  oldStatsObj.setNumFalses(mStatsObj.getNumFalses());
+      oldStatsObj.setNumFalses(mStatsObj.getNumFalses());
     }
     if (mStatsObj.getNumTrues() != null) {
-  	  oldStatsObj.setNumTrues(mStatsObj.getNumTrues());
+      oldStatsObj.setNumTrues(mStatsObj.getNumTrues());
     }
     if (mStatsObj.getNumNulls() != null) {
-  	  oldStatsObj.setNumNulls(mStatsObj.getNumNulls());
+      oldStatsObj.setNumNulls(mStatsObj.getNumNulls());
     }
     oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed());
   }
@@ -152,13 +152,13 @@ public class StatObjectConverter {
       MPartitionColumnStatistics mStatsObj, MPartitionColumnStatistics oldStatsObj) {
     if (mStatsObj.getAvgColLen() != null) {
           oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen());
-	  }
+    }
     if (mStatsObj.getLongHighValue() != null) {
-		  oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
-	  }
-	  if (mStatsObj.getDoubleHighValue() != null) {
-		  oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue());
-	  }
+      oldStatsObj.setLongHighValue(mStatsObj.getLongHighValue());
+    }
+    if (mStatsObj.getDoubleHighValue() != null) {
+      oldStatsObj.setDoubleHighValue(mStatsObj.getDoubleHighValue());
+    }
     oldStatsObj.setLastAnalyzed(mStatsObj.getLastAnalyzed());
     if (mStatsObj.getLongLowValue() != null) {
       oldStatsObj.setLongLowValue(mStatsObj.getLongLowValue());
@@ -292,20 +292,20 @@ public class StatObjectConverter {
     if (statsObj.getStatsData().isSetBooleanStats()) {
       BooleanColumnStatsData boolStats = statsObj.getStatsData().getBooleanStats();
       mColStats.setBooleanStats(
-          boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null, 
+          boolStats.isSetNumTrues() ? boolStats.getNumTrues() : null,
           boolStats.isSetNumFalses() ? boolStats.getNumFalses() : null,
           boolStats.isSetNumNulls() ? boolStats.getNumNulls() : null);
     } else if (statsObj.getStatsData().isSetLongStats()) {
       LongColumnStatsData longStats = statsObj.getStatsData().getLongStats();
       mColStats.setLongStats(
-          longStats.isSetNumNulls() ? longStats.getNumNulls() : null, 
+          longStats.isSetNumNulls() ? longStats.getNumNulls() : null,
           longStats.isSetNumDVs() ? longStats.getNumDVs() : null,
           longStats.isSetLowValue() ? longStats.getLowValue() : null,
           longStats.isSetHighValue() ? longStats.getHighValue() : null);
     } else if (statsObj.getStatsData().isSetDoubleStats()) {
       DoubleColumnStatsData doubleStats = statsObj.getStatsData().getDoubleStats();
       mColStats.setDoubleStats(
-          doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null, 
+          doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null,
           doubleStats.isSetNumDVs() ? doubleStats.getNumDVs() : null,
           doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null,
           doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null);
@@ -314,20 +314,20 @@ public class StatObjectConverter {
       String low = decimalStats.isSetLowValue() ? createJdoDecimalString(decimalStats.getLowValue()) : null;
       String high = decimalStats.isSetHighValue() ? createJdoDecimalString(decimalStats.getHighValue()) : null;
       mColStats.setDecimalStats(
-          decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null, 
-          decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null, 
+          decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null,
+          decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null,
               low, high);
     } else if (statsObj.getStatsData().isSetStringStats()) {
       StringColumnStatsData stringStats = statsObj.getStatsData().getStringStats();
       mColStats.setStringStats(
-          stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null, 
+          stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null,
           stringStats.isSetNumDVs() ? stringStats.getNumDVs() : null,
-          stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null, 
+          stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null,
           stringStats.isSetAvgColLen() ? stringStats.getAvgColLen() : null);
     } else if (statsObj.getStatsData().isSetBinaryStats()) {
       BinaryColumnStatsData binaryStats = statsObj.getStatsData().getBinaryStats();
       mColStats.setBinaryStats(
-          binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null, 
+          binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null,
           binaryStats.isSetMaxColLen() ? binaryStats.getMaxColLen() : null,
           binaryStats.isSetAvgColLen() ? binaryStats.getAvgColLen() : null);
     }

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java Thu Sep  4 02:49:46 2014
@@ -40,6 +40,7 @@ import javax.sql.DataSource;
 import java.io.IOException;
 import java.sql.*;
 import java.util.*;
+import java.util.concurrent.TimeUnit;
 
 /**
  * A handler to answer transaction related calls that come into the metastore
@@ -119,7 +120,7 @@ public class TxnHandler {
       throw new RuntimeException(e);
     }
 
-    timeout = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT) * 1000;
+    timeout = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS);
     deadlockCnt = 0;
     buildJumpTable();
   }
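
Here the manual "* 1000" disappears entirely: getTimeVar performs the unit
conversion, and the configured string may carry its own suffix, which the
old getIntVar read would have rejected. A small sketch, assuming (as the
replaced seconds-to-millis multiplication implies) that unsuffixed values
of hive.txn.timeout are interpreted as seconds:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hive.conf.HiveConf;

    public class TxnTimeoutSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // "300" (default unit) and "300s" (explicit suffix) read the same.
        conf.set("hive.txn.timeout", "300s");
        long timeoutMs = HiveConf.getTimeVar(conf,
            HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS);
        System.out.println(timeoutMs);  // 300000
      }
    }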

Modified: hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java (original)
+++ hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java Thu Sep  4 02:49:46 2014
@@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
@@ -719,5 +720,4 @@ public class DummyRawStoreControlledComm
     return null;
   }
 
-
 }

Modified: hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java (original)
+++ hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java Thu Sep  4 02:49:46 2014
@@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
@@ -735,6 +736,7 @@ public class DummyRawStoreForJdoConnecti
       throws MetaException {
     return null;
   }
+  
 }
 
 

Modified: hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java?rev=1622396&r1=1622395&r2=1622396&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java (original)
+++ hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java Thu Sep  4 02:49:46 2014
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hive.metastore.txn;
 
-import junit.framework.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -26,11 +25,11 @@ import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.junit.After;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static junit.framework.Assert.*;
 
@@ -868,7 +867,7 @@ public class TestTxnHandler {
 
   @Test
   public void testHeartbeatLock() throws Exception {
-    conf.setIntVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1);
+    conf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.SECONDS);
     HeartbeatRequest h = new HeartbeatRequest();
     LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
     comp.setTablename("mytable");


