incubator-hcatalog-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From tof...@apache.org
Subject svn commit: r1373130 - in /incubator/hcatalog/trunk: ./ hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/ ivy/ src/java/org/apache/hcatalog/common/ src/java/org/apache/hcatalog/mapreduce/ src/test/org/apache/hcatalog/common/ webhcat/java-clie...
Date Tue, 14 Aug 2012 21:51:25 GMT
Author: toffer
Date: Tue Aug 14 21:51:25 2012
New Revision: 1373130

URL: http://svn.apache.org/viewvc?rev=1373130&view=rev
Log:
HCATALOG-370 Create a HiveMetaStoreClient cache in hcatalog (amalakar via fcliu)

Added:
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HiveClientCache.java
    incubator/hcatalog/trunk/src/test/org/apache/hcatalog/common/TestHiveClientCache.java
Modified:
    incubator/hcatalog/trunk/CHANGES.txt
    incubator/hcatalog/trunk/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java
    incubator/hcatalog/trunk/ivy.xml
    incubator/hcatalog/trunk/ivy/libraries.properties
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HCatConstants.java
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HCatUtil.java
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java
    incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/InitializeInput.java
    incubator/hcatalog/trunk/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClientHMSImpl.java
    incubator/hcatalog/trunk/webhcat/java-client/src/test/java/org/apache/hcatalog/api/TestHCatClient.java

Modified: incubator/hcatalog/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/CHANGES.txt?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/CHANGES.txt (original)
+++ incubator/hcatalog/trunk/CHANGES.txt Tue Aug 14 21:51:25 2012
@@ -23,6 +23,8 @@ Trunk (unreleased changes)
   INCOMPATIBLE CHANGES
 
   NEW FEATURES
+  HCAT-370 Create a HiveMetaStoreClient cache in hcatalog (amalakar via fcliu) 
+
   HCAT-419 Java APIs for HCatalog DDL commands (avandana via fcliu)
 
   HCAT-182 Web services interface for HCatalog access and Pig, Hive, and MR job execution
(ctdean, Rachel Gollub, and thejas via gates)

Modified: incubator/hcatalog/trunk/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java
(original)
+++ incubator/hcatalog/trunk/hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java
Tue Aug 14 21:51:25 2012
@@ -89,8 +89,8 @@ public class PigHCatUtil {
     return job.getConfiguration().get(HCatConstants.HCAT_METASTORE_PRINCIPAL);
   }
 
-  private static HiveMetaStoreClient createHiveMetaClient(String serverUri,
-      String serverKerberosPrincipal, Class<?> clazz) throws Exception {
+  private static HiveMetaStoreClient getHiveMetaClient(String serverUri,
+                                                       String serverKerberosPrincipal, Class<?>
clazz) throws Exception {
     HiveConf hiveConf = new HiveConf(clazz);
 
     if (serverUri != null){
@@ -104,7 +104,7 @@ public class PigHCatUtil {
     }
 
     try {
-      return new HiveMetaStoreClient(hiveConf,null);
+        return HCatUtil.getHiveClient(hiveConf);
     } catch (Exception e){
       throw new Exception("Could not instantiate a HiveMetaStoreClient connecting to server
uri:["+serverUri+"]",e);
     }
@@ -140,14 +140,14 @@ public class PigHCatUtil {
     Table table = null;
     HiveMetaStoreClient client = null;
     try {
-      client = createHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class);
+      client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class);
       table = client.getTable(dbName, tableName);
     } catch (NoSuchObjectException nsoe){
       throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE);
// prettier error messages to frontend
     } catch (Exception e) {
       throw new IOException(e);
     } finally {
-      HCatUtil.closeHiveClientQuietly(client);
+        HCatUtil.closeHiveClientQuietly(client);
     }
     hcatTableCache.put(loc_server, table);
     return table;

Modified: incubator/hcatalog/trunk/ivy.xml
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/ivy.xml?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/ivy.xml (original)
+++ incubator/hcatalog/trunk/ivy.xml Tue Aug 14 21:51:25 2012
@@ -59,6 +59,7 @@
       <exclude module="commons-pool"/>
       <exclude org="org.apache.geronimo.specs" module="geronimo-jta_1.1_spec"/>
     </dependency>
+    <dependency org="com.google.guava" name="guava" rev="${guava.version}"/>
 
     <!-- test dependencies -->
     <dependency org="junit" name="junit" rev="${junit.version}" conf="test->default"/>

Modified: incubator/hcatalog/trunk/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/ivy/libraries.properties?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/ivy/libraries.properties (original)
+++ incubator/hcatalog/trunk/ivy/libraries.properties Tue Aug 14 21:51:25 2012
@@ -34,7 +34,7 @@ datanucleus-enhancer.version=2.0.3
 datanucleus-rdbms.version=2.0.3
 derby.version=10.4.2.0
 fb303.version=0.7.0
-guava.version=11.0
+guava.version=11.0.2
 hadoop.jars.version=1.0.3
 hbase.version=0.92.0
 high-scale-lib.version=1.1.1

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HCatConstants.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HCatConstants.java?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HCatConstants.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HCatConstants.java Tue Aug
14 21:51:25 2012
@@ -50,7 +50,10 @@ public final class HCatConstants {
 
   public static final String HCAT_KEY_JOB_INFO =  HCAT_KEY_BASE + ".job.info";
 
-  private HCatConstants() { // restrict instantiation
+  // hcatalog specific configurations, that can be put in hive-site.xml
+  public static final String HCAT_HIVE_CLIENT_EXPIRY_TIME = "hcatalog.hive.client.cache.expiry.time";
+
+    private HCatConstants() { // restrict instantiation
   }
 
   public static final String HCAT_TABLE_SCHEMA = "hcat.table.schema";

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HCatUtil.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HCatUtil.java?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HCatUtil.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HCatUtil.java Tue Aug 14
21:51:25 2012
@@ -68,9 +68,13 @@ import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.security.auth.login.LoginException;
+
 public class HCatUtil {
 
     private static final Logger LOG = LoggerFactory.getLogger(HCatUtil.class);
+    private static volatile HiveClientCache hiveClientCache;
+    private final static int DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS = 2*60;
 
     public static boolean checkJobContextIfRunningFromBackend(JobContext j) {
         if (j.getConfiguration().get("mapred.task.id", "").equals("")) {
@@ -514,9 +518,32 @@ public class HCatUtil {
         }
     }
 
-    public static HiveMetaStoreClient createHiveClient(HiveConf hiveConf)
-            throws MetaException {
-        return new HiveMetaStoreClient(hiveConf);
+    /**
+     * Get or create a hive client depending on whether it exists in the cache or not
+     * @param hiveConf The hive configuration
+     * @return the client
+     * @throws MetaException When HiveMetaStoreClient couldn't be created
+     * @throws IOException
+     */
+    public static HiveMetaStoreClient getHiveClient(HiveConf hiveConf)
+            throws MetaException, IOException {
+
+        // Singleton behaviour: create the cache instance if required. The cache needs to
be created lazily and
+        // using the expiry time available in hiveConf.
+
+        if(hiveClientCache == null ) {
+            synchronized (HiveMetaStoreClient.class) {
+                if(hiveClientCache == null) {
+                    hiveClientCache = new HiveClientCache(hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_EXPIRY_TIME,
+                            DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS));
+                }
+            }
+        }
+        try {
+            return hiveClientCache.get(hiveConf);
+        } catch (LoginException e) {
+            throw new IOException("Couldn't create hiveMetaStoreClient, Error getting UGI
for user", e);
+        }
     }
 
     public static void closeHiveClientQuietly(HiveMetaStoreClient client) {
@@ -524,11 +551,10 @@ public class HCatUtil {
             if (client != null)
                 client.close();
         } catch (Exception e) {
-            LOG.debug("Error closing metastore client", e);
+            LOG.debug("Error closing metastore client. Ignored the error.", e);
         }
     }
 
-
     public static HiveConf getHiveConf(Configuration conf)
       throws IOException {
 

Added: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HiveClientCache.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HiveClientCache.java?rev=1373130&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HiveClientCache.java (added)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/common/HiveClientCache.java Tue
Aug 14 21:51:25 2012
@@ -0,0 +1,322 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.common;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.login.LoginException;
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A thread safe time expired cache for HiveMetaStoreClient
+ */
+class HiveClientCache {
+    final private Cache<HiveClientCacheKey, CacheableHiveMetaStoreClient> hiveCache;
+    private static final Logger LOG = LoggerFactory.getLogger(HiveClientCache.class);
+    private final int timeout;
+
+    private static final AtomicInteger nextId = new AtomicInteger(0);
+
+    // Since HiveMetaStoreClient is not thread-safe, hive clients are not shared across threads.
+    // Thread local variable containing each thread's unique ID, is used as one of the keys
for the cache
+    // causing each thread to get a different client even if the hiveConf is same.
+    private static final ThreadLocal<Integer> threadId =
+            new ThreadLocal<Integer>() {
+                @Override protected Integer initialValue() {
+                    return nextId.getAndIncrement();
+                }
+            };
+
+    private int getThreadId() {
+        return threadId.get();
+    }
+
+    /**
+     * @param timeout the length of time in seconds after a client is created that it should
be automatically removed
+     */
+     public HiveClientCache(final int timeout) {
+         this.timeout = timeout;
+         RemovalListener<HiveClientCacheKey, CacheableHiveMetaStoreClient> removalListener
=
+             new RemovalListener<HiveClientCacheKey, CacheableHiveMetaStoreClient>()
{
+                 public void onRemoval(RemovalNotification<HiveClientCacheKey, CacheableHiveMetaStoreClient>
notification) {
+                     CacheableHiveMetaStoreClient hiveMetaStoreClient = notification.getValue();
+                     if (hiveMetaStoreClient != null) {
+                         hiveMetaStoreClient.setExpiredFromCache();
+                         hiveMetaStoreClient.tearDownIfUnused();
+                     }
+                 }
+             };
+         hiveCache = CacheBuilder.newBuilder()
+                 .expireAfterWrite(timeout, TimeUnit.SECONDS)
+                 .removalListener(removalListener)
+                 .build();
+
+         // Add a shutdown hook for cleanup, if there are elements remaining in the cache
which were not cleaned up.
+         // This is the best effort approach. Ignore any error while doing so. Notice that
most of the clients
+         // would get cleaned up via either the removalListener or the close() call, only
the active clients
+         // that are in the cache or expired but being used in other threads won't get cleaned.
The following code will only
+         // clean the active cache ones. The ones expired from the cache but still held by other
threads are at the mercy
+         // of finalize() being called.
+         Thread cleanupHiveClientShutdownThread = new Thread() {
+             @Override
+             public void run() {
+                 LOG.info("Cleaning up hive client cache in ShutDown hook");
+                 closeAllClientsQuietly();
+             }
+         };
+         Runtime.getRuntime().addShutdownHook(cleanupHiveClientShutdownThread);
+    }
+
+    /**
+     * Note: This doesn't check if they are being used or not, meant only to be called during
shutdown etc.
+     */
+    void closeAllClientsQuietly() {
+        try {
+            ConcurrentMap<HiveClientCacheKey, CacheableHiveMetaStoreClient> elements
= hiveCache.asMap();
+            for (CacheableHiveMetaStoreClient cacheableHiveMetaStoreClient : elements.values())
{
+                cacheableHiveMetaStoreClient.tearDown();
+            }
+        } catch (Exception e) {
+            LOG.warn("Clean up of hive clients in the cache failed. Ignored", e);
+        }
+    }
+
+    public void cleanup() {
+        hiveCache.cleanUp();
+    }
+
+    /**
+     * Returns a cached client if exists or else creates one, caches and returns it. It also
checks that the client is
+     * healthy and can be reused
+     * @param hiveConf
+     * @return the hive client
+     * @throws MetaException
+     * @throws IOException
+     * @throws LoginException
+     */
+    public HiveMetaStoreClient get(final HiveConf hiveConf) throws MetaException, IOException,
LoginException {
+        final HiveClientCacheKey cacheKey = HiveClientCacheKey.fromHiveConf(hiveConf, getThreadId());
+        CacheableHiveMetaStoreClient hiveMetaStoreClient = getOrCreate(cacheKey);
+        if (!hiveMetaStoreClient.isOpen()) {
+            hiveCache.invalidate(cacheKey);
+            hiveMetaStoreClient = getOrCreate(cacheKey);
+        }
+        hiveMetaStoreClient.acquire();
+        return hiveMetaStoreClient;
+    }
+
+    /**
+     * Return from cache if exists else create/cache and return
+     * @param cacheKey
+     * @return
+     * @throws IOException
+     * @throws MetaException
+     * @throws LoginException
+     */
+    private CacheableHiveMetaStoreClient getOrCreate(final HiveClientCacheKey cacheKey) throws
IOException, MetaException, LoginException {
+        try {
+            return hiveCache.get(cacheKey, new Callable<CacheableHiveMetaStoreClient>()
{
+                @Override
+                public CacheableHiveMetaStoreClient call() throws MetaException {
+                    return new CacheableHiveMetaStoreClient(cacheKey.getHiveConf(), timeout);
+                }
+            });
+        } catch (ExecutionException e) {
+            Throwable t = e.getCause();
+            if (t instanceof IOException) {
+                throw (IOException) t;
+            } else if (t instanceof MetaException) {
+                throw (MetaException) t;
+            } else if (t instanceof LoginException) {
+                throw (LoginException) t;
+            } else {
+                throw new IOException("Error creating hiveMetaStoreClient", t);
+            }
+        }
+    }
+
+    /**
+     * A class to wrap HiveConf and expose equality based only on UserGroupInformation and
the metaStoreURIs.
+     * This becomes the key for the cache and this way the same HiveMetaStoreClient would
be returned if
+     * UserGroupInformation and metaStoreURIs are same. This function can evolve to express
+     * the cases when HiveConf is different but the same hiveMetaStoreClient can be used
+     */
+    public static class HiveClientCacheKey {
+        final private String metaStoreURIs;
+        final private UserGroupInformation ugi;
+        final private HiveConf hiveConf;
+        final private int threadId;
+
+        private HiveClientCacheKey(HiveConf hiveConf, final int threadId) throws IOException,
LoginException {
+            this.metaStoreURIs = hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS);
+            ugi = ShimLoader.getHadoopShims().getUGIForConf(hiveConf);
+            this.hiveConf = hiveConf;
+            this.threadId = threadId;
+        }
+
+        public static HiveClientCacheKey fromHiveConf(HiveConf hiveConf, final int threadId)
throws IOException, LoginException {
+            return new HiveClientCacheKey(hiveConf, threadId);
+        }
+
+        public HiveConf getHiveConf() {
+            return hiveConf;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            HiveClientCacheKey that = (HiveClientCacheKey) o;
+            return new EqualsBuilder().
+                    append(this.metaStoreURIs,
+                            that.metaStoreURIs).
+                    append(this.ugi, that.ugi).
+                    append(this.threadId, that.threadId).isEquals();
+        }
+
+        @Override
+        public int hashCode() {
+            return new HashCodeBuilder().
+                    append(metaStoreURIs).
+                    append(ugi).
+                    append(threadId).toHashCode();
+        }
+    }
+
+    /**
+     * Add # of current users on HiveMetaStoreClient, so that the client can be cleaned when
no one is using it.
+     */
+    public static class CacheableHiveMetaStoreClient extends HiveMetaStoreClient  {
+        private  AtomicInteger users = new AtomicInteger(0);
+        private volatile boolean expiredFromCache = false;
+        private boolean isClosed = false;
+        private final long expiryTime;
+        private static final int EXPIRY_TIME_EXTENSION_IN_MILLIS = 60*1000;
+
+        public CacheableHiveMetaStoreClient(final HiveConf conf, final int timeout) throws
MetaException {
+            super(conf);
+            // Extend the expiry time with some extra time on top of guava expiry time to
make sure
+            // that items closed() are for sure expired and would never be returned by guava.
+            this.expiryTime = System.currentTimeMillis() + timeout*1000 + EXPIRY_TIME_EXTENSION_IN_MILLIS;
+        }
+
+        private void acquire() {
+            users.incrementAndGet();
+        }
+
+        private void release() {
+            users.decrementAndGet();
+        }
+
+        public void setExpiredFromCache() {
+            expiredFromCache = true;
+        }
+
+        public boolean isClosed() {
+            return isClosed;
+        }
+
+        /**
+         * Make a call to hive meta store and see if the client is still usable. Some calls
where the user provides
+         * invalid data renders the client unusable for future use (example: create a table
with very long table name)
+         * @return
+         */
+        protected boolean isOpen() {
+            try {
+                // Look for an unlikely database name and see if either MetaException or
TException is thrown
+                this.getDatabase("NonExistentDatabaseUsedForHealthCheck");
+            } catch (NoSuchObjectException e) {
+                return true; // It is okay if the database doesn't exist
+            } catch (MetaException e) {
+                return false;
+            } catch (TException e) {
+                return false;
+            }
+            return true;
+        }
+
+        /**
+         * Decrement the user count and piggyback this to set expiry flag as well, then 
teardown(), if conditions are met.
+         * This *MUST* be called by anyone who uses this client.
+         */
+        @Override
+        public void close(){
+            release();
+            if(System.currentTimeMillis() >= expiryTime)
+                setExpiredFromCache();
+            tearDownIfUnused();
+        }
+
+        /**
+         * Tear down only if
+         *  1. There are no active users
+         *  2. It has expired from the cache
+         */
+        private void tearDownIfUnused() {
+            if(users.get() == 0 && expiredFromCache) {
+                this.tearDown();
+            }
+        }
+
+        /**
+         * Close if not closed already
+         */
+        protected synchronized void tearDown() {
+            try {
+                if(!isClosed) {
+                    super.close();
+                }
+                isClosed = true;
+            } catch(Exception e) {
+                LOG.warn("Error closing hive metastore client. Ignored.", e);
+            }
+        }
+
+        /**
+         * Last effort to clean up, may not even get called.
+         * @throws Throwable
+         */
+        @Override
+        protected void finalize() throws Throwable {
+            try {
+                this.tearDown();
+            } finally {
+                super.finalize();
+            }
+        }
+    }
+}

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
(original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
Tue Aug 14 21:51:25 2012
@@ -93,7 +93,7 @@ class DefaultOutputCommitterContainer ex
         HiveMetaStoreClient client = null;
         try {
             HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
-            client = HCatUtil.createHiveClient(hiveConf);
+            client = HCatUtil.getHiveClient(hiveConf);
             String tokenStrForm = client.getTokenStrForm();
             if(tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE)
!= null) {
               client.cancelDelegationToken(tokenStrForm);

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
(original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
Tue Aug 14 21:51:25 2012
@@ -158,7 +158,7 @@ class FileOutputCommitterContainer exten
         HiveMetaStoreClient client = null;
         try {
             HiveConf hiveConf = HCatUtil.getHiveConf(jobContext.getConfiguration());
-            client = HCatUtil.createHiveClient(hiveConf);
+            client = HCatUtil.getHiveClient(hiveConf);
             // cancel the deleg. tokens that were acquired for this job now that
             // we are done - we should cancel if the tokens were acquired by
             // HCatOutputFormat and not if they were supplied by Oozie.
@@ -273,7 +273,7 @@ class FileOutputCommitterContainer exten
 
         try {
             HiveConf hiveConf = HCatUtil.getHiveConf(conf);
-            client = HCatUtil.createHiveClient(hiveConf);
+            client = HCatUtil.getHiveClient(hiveConf);
 
             StorerInfo storer = InternalUtil.extractStorerInfo(table.getSd(),table.getParameters());
 

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java
(original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java
Tue Aug 14 21:51:25 2012
@@ -116,7 +116,7 @@ class FileOutputFormatContainer extends 
         HiveMetaStoreClient client = null;
         try {
             HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
-            client = HCatUtil.createHiveClient(hiveConf);
+            client = HCatUtil.getHiveClient(hiveConf);
             handleDuplicatePublish(context,
                     jobInfo,
                     client,

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java
(original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java
Tue Aug 14 21:51:25 2012
@@ -72,7 +72,7 @@ public class HCatOutputFormat extends HC
 
         Configuration conf = job.getConfiguration();
         HiveConf hiveConf = HCatUtil.getHiveConf(conf);
-        client = HCatUtil.createHiveClient(hiveConf);
+        client = HCatUtil.getHiveClient(hiveConf);
         Table table = client.getTable(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName());
 
         List<String> indexList = client.listIndexNames(outputJobInfo.getDatabaseName(),
outputJobInfo.getTableName(), Short.MAX_VALUE);

Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/InitializeInput.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/InitializeInput.java?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/InitializeInput.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/InitializeInput.java Tue
Aug 14 21:51:25 2012
@@ -93,7 +93,7 @@ public class InitializeInput {
       } else {
         hiveConf = new HiveConf(HCatInputFormat.class);
       }
-      client = HCatUtil.createHiveClient(hiveConf);
+      client = HCatUtil.getHiveClient(hiveConf);
       Table table = client.getTable(inputJobInfo.getDatabaseName(),
                                     inputJobInfo.getTableName());
 

Added: incubator/hcatalog/trunk/src/test/org/apache/hcatalog/common/TestHiveClientCache.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/test/org/apache/hcatalog/common/TestHiveClientCache.java?rev=1373130&view=auto
==============================================================================
--- incubator/hcatalog/trunk/src/test/org/apache/hcatalog/common/TestHiveClientCache.java
(added)
+++ incubator/hcatalog/trunk/src/test/org/apache/hcatalog/common/TestHiveClientCache.java
Tue Aug 14 21:51:25 2012
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.common;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hcatalog.NoExitSecurityManager;
+import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
+import org.apache.thrift.TException;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import static org.junit.Assert.*;
+
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.login.LoginException;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+/**
+ * Unit tests for {@link HiveClientCache}: verifies that equal HiveConf values hit the
+ * cache, differing metastore URIs miss it, entries expire after the configured timeout,
+ * each thread gets its own client, closeAllClientsQuietly() tears everything down, and
+ * that isOpen() detects a client broken by a failed metastore call.
+ *
+ * NOTE(review): the shared 'hiveConf' field below is mutated in place by several tests
+ * (e.g. METASTOREURIS in testCacheMiss / testCloseAllClients), which makes the tests
+ * order-sensitive - consider building a fresh HiveConf per test.
+ */
+public class TestHiveClientCache {
+
+    private static final Logger LOG = LoggerFactory.getLogger(TestHiveClientCache.class);
+    // Shared across all tests and mutated in place; see class-level review note.
+    final HiveConf hiveConf = new HiveConf();
+
+    @BeforeClass
+    public static void setUp() throws Exception {
+        // Intentionally empty: no class-level fixtures are needed yet.
+    }
+
+    @AfterClass
+    public static void tearDown() throws Exception {
+        // Intentionally empty: nothing to clean up at class level.
+    }
+
+    // Two lookups with an equivalent configuration must return the same cached client.
+    @Test
+    public void testCacheHit() throws IOException, MetaException, LoginException {
+
+        HiveClientCache cache = new HiveClientCache(1000);
+        HiveMetaStoreClient  client = cache.get(hiveConf);
+        assertNotNull(client);
+        client.close(); // close shouldn't matter
+
+        // Setting a non important configuration should return the same client only
+        hiveConf.setIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS, 10);
+        HiveMetaStoreClient  client2 = cache.get(hiveConf);
+        assertNotNull(client2);
+        assertEquals(client, client2);
+        client2.close();
+    }
+
+    // Changing a cache-key-relevant setting (the metastore URI) must yield a new client.
+    @Test
+    public void testCacheMiss() throws IOException, MetaException, LoginException {
+        HiveClientCache cache = new HiveClientCache(1000);
+        HiveMetaStoreClient  client = cache.get(hiveConf);
+        assertNotNull(client);
+
+        // Set different uri as it is one of the criteria deciding whether to return the
same client or not
+        hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, " "); // URIs are checked for string
equivalence, even spaces make them different
+        HiveMetaStoreClient  client2 = cache.get(hiveConf);
+        assertNotNull(client2);
+        assertNotSame(client, client2);
+    }
+
+    /**
+     * Check that a new client is returned for the same configuration after the expiry time.
+     * Also verify that the expiry time configuration is honoured
+     */
+    @Test
+    public void testCacheExpiry() throws IOException, MetaException, LoginException, InterruptedException
{
+        // Expiry of 1 (presumably seconds - TODO confirm against HiveClientCache's
+        // constructor contract); the 2.5s sleep below outlasts it. NOTE(review):
+        // real-time sleeps make this test timing-sensitive on loaded machines.
+        HiveClientCache cache = new HiveClientCache(1);
+        HiveClientCache.CacheableHiveMetaStoreClient client = (HiveClientCache.CacheableHiveMetaStoreClient)cache.get(hiveConf);
+        assertNotNull(client);
+
+        Thread.sleep(2500);
+        HiveMetaStoreClient client2 = cache.get(hiveConf);
+        client.close();
+        assertTrue(client.isClosed()); // close() after *expiry time* and *a cache access*
should have torn down the client
+
+        assertNotNull(client2);
+        assertNotSame(client, client2);
+    }
+
+    /**
+     * Check that a *new* client is created if asked from different threads even with
+     * the same hive configuration
+     * @throws ExecutionException
+     * @throws InterruptedException
+     */
+    @Test
+    public void testMultipleThreadAccess() throws ExecutionException, InterruptedException
{
+        final HiveClientCache cache = new HiveClientCache(1000);
+
+        // Each Callable runs on its own pool thread, so the cache should hand each
+        // one a distinct client despite the identical configuration.
+        class GetHiveClient implements Callable<HiveMetaStoreClient> {
+            @Override
+            public HiveMetaStoreClient call() throws IOException, MetaException, LoginException
{
+                return cache.get(hiveConf);
+            }
+        }
+
+        ExecutorService executor = Executors.newFixedThreadPool(2);
+
+        Callable<HiveMetaStoreClient> worker1 = new GetHiveClient();
+        Callable<HiveMetaStoreClient> worker2 = new GetHiveClient();
+        Future<HiveMetaStoreClient> clientFuture1 = executor.submit(worker1);
+        Future<HiveMetaStoreClient> clientFuture2 = executor.submit(worker2);
+        HiveMetaStoreClient client1 = clientFuture1.get();
+        HiveMetaStoreClient client2 = clientFuture2.get();
+        assertNotNull(client1);
+        assertNotNull(client2);
+        assertNotSame(client1, client2);
+        // NOTE(review): executor is never shut down; harmless for a short test but
+        // executor.shutdown() would be tidier.
+    }
+
+    // closeAllClientsQuietly() must close every cached client, across distinct cache keys.
+    @Test
+    public void testCloseAllClients() throws IOException, MetaException, LoginException {
+        final HiveClientCache cache = new HiveClientCache(1000);
+        HiveClientCache.CacheableHiveMetaStoreClient client1 = (HiveClientCache.CacheableHiveMetaStoreClient)
cache.get(hiveConf);
+        hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, " "); // URIs are checked for string
equivalence, even spaces make them different
+        HiveClientCache.CacheableHiveMetaStoreClient client2 = (HiveClientCache.CacheableHiveMetaStoreClient)
cache.get(hiveConf);
+        cache.closeAllClientsQuietly();
+        assertTrue(client1.isClosed());
+        assertTrue(client2.isClosed());
+    }
+
+    /**
+     * Test that a long table name actually breaks the HMSC. Subsequently check that isOpen()
reflects
+     * that the client is broken
+     */
+    @Test
+    public void testHMSCBreakability() throws IOException, MetaException, LoginException,
TException, AlreadyExistsException,
+        InvalidObjectException, NoSuchObjectException, InterruptedException {
+        // Setup
+        LocalMetaServer metaServer =  new LocalMetaServer();
+        metaServer.start();
+
+        final HiveClientCache cache = new HiveClientCache(1000);
+        HiveClientCache.CacheableHiveMetaStoreClient client =
+                (HiveClientCache.CacheableHiveMetaStoreClient) cache.get(metaServer.getHiveConf());
+
+        assertTrue(client.isOpen());
+
+        final String DB_NAME = "test_db";
+        // 200 random bits rendered in binary -> a ~200-char suffix, long enough to
+        // exceed the metastore's table-name limit and break the thrift connection.
+        final String LONG_TABLE_NAME = "long_table_name_" + new BigInteger(200, new Random()).toString(2);
+
+        // Best-effort cleanup from prior runs; the table/database may not exist, so
+        // failures here are deliberately ignored.
+        try {
+            client.dropTable(DB_NAME, LONG_TABLE_NAME);
+        } catch (Exception e) {
+        }
+        try {
+            client.dropDatabase(DB_NAME);
+        } catch (Exception e) {
+        }
+
+        client.createDatabase(new Database(DB_NAME, "", null, null));
+
+        List<FieldSchema> fields = new ArrayList<FieldSchema>();
+        fields.add(new FieldSchema("colname", org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME,
""));
+        Table tbl = new Table();
+        tbl.setDbName(DB_NAME);
+        tbl.setTableName(LONG_TABLE_NAME);
+        StorageDescriptor sd = new StorageDescriptor();
+        sd.setCols(fields);
+        tbl.setSd(sd);
+        sd.setSerdeInfo(new SerDeInfo());
+
+        // Break the client
+        try {
+            client.createTable(tbl);
+            fail("Exception was expected while creating table with long name");
+        } catch (Exception e) {
+            // Expected: the over-long table name should make the call fail.
+        }
+
+        assertFalse(client.isOpen());
+        metaServer.shutDown();
+    }
+
+    /**
+     * Runs a local thrift metastore on a background thread for testHMSCBreakability.
+     * NOTE(review): the server thread is never stopped; shutDown() only restores the
+     * original SecurityManager (NoExitSecurityManager is installed in the constructor,
+     * presumably to intercept System.exit() calls from HiveMetaStore - TODO confirm).
+     */
+    private static class LocalMetaServer implements Runnable {
+        public final int MS_PORT = 20101;
+        private final HiveConf hiveConf;
+        // Original SecurityManager, restored by shutDown().
+        private final SecurityManager securityManager;
+        // Fixed boot wait in ms; a readiness poll would be faster and less flaky.
+        public final static int WAIT_TIME_FOR_BOOTUP = 30000;
+
+        public LocalMetaServer() {
+            securityManager = System.getSecurityManager();
+            System.setSecurityManager(new NoExitSecurityManager());
+            hiveConf = new HiveConf(TestHiveClientCache.class);
+            hiveConf.set("hive.metastore.local", "false");
+            hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
+                    + MS_PORT);
+            hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTRETRIES, 3);
+            hiveConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
+                    HCatSemanticAnalyzer.class.getName());
+            hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+            hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+            hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname,
+                    "false");
+            System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
+            System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
+        }
+
+        public void start() throws InterruptedException {
+            Thread thread = new Thread(this);
+            thread.start();
+            Thread.sleep(WAIT_TIME_FOR_BOOTUP); // Wait for the server to bootup
+        }
+
+        @Override
+        public void run() {
+            try {
+                HiveMetaStore.main(new String[]{"-v", "-p", String.valueOf(MS_PORT)});
+            } catch (Throwable t) {
+                LOG.error("Exiting. Got exception from metastore: ", t);
+            }
+        }
+
+        public HiveConf getHiveConf() {
+            return hiveConf;
+        }
+        public void shutDown() {
+            System.setSecurityManager(securityManager);
+        }
+    }
+}

Modified: incubator/hcatalog/trunk/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClientHMSImpl.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClientHMSImpl.java?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClientHMSImpl.java
(original)
+++ incubator/hcatalog/trunk/webhcat/java-client/src/main/java/org/apache/hcatalog/api/HCatClientHMSImpl.java
Tue Aug 14 21:51:25 2012
@@ -546,7 +546,7 @@ public class HCatClientHMSImpl extends H
         this.config = conf;
         try {
             hiveConfig = HCatUtil.getHiveConf(config);
-            hmsClient = HCatUtil.createHiveClient(hiveConfig);
+            hmsClient = HCatUtil.getHiveClient(hiveConfig);
         } catch (MetaException exp) {
             throw new HCatException("MetaException while creating HMS client",
                     exp);

Modified: incubator/hcatalog/trunk/webhcat/java-client/src/test/java/org/apache/hcatalog/api/TestHCatClient.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/webhcat/java-client/src/test/java/org/apache/hcatalog/api/TestHCatClient.java?rev=1373130&r1=1373129&r2=1373130&view=diff
==============================================================================
--- incubator/hcatalog/trunk/webhcat/java-client/src/test/java/org/apache/hcatalog/api/TestHCatClient.java
(original)
+++ incubator/hcatalog/trunk/webhcat/java-client/src/test/java/org/apache/hcatalog/api/TestHCatClient.java
Tue Aug 14 21:51:25 2012
@@ -17,14 +17,13 @@
  */
 package org.apache.hcatalog.api;
 
+import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStore;
@@ -35,24 +34,24 @@ import org.apache.hadoop.hive.ql.io.RCFi
 import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hcatalog.common.HCatException;
 import org.apache.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hcatalog.data.schema.HCatFieldSchema.Type;
-import org.apache.hcatalog.ExitException;
 import org.apache.hcatalog.NoExitSecurityManager;
-import org.junit.Assert;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TestHCatClient extends TestCase {
+import static org.junit.Assert.assertTrue;
+
+public class TestHCatClient {
     private static final Logger LOG = LoggerFactory.getLogger(TestHCatClient.class);
-    private boolean  isServerRunning = false;
     private static final String msPort  = "20101";
-    private HiveConf  hcatConf;
-    private Thread  t;
-    private SecurityManager securityManager;
+    private static HiveConf  hcatConf;
+    private static SecurityManager securityManager;
 
     private static class RunMS implements Runnable {
 
@@ -66,28 +65,22 @@ public class TestHCatClient extends Test
         }
     }
 
-    @Override
-    protected void tearDown() throws Exception {
+    @AfterClass
+    public static void tearDown() throws Exception {
         LOG.info("Shutting down metastore.");
         System.setSecurityManager(securityManager);
     }
 
-    @Override
-    protected void setUp() throws Exception {
+    @BeforeClass
+    public static void startMetaStoreServer() throws Exception {
 
-        if (isServerRunning) {
-            return;
-        }
-
-        t = new Thread(new RunMS());
+        Thread t = new Thread(new RunMS());
         t.start();
         Thread.sleep(40000);
 
-        isServerRunning = true;
-
         securityManager = System.getSecurityManager();
         System.setSecurityManager(new NoExitSecurityManager());
-        hcatConf = new HiveConf(this.getClass());
+        hcatConf = new HiveConf(TestHCatClient.class);
         hcatConf.set("hive.metastore.local", "false");
         hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
                 + msPort);
@@ -102,6 +95,7 @@ public class TestHCatClient extends Test
         System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
     }
 
+    @Test
     public void testBasicDDLCommands() throws Exception {
         String db = "testdb";
         String tableOne = "testTable1";
@@ -161,6 +155,7 @@ public class TestHCatClient extends Test
         client.close();
     }
 
+    @Test
     public void testPartitionsHCatClientImpl() throws Exception {
         HCatClient client = HCatClient.create(new Configuration(hcatConf));
         String dbName = "ptnDB";
@@ -232,6 +227,7 @@ public class TestHCatClient extends Test
         client.close();
     }
 
+    @Test
     public void testDatabaseLocation() throws Exception{
         HCatClient client = HCatClient.create(new Configuration(hcatConf));
         String dbName = "locationDB";
@@ -245,6 +241,7 @@ public class TestHCatClient extends Test
         client.close();
     }
 
+    @Test
     public void testCreateTableLike() throws Exception {
         HCatClient client = HCatClient.create(new Configuration(hcatConf));
         String tableName = "tableone";
@@ -265,6 +262,7 @@ public class TestHCatClient extends Test
         client.close();
     }
 
+    @Test
     public void testRenameTable() throws Exception {
         HCatClient client = HCatClient.create(new Configuration(hcatConf));
         String tableName = "temptable";
@@ -289,14 +287,13 @@ public class TestHCatClient extends Test
         client.close();
     }
 
+    @Test
     public void testTransportFailure() throws Exception {
         HCatClient client = HCatClient.create(new Configuration(hcatConf));
-        String tableName = "Temptable";
         boolean isExceptionCaught = false;
-        Random random = new Random();
-        for (int i = 0; i < 80; i++) {
-            tableName = tableName + random.nextInt(100);
-        }
+        // Table creation with a long table name causes ConnectionFailureException
+        final String tableName = "Temptable" + new BigInteger(200, new Random()).toString(2);
+
         ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
         cols.add(new HCatFieldSchema("id", Type.INT, "id columns"));
         cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
@@ -320,12 +317,11 @@ public class TestHCatClient extends Test
 
         } finally {
             client.close();
-            if(isExceptionCaught == false){
-                Assert.fail("The expection exception was never thrown.");
-            }
+            assertTrue("The expected exception was never thrown.", isExceptionCaught);
         }
     }
 
+    @Test
     public void testOtherFailure() throws Exception {
         HCatClient client = HCatClient.create(new Configuration(hcatConf));
         String tableName = "Temptable";
@@ -353,9 +349,7 @@ public class TestHCatClient extends Test
             assertTrue(newTable.getTableName().equalsIgnoreCase(newName));
         } finally {
             client.close();
-            if (isExceptionCaught == false) {
-                Assert.fail("The expection exception was never thrown.");
-            }
+            assertTrue("The expected exception was never thrown.", isExceptionCaught);
         }
     }
 }



Mime
View raw message