hive-commits mailing list archives

From ga...@apache.org
Subject [12/51] [abbrv] hive git commit: HIVE-17980 Moved HiveMetaStoreClient plus a few remaining classes.
Date Tue, 21 Nov 2017 23:56:55 GMT
http://git-wip-us.apache.org/repos/asf/hive/blob/824d701f/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
new file mode 100644
index 0000000..8a073c5
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -0,0 +1,2694 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.net.InetAddress;
+import java.net.URI;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NoSuchElementException;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.security.PrivilegedExceptionAction;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.thrift.TApplicationException;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TFramedTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
+
+/**
+ * Hive Metastore Client.
+ * The public implementation of IMetaStoreClient. Methods not inherited from IMetaStoreClient
+ * are not public and can change without notice; hence this class is marked as unstable.
+ * Users who require a retry mechanism when the connection between the metastore and the
+ * client is broken should use RetryingMetaStoreClient instead.
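+ *
+ * <p>A minimal usage sketch (illustrative; database and table names are placeholders):</p>
+ * <pre>
+ *   IMetaStoreClient msc = new HiveMetaStoreClient(conf);
+ *   try {
+ *     Table t = msc.getTable("default", "my_table");
+ *   } finally {
+ *     msc.close();
+ *   }
+ * </pre>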
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
+  /**
+   * Capabilities of the current client. If this client talks to a MetaStore server in a manner
+   * implying the use of expanded features that require client-side support this client
+   * doesn't have (e.g. getting a table of a new type), it will get back failures when
+   * capability checking is enabled (the default).
+   */
+  public static final ClientCapabilities VERSION = new ClientCapabilities(
+      Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES));
+  // Extra capability used only in tests.
+  public static final ClientCapabilities TEST_VERSION = new ClientCapabilities(
+      Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES, ClientCapability.TEST_CAPABILITY));
+
+  ThriftHiveMetastore.Iface client = null;
+  private TTransport transport = null;
+  private boolean isConnected = false;
+  private URI[] metastoreUris;
+  private final HiveMetaHookLoader hookLoader;
+  protected final Configuration conf;  // Keep a copy of the conf; if the session conf changes, we may need a new HMS client.
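+  // When true, thrift objects are handed back to callers directly instead of as defensive
+  // deep copies (see the fastpath checks in the getter methods below).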
+  protected boolean fastpath = false;
+  private String tokenStrForm;
+  private final boolean localMetaStore;
+  private final MetaStoreFilterHook filterHook;
+  private final int fileMetadataBatchSize;
+
+  private Map<String, String> currentMetaVars;
+
+  private static final AtomicInteger connCount = new AtomicInteger(0);
+
+  // for thrift connects
+  private int retries = 5;
+  private long retryDelaySeconds = 0;
+  private final ClientCapabilities version;
+
+  protected static final Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClient.class);
+
+  public HiveMetaStoreClient(Configuration conf) throws MetaException {
+    this(conf, null, true);
+  }
+
+  public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader) throws MetaException {
+    this(conf, hookLoader, true);
+  }
+
+  public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded)
+    throws MetaException {
+
+    this.hookLoader = hookLoader;
+    if (conf == null) {
+      conf = MetastoreConf.newMetastoreConf();
+      this.conf = conf;
+    } else {
+      this.conf = new Configuration(conf);
+    }
+    version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION;
+    filterHook = loadFilterHooks();
+    fileMetadataBatchSize = MetastoreConf.getIntVar(
+        conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX);
+
+    String msUri = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS);
+    localMetaStore = MetastoreConf.isEmbeddedMetaStore(msUri);
+    if (localMetaStore) {
+      if (!allowEmbedded) {
+        throw new MetaException("Embedded metastore is not allowed here. Please configure "
+            + ConfVars.THRIFT_URIS.toString() + "; it is currently set to [" + msUri + "]");
+      }
+      // instantiate the metastore server handler directly instead of connecting
+      // through the network
+      client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
+      isConnected = true;
+      snapshotActiveConf();
+      return;
+    }
+
+    // get the number of retries
+    retries = MetastoreConf.getIntVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES);
+    retryDelaySeconds = MetastoreConf.getTimeVar(conf,
+        ConfVars.CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
+
+    // the user has configured remote metastore URIs; parse and canonicalize them
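+    // The URIs are a comma-separated list, e.g. (host names illustrative):
+    //   metastore.thrift.uris = thrift://ms1.example.com:9083,thrift://ms2.example.com:9083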
+    if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS) != null) {
+      String[] metastoreUrisString = MetastoreConf.getVar(conf,
+          ConfVars.THRIFT_URIS).split(",");
+      metastoreUris = new URI[metastoreUrisString.length];
+      try {
+        int i = 0;
+        for (String s : metastoreUrisString) {
+          URI tmpUri = new URI(s);
+          if (tmpUri.getScheme() == null) {
+            throw new IllegalArgumentException("URI: " + s
+                + " does not have a scheme");
+          }
+          metastoreUris[i++] = new URI(
+              tmpUri.getScheme(),
+              tmpUri.getUserInfo(),
+              HadoopThriftAuthBridge.getBridge().getCanonicalHostName(tmpUri.getHost()),
+              tmpUri.getPort(),
+              tmpUri.getPath(),
+              tmpUri.getQuery(),
+              tmpUri.getFragment()
+          );
+
+        }
+        // shuffle the metastore URIs so clients spread their load across instances
+        List<?> uriList = Arrays.asList(metastoreUris);
+        Collections.shuffle(uriList);
+        metastoreUris = (URI[]) uriList.toArray();
+      } catch (IllegalArgumentException e) {
+        throw e;
+      } catch (Exception e) {
+        MetaStoreUtils.logAndThrowMetaException(e);
+      }
+    } else {
+      LOG.error("NOT getting uris from conf");
+      throw new MetaException("MetaStoreURIs not found in conf file");
+    }
+
+    // If HADOOP_PROXY_USER is set in the environment or as a system property,
+    // we need to create a metastore client that proxies as that user.
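+    // e.g. a process launched with HADOOP_PROXY_USER=etl_user (user name illustrative)
+    // will obtain a delegation token on behalf of that user.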
+    String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
+    String proxyUser = System.getenv(HADOOP_PROXY_USER);
+    if (proxyUser == null) {
+      proxyUser = System.getProperty(HADOOP_PROXY_USER);
+    }
+    // if HADOOP_PROXY_USER is set, create a delegation token using the real user
+    if (proxyUser != null) {
+      LOG.info(HADOOP_PROXY_USER + " is set. Using delegation "
+          + "token for HiveMetaStore connection.");
+      try {
+        UserGroupInformation.getLoginUser().getRealUser().doAs(
+            new PrivilegedExceptionAction<Void>() {
+              @Override
+              public Void run() throws Exception {
+                open();
+                return null;
+              }
+            });
+        String delegationTokenPropString = "DelegationTokenForHiveMetaStoreServer";
+        String delegationTokenStr = getDelegationToken(proxyUser, proxyUser);
+        SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr,
+            delegationTokenPropString);
+        MetastoreConf.setVar(this.conf, ConfVars.TOKEN_SIGNATURE, delegationTokenPropString);
+        close();
+      } catch (Exception e) {
+        LOG.error("Error while setting delegation token for " + proxyUser, e);
+        if (e instanceof MetaException) {
+          throw (MetaException)e;
+        } else {
+          throw new MetaException(e.getMessage());
+        }
+      }
+    }
+    // finally open the store
+    open();
+  }
+
+  private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException {
+    Class<? extends MetaStoreFilterHook> authProviderClass = MetastoreConf.
+        getClass(conf, ConfVars.FILTER_HOOK, DefaultMetaStoreFilterHookImpl.class,
+            MetaStoreFilterHook.class);
+    String msg = "Unable to create instance of " + authProviderClass.getName() + ": ";
+    try {
+      Constructor<? extends MetaStoreFilterHook> constructor =
+          authProviderClass.getConstructor(Configuration.class);
+      return constructor.newInstance(conf);
+    } catch (NoSuchMethodException | SecurityException | IllegalAccessException
+        | InstantiationException | IllegalArgumentException | InvocationTargetException e) {
+      throw new IllegalStateException(msg + e.getMessage(), e);
+    }
+  }
+
+  /**
+   * Swaps the first element of the metastoreUris array with a random element from the
+   * remainder of the array.
+   */
+  private void promoteRandomMetaStoreURI() {
+    if (metastoreUris.length <= 1) {
+      return;
+    }
+    Random rng = new Random();
+    int index = rng.nextInt(metastoreUris.length - 1) + 1;
+    URI tmp = metastoreUris[0];
+    metastoreUris[0] = metastoreUris[index];
+    metastoreUris[index] = tmp;
+  }
+
+  @VisibleForTesting
+  public TTransport getTTransport() {
+    return transport;
+  }
+
+  @Override
+  public boolean isLocalMetaStore() {
+    return localMetaStore;
+  }
+
+  @Override
+  public boolean isCompatibleWith(Configuration conf) {
+    // Make a copy of currentMetaVars; there is a race condition in which
+    // currentMetaVars might be changed while this method executes.
+    Map<String, String> currentMetaVarsCopy = currentMetaVars;
+    if (currentMetaVarsCopy == null) {
+      return false; // recreate
+    }
+    boolean compatible = true;
+    for (ConfVars oneVar : MetastoreConf.metaVars) {
+      // Since metaVars are all of different types, use string for comparison
+      String oldVar = currentMetaVarsCopy.get(oneVar.getVarname());
+      String newVar = MetastoreConf.getAsString(conf, oneVar);
+      if (oldVar == null ||
+          (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) {
+        LOG.info("Mestastore configuration " + oneVar.toString() +
+            " changed from " + oldVar + " to " + newVar);
+        compatible = false;
+      }
+    }
+    return compatible;
+  }
+
+  @Override
+  public void setHiveAddedJars(String addedJars) {
+    MetastoreConf.setVar(conf, ConfVars.ADDED_JARS, addedJars);
+  }
+
+  @Override
+  public void reconnect() throws MetaException {
+    if (localMetaStore) {
+      // For direct DB connections we don't yet support reestablishing connections.
+      throw new MetaException("For direct MetaStore DB connections, we don't support retries" +
+          " at the client level.");
+    } else {
+      close();
+      // Swap the first element of metastoreUris[] with a random element from the rest of
+      // the array. The rationale is that this method is generally called when the default
+      // connection has died and the default connection is likely to be the first element.
+      promoteRandomMetaStoreURI();
+      open();
+    }
+  }
+
+  /**
+   * @param dbname
+   * @param tbl_name
+   * @param new_tbl
+   * @throws InvalidOperationException
+   * @throws MetaException
+   * @throws TException
+   * @see
+   *   org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(
+   *   java.lang.String, java.lang.String,
+   *   org.apache.hadoop.hive.metastore.api.Table)
+   */
+  @Override
+  public void alter_table(String dbname, String tbl_name, Table new_tbl)
+      throws InvalidOperationException, MetaException, TException {
+    alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null);
+  }
+
+  @Override
+  public void alter_table(String defaultDatabaseName, String tblName, Table table,
+      boolean cascade) throws InvalidOperationException, MetaException, TException {
+    EnvironmentContext environmentContext = new EnvironmentContext();
+    if (cascade) {
+      environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+    }
+    alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext);
+  }
+
+  @Override
+  public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl,
+      EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
+    client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext);
+  }
+
+  /**
+   * @param dbname
+   * @param name
+   * @param part_vals
+   * @param newPart
+   * @throws InvalidOperationException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition(
+   *      java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition)
+   */
+  @Override
+  public void renamePartition(final String dbname, final String name, final List<String> part_vals, final Partition newPart)
+      throws InvalidOperationException, MetaException, TException {
+    client.rename_partition(dbname, name, part_vals, newPart);
+  }
+
+  private void open() throws MetaException {
+    isConnected = false;
+    TTransportException tte = null;
+    boolean useSSL = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL);
+    boolean useSasl = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_SASL);
+    boolean useFramedTransport = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT);
+    boolean useCompactProtocol = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL);
+    int clientSocketTimeout = (int) MetastoreConf.getTimeVar(conf,
+        ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
+
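+    // Try each configured URI in turn; repeat the whole round up to 'retries' times,
+    // waiting retryDelaySeconds between rounds.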
+    for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
+      for (URI store : metastoreUris) {
+        LOG.info("Trying to connect to metastore with URI " + store);
+
+        try {
+          if (useSSL) {
+            try {
+              String trustStorePath = MetastoreConf.getVar(conf, ConfVars.SSL_TRUSTSTORE_PATH).trim();
+              if (trustStorePath.isEmpty()) {
+                throw new IllegalArgumentException(ConfVars.SSL_TRUSTSTORE_PATH.toString()
+                    + " not configured for SSL connection");
+              }
+              String trustStorePassword =
+                  MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_TRUSTSTORE_PASSWORD);
+
+              // Create an SSL socket and connect
+              transport = SecurityUtils.getSSLSocket(store.getHost(), store.getPort(),
+                  clientSocketTimeout, trustStorePath, trustStorePassword);
+              LOG.info("Opened an SSL connection to metastore, current connections: " + connCount.incrementAndGet());
+            } catch (IOException e) {
+              throw new IllegalArgumentException(e);
+            } catch (TTransportException e) {
+              tte = e;
+              throw new MetaException(e.toString());
+            }
+          } else {
+            transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
+          }
+
+          if (useSasl) {
+            // Wrap thrift connection with SASL for secure connection.
+            try {
+              HadoopThriftAuthBridge.Client authBridge =
+                HadoopThriftAuthBridge.getBridge().createClient();
+
+              // Check if we should use delegation tokens to authenticate. The call below
+              // gets hold of the tokens if they were set up by Hadoop; this happens on
+              // map/reduce tasks when the client added the tokens to Hadoop's credential
+              // store on the front end during job submission.
+              String tokenSig = MetastoreConf.getVar(conf, ConfVars.TOKEN_SIGNATURE);
+              // tokenSig could be null
+              tokenStrForm = SecurityUtils.getTokenStrForm(tokenSig);
+
+              if (tokenStrForm != null) {
+                LOG.info("HMSC::open(): Found delegation token. Creating DIGEST-based thrift connection.");
+                // authenticate using delegation tokens via the "DIGEST" mechanism
+                transport = authBridge.createClientTransport(null, store.getHost(),
+                    "DIGEST", tokenStrForm, transport,
+                    MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+              } else {
+                LOG.info("HMSC::open(): Could not find delegation token. Creating KERBEROS-based thrift connection.");
+                String principalConfig =
+                    MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL);
+                transport = authBridge.createClientTransport(
+                    principalConfig, store.getHost(), "KERBEROS", null,
+                    transport, MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+              }
+            } catch (IOException ioe) {
+              LOG.error("Couldn't create client transport", ioe);
+              throw new MetaException(ioe.toString());
+            }
+          } else {
+            if (useFramedTransport) {
+              transport = new TFramedTransport(transport);
+            }
+          }
+
+          final TProtocol protocol;
+          if (useCompactProtocol) {
+            protocol = new TCompactProtocol(transport);
+          } else {
+            protocol = new TBinaryProtocol(transport);
+          }
+          client = new ThriftHiveMetastore.Client(protocol);
+          try {
+            if (!transport.isOpen()) {
+              transport.open();
+              LOG.info("Opened a connection to metastore, current connections: " + connCount.incrementAndGet());
+            }
+            isConnected = true;
+          } catch (TTransportException e) {
+            tte = e;
+            if (LOG.isDebugEnabled()) {
+              LOG.warn("Failed to connect to the MetaStore Server...", e);
+            } else {
+              // Don't print full exception trace if DEBUG is not on.
+              LOG.warn("Failed to connect to the MetaStore Server...");
+            }
+          }
+
+          if (isConnected && !useSasl && MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI)){
+            // Call set_ugi, only in unsecure mode.
+            try {
+              UserGroupInformation ugi = SecurityUtils.getUGI();
+              client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
+            } catch (LoginException e) {
+              LOG.warn("Failed to do login. set_ugi() is not successful, " +
+                       "Continuing without it.", e);
+            } catch (IOException e) {
+              LOG.warn("Failed to find ugi of client set_ugi() is not successful, " +
+                  "Continuing without it.", e);
+            } catch (TException e) {
+              LOG.warn("set_ugi() not successful, Likely cause: new client talking to old server. "
+                  + "Continuing without it.", e);
+            }
+          }
+        } catch (MetaException e) {
+          LOG.error("Unable to connect to metastore with URI " + store
+                    + " in attempt " + attempt, e);
+        }
+        if (isConnected) {
+          break;
+        }
+      }
+      // Wait before launching the next round of connection retries.
+      if (!isConnected && retryDelaySeconds > 0) {
+        try {
+          LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt.");
+          Thread.sleep(retryDelaySeconds * 1000);
+        } catch (InterruptedException ignore) {}
+      }
+    }
+
+    if (!isConnected) {
+      throw new MetaException("Could not connect to meta store using any of the URIs provided." +
+        " Most recent failure: " + StringUtils.stringifyException(tte));
+    }
+
+    snapshotActiveConf();
+
+    LOG.info("Connected to metastore.");
+  }
+
+  private void snapshotActiveConf() {
+    currentMetaVars = new HashMap<>(MetastoreConf.metaVars.length);
+    for (ConfVars oneVar : MetastoreConf.metaVars) {
+      currentMetaVars.put(oneVar.getVarname(), MetastoreConf.getAsString(conf, oneVar));
+    }
+  }
+
+  @Override
+  public String getTokenStrForm() throws IOException {
+    return tokenStrForm;
+  }
+
+  @Override
+  public void close() {
+    isConnected = false;
+    currentMetaVars = null;
+    try {
+      if (null != client) {
+        client.shutdown();
+      }
+    } catch (TException e) {
+      LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e);
+    }
+    // The transport would have been closed via client.shutdown(), so this call is not
+    // strictly needed, but we make it just in case.
+    if ((transport != null) && transport.isOpen()) {
+      transport.close();
+      LOG.info("Closed a connection to metastore, current connections: " + connCount.decrementAndGet());
+    }
+  }
+
+  @Override
+  public void setMetaConf(String key, String value) throws TException {
+    client.setMetaConf(key, value);
+  }
+
+  @Override
+  public String getMetaConf(String key) throws TException {
+    return client.getMetaConf(key);
+  }
+
+  /**
+   * @param new_part
+   * @return the added partition
+   * @throws InvalidObjectException
+   * @throws AlreadyExistsException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition)
+   */
+  @Override
+  public Partition add_partition(Partition new_part) throws TException {
+    return add_partition(new_part, null);
+  }
+
+  public Partition add_partition(Partition new_part, EnvironmentContext envContext)
+      throws TException {
+    Partition p = client.add_partition_with_environment_context(new_part, envContext);
+    return fastpath ? p : deepCopy(p);
+  }
+
+  /**
+   * @param new_parts
+   * @throws InvalidObjectException
+   * @throws AlreadyExistsException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List)
+   */
+  @Override
+  public int add_partitions(List<Partition> new_parts) throws TException {
+    return client.add_partitions(new_parts);
+  }
+
+  @Override
+  public List<Partition> add_partitions(
+      List<Partition> parts, boolean ifNotExists, boolean needResults) throws TException {
+    if (parts.isEmpty()) {
+      return needResults ? new ArrayList<>() : null;
+    }
+    Partition part = parts.get(0);
+    AddPartitionsRequest req = new AddPartitionsRequest(
+        part.getDbName(), part.getTableName(), parts, ifNotExists);
+    req.setNeedResult(needResults);
+    AddPartitionsResult result = client.add_partitions_req(req);
+    return needResults ? filterHook.filterPartitions(result.getPartitions()) : null;
+  }
+
+  @Override
+  public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException {
+    return client.add_partitions_pspec(partitionSpec.toPartitionSpec());
+  }
+
+  /**
+   * @param table_name
+   * @param db_name
+   * @param part_vals
+   * @return the appended partition
+   * @throws InvalidObjectException
+   * @throws AlreadyExistsException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String,
+   *      java.lang.String, java.util.List)
+   */
+  @Override
+  public Partition appendPartition(String db_name, String table_name,
+      List<String> part_vals) throws TException {
+    return appendPartition(db_name, table_name, part_vals, null);
+  }
+
+  public Partition appendPartition(String db_name, String table_name, List<String> part_vals,
+      EnvironmentContext envContext) throws TException {
+    Partition p = client.append_partition_with_environment_context(db_name, table_name,
+        part_vals, envContext);
+    return fastpath ? p : deepCopy(p);
+  }
+
+  @Override
+  public Partition appendPartition(String dbName, String tableName, String partName)
+      throws TException {
+    return appendPartition(dbName, tableName, partName, null);
+  }
+
+  public Partition appendPartition(String dbName, String tableName, String partName,
+      EnvironmentContext envContext) throws TException {
+    Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName,
+        partName, envContext);
+    return fastpath ? p : deepCopy(p);
+  }
+
+  /**
+   * Exchange a partition between two tables.
+   * @param partitionSpecs partition specs of the parent partition to be exchanged
+   * @param sourceDb the db of the source table
+   * @param sourceTable the source table name
+   * @param destDb the db of the destination table
+   * @param destinationTableName the destination table name
+   * @return the new partition after the exchange
+   */
+  @Override
+  public Partition exchange_partition(Map<String, String> partitionSpecs,
+      String sourceDb, String sourceTable, String destDb,
+      String destinationTableName) throws MetaException,
+      NoSuchObjectException, InvalidObjectException, TException {
+    return client.exchange_partition(partitionSpecs, sourceDb, sourceTable,
+        destDb, destinationTableName);
+  }
+
+  /**
+   * Exchange partitions between two tables.
+   * @param partitionSpecs partition specs of the parent partition to be exchanged
+   * @param sourceDb the db of the source table
+   * @param sourceTable the source table name
+   * @param destDb the db of the destination table
+   * @param destinationTableName the destination table name
+   * @return the new partitions after the exchange
+   */
+  @Override
+  public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+      String sourceDb, String sourceTable, String destDb,
+      String destinationTableName) throws MetaException,
+      NoSuchObjectException, InvalidObjectException, TException {
+    return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable,
+        destDb, destinationTableName);
+  }
+
+  @Override
+  public void validatePartitionNameCharacters(List<String> partVals)
+      throws TException, MetaException {
+    client.partition_name_has_valid_characters(partVals, true);
+  }
+
+  /**
+   * Create a new Database
+   * @param db
+   * @throws AlreadyExistsException
+   * @throws InvalidObjectException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database)
+   */
+  @Override
+  public void createDatabase(Database db)
+      throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
+    client.create_database(db);
+  }
+
+  /**
+   * @param tbl
+   * @throws MetaException
+   * @throws NoSuchObjectException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
+   */
+  @Override
+  public void createTable(Table tbl) throws AlreadyExistsException,
+      InvalidObjectException, MetaException, NoSuchObjectException, TException {
+    createTable(tbl, null);
+  }
+
+  public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException,
+      InvalidObjectException, MetaException, NoSuchObjectException, TException {
+    HiveMetaHook hook = getHook(tbl);
+    if (hook != null) {
+      hook.preCreateTable(tbl);
+    }
+    boolean success = false;
+    try {
+      // Subclasses can override this step (for example, for temporary tables)
+      create_table_with_environment_context(tbl, envContext);
+      if (hook != null) {
+        hook.commitCreateTable(tbl);
+      }
+      success = true;
+    } finally {
+      if (!success && (hook != null)) {
+        try {
+          hook.rollbackCreateTable(tbl);
+        } catch (Exception e) {
+          LOG.error("Create table rollback failed", e);
+        }
+      }
+    }
+  }
+
+  @Override
+  public void createTableWithConstraints(Table tbl,
+    List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+    List<SQLUniqueConstraint> uniqueConstraints,
+    List<SQLNotNullConstraint> notNullConstraints)
+        throws AlreadyExistsException, InvalidObjectException,
+        MetaException, NoSuchObjectException, TException {
+    HiveMetaHook hook = getHook(tbl);
+    if (hook != null) {
+      hook.preCreateTable(tbl);
+    }
+    boolean success = false;
+    try {
+      // Subclasses can override this step (for example, for temporary tables)
+      client.create_table_with_constraints(tbl, primaryKeys, foreignKeys,
+          uniqueConstraints, notNullConstraints);
+      if (hook != null) {
+        hook.commitCreateTable(tbl);
+      }
+      success = true;
+    } finally {
+      if (!success && (hook != null)) {
+        hook.rollbackCreateTable(tbl);
+      }
+    }
+  }
+
+  @Override
+  public void dropConstraint(String dbName, String tableName, String constraintName) throws
+    NoSuchObjectException, MetaException, TException {
+    client.drop_constraint(new DropConstraintRequest(dbName, tableName, constraintName));
+  }
+
+  @Override
+  public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) throws
+    NoSuchObjectException, MetaException, TException {
+    client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols));
+  }
+
+  @Override
+  public void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws
+    NoSuchObjectException, MetaException, TException {
+    client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols));
+  }
+
+  @Override
+  public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws
+    NoSuchObjectException, MetaException, TException {
+    client.add_unique_constraint(new AddUniqueConstraintRequest(uniqueConstraintCols));
+  }
+
+  @Override
+  public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws
+    NoSuchObjectException, MetaException, TException {
+    client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols));
+  }
+
+  /**
+   * @param type
+   * @return true if the type was created
+   * @throws AlreadyExistsException
+   * @throws InvalidObjectException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(org.apache.hadoop.hive.metastore.api.Type)
+   */
+  public boolean createType(Type type) throws AlreadyExistsException,
+      InvalidObjectException, MetaException, TException {
+    return client.create_type(type);
+  }
+
+  /**
+   * @param name
+   * @throws NoSuchObjectException
+   * @throws InvalidOperationException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean)
+   */
+  @Override
+  public void dropDatabase(String name)
+      throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+    dropDatabase(name, true, false, false);
+  }
+
+  @Override
+  public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+      throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+    dropDatabase(name, deleteData, ignoreUnknownDb, false);
+  }
+
+  @Override
+  public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+      throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+    try {
+      getDatabase(name);
+    } catch (NoSuchObjectException e) {
+      if (!ignoreUnknownDb) {
+        throw e;
+      }
+      return;
+    }
+
+    if (cascade) {
+      List<String> tableList = getAllTables(name);
+      for (String table : tableList) {
+        try {
+          // Subclasses can override this step (for example, for temporary tables)
+          dropTable(name, table, deleteData, true);
+        } catch (UnsupportedOperationException e) {
+          // Ignore index tables; those will be dropped with their parent tables
+        }
+      }
+    }
+    client.drop_database(name, deleteData, cascade);
+  }
+
+  /**
+   * @param tbl_name
+   * @param db_name
+   * @param part_vals
+   * @return true if the partition was dropped
+   * @throws NoSuchObjectException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+   *      java.lang.String, java.util.List, boolean)
+   */
+  public boolean dropPartition(String db_name, String tbl_name,
+      List<String> part_vals) throws NoSuchObjectException, MetaException,
+      TException {
+    return dropPartition(db_name, tbl_name, part_vals, true, null);
+  }
+
+  public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
+      EnvironmentContext env_context) throws NoSuchObjectException, MetaException, TException {
+    return dropPartition(db_name, tbl_name, part_vals, true, env_context);
+  }
+
+  @Override
+  public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData)
+      throws NoSuchObjectException, MetaException, TException {
+    return dropPartition(dbName, tableName, partName, deleteData, null);
+  }
+
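+  // Builds an EnvironmentContext with "ifPurge" set, which tells the server to delete the
+  // partition data permanently rather than moving it to the trash.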
+  private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() {
+    Map<String, String> warehouseOptions = new HashMap<>();
+    warehouseOptions.put("ifPurge", "TRUE");
+    return new EnvironmentContext(warehouseOptions);
+  }
+
+  /*
+  public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, boolean ifPurge)
+      throws NoSuchObjectException, MetaException, TException {
+
+    return dropPartition(dbName, tableName, partName, deleteData,
+                         ifPurge? getEnvironmentContextWithIfPurgeSet() : null);
+  }
+  */
+
+  public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData,
+      EnvironmentContext envContext) throws NoSuchObjectException, MetaException, TException {
+    return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName,
+        deleteData, envContext);
+  }
+
+  /**
+   * @param db_name
+   * @param tbl_name
+   * @param part_vals
+   * @param deleteData
+   *          delete the underlying data or just delete the table in metadata
+   * @return true if the partition was dropped
+   * @throws NoSuchObjectException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+   *      java.lang.String, java.util.List, boolean)
+   */
+  @Override
+  public boolean dropPartition(String db_name, String tbl_name,
+      List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
+      MetaException, TException {
+    return dropPartition(db_name, tbl_name, part_vals, deleteData, null);
+  }
+
+  @Override
+  public boolean dropPartition(String db_name, String tbl_name,
+      List<String> part_vals, PartitionDropOptions options) throws TException {
+    return dropPartition(db_name, tbl_name, part_vals, options.deleteData,
+                         options.purgeData ? getEnvironmentContextWithIfPurgeSet() : null);
+  }
+
+  public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
+      boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException,
+      MetaException, TException {
+    return client.drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData,
+        envContext);
+  }
+
+  @Override
+  public List<Partition> dropPartitions(String dbName, String tblName,
+                                        List<ObjectPair<Integer, byte[]>> partExprs, PartitionDropOptions options)
+      throws TException {
+    RequestPartsSpec rps = new RequestPartsSpec();
+    List<DropPartitionsExpr> exprs = new ArrayList<>(partExprs.size());
+    for (ObjectPair<Integer, byte[]> partExpr : partExprs) {
+      DropPartitionsExpr dpe = new DropPartitionsExpr();
+      dpe.setExpr(partExpr.getSecond());
+      dpe.setPartArchiveLevel(partExpr.getFirst());
+      exprs.add(dpe);
+    }
+    rps.setExprs(exprs);
+    DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
+    req.setDeleteData(options.deleteData);
+    req.setNeedResult(options.returnResults);
+    req.setIfExists(options.ifExists);
+    if (options.purgeData) {
+      LOG.info("Dropped partitions will be purged!");
+      req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet());
+    }
+    return client.drop_partitions_req(req).getPartitions();
+  }
+
+  @Override
+  public List<Partition> dropPartitions(String dbName, String tblName,
+      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+      boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException {
+    return dropPartitions(dbName, tblName, partExprs,
+                          PartitionDropOptions.instance()
+                                              .deleteData(deleteData)
+                                              .ifExists(ifExists)
+                                              .returnResults(needResult));
+  }
+
+  @Override
+  public List<Partition> dropPartitions(String dbName, String tblName,
+      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+      boolean ifExists) throws NoSuchObjectException, MetaException, TException {
+    // By default, we need the results from dropPartitions();
+    return dropPartitions(dbName, tblName, partExprs,
+                          PartitionDropOptions.instance()
+                                              .deleteData(deleteData)
+                                              .ifExists(ifExists));
+  }
+
+  /**
+   * {@inheritDoc}
+   * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+   */
+  @Override
+  public void dropTable(String dbname, String name, boolean deleteData,
+      boolean ignoreUnknownTab) throws MetaException, TException,
+      NoSuchObjectException, UnsupportedOperationException {
+    dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
+  }
+
+  /**
+   * Drop the table and choose whether to save the data in the trash.
+   * @param ifPurge completely purge the table (skipping trash) while removing
+   *                data from warehouse
+   * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+   */
+  @Override
+  public void dropTable(String dbname, String name, boolean deleteData,
+      boolean ignoreUnknownTab, boolean ifPurge)
+      throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
+    // build a new EnvironmentContext with ifPurge set
+    EnvironmentContext envContext = null;
+    if (ifPurge) {
+      Map<String, String> warehouseOptions = new HashMap<>();
+      warehouseOptions.put("ifPurge", "TRUE");
+      envContext = new EnvironmentContext(warehouseOptions);
+    }
+    dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  @Deprecated
+  public void dropTable(String tableName, boolean deleteData)
+      throws MetaException, UnknownTableException, TException, NoSuchObjectException {
+    dropTable(DEFAULT_DATABASE_NAME, tableName, deleteData, false, null);
+  }
+
+  /**
+   * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+   */
+  @Override
+  public void dropTable(String dbname, String name)
+      throws NoSuchObjectException, MetaException, TException {
+    dropTable(dbname, name, true, true, null);
+  }
+
+  /**
+   * Drop the table and choose whether to: delete the underlying table data;
+   * throw if the table doesn't exist; save the data in the trash.
+   *
+   * @param dbname
+   * @param name
+   * @param deleteData
+   *          delete the underlying data or just delete the table in metadata
+   * @param ignoreUnknownTab
+   *          don't throw if the requested table doesn't exist
+   * @param envContext
+   *          for communicating with thrift
+   * @throws MetaException
+   *           could not drop table properly
+   * @throws NoSuchObjectException
+   *           the table wasn't found
+   * @throws TException
+   *           a thrift communication error occurred
+   * @throws UnsupportedOperationException
+   *           dropping an index table is not allowed
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+   *      java.lang.String, boolean)
+   */
+  public void dropTable(String dbname, String name, boolean deleteData,
+      boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException,
+      NoSuchObjectException, UnsupportedOperationException {
+    Table tbl;
+    try {
+      tbl = getTable(dbname, name);
+    } catch (NoSuchObjectException e) {
+      if (!ignoreUnknownTab) {
+        throw e;
+      }
+      return;
+    }
+    if (MetaStoreUtils.isIndexTable(tbl)) {
+      throw new UnsupportedOperationException("Cannot drop index tables");
+    }
+    HiveMetaHook hook = getHook(tbl);
+    if (hook != null) {
+      hook.preDropTable(tbl);
+    }
+    boolean success = false;
+    try {
+      drop_table_with_environment_context(dbname, name, deleteData, envContext);
+      if (hook != null) {
+        hook.commitDropTable(tbl, deleteData
+            || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge"))));
+      }
+      success = true;
+    } catch (NoSuchObjectException e) {
+      if (!ignoreUnknownTab) {
+        throw e;
+      }
+    } finally {
+      if (!success && (hook != null)) {
+        hook.rollbackDropTable(tbl);
+      }
+    }
+  }
+
+  /**
+   * Truncate the table/partitions in the given database.
+   * @param dbName
+   *          The db to which the table to be truncated belongs
+   * @param tableName
+   *          The table to truncate
+   * @param partNames
+   *          List of partitions to truncate. NULL will truncate the whole table/all partitions
+   * @throws MetaException
+   * @throws TException
+   *           Could not truncate table properly.
+   */
+  @Override
+  public void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException {
+    client.truncate_table(dbName, tableName, partNames);
+  }
+
+  /**
+   * Recycles the files recursively from the input path to the cmroot directory, either by
+   * copying or by moving them.
+   *
+   * @param request Inputs: the path of the data files to be recycled to cmroot, and an
+   *                isPurge flag; when true, files to be recycled are not moved to Trash
+   * @return Response which is currently void
+   */
+  @Override
+  public CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException {
+    return client.cm_recycle(request);
+  }
+
+  /**
+   * @param type
+   * @return true if the type is dropped
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String)
+   */
+  public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException {
+    return client.drop_type(type);
+  }
+
+  /**
+   * @param name
+   * @return map of types
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String)
+   */
+  public Map<String, Type> getTypeAll(String name) throws MetaException,
+      TException {
+    Map<String, Type> result = null;
+    Map<String, Type> fromClient = client.get_type_all(name);
+    if (fromClient != null) {
+      result = new LinkedHashMap<>();
+      for (Map.Entry<String, Type> entry : fromClient.entrySet()) {
+        result.put(entry.getKey(), deepCopy(entry.getValue()));
+      }
+    }
+    return result;
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public List<String> getDatabases(String databasePattern)
+    throws MetaException {
+    try {
+      return filterHook.filterDatabases(client.get_databases(databasePattern));
+    } catch (Exception e) {
+      MetaStoreUtils.logAndThrowMetaException(e);
+    }
+    return null;
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public List<String> getAllDatabases() throws MetaException {
+    try {
+      return filterHook.filterDatabases(client.get_all_databases());
+    } catch (Exception e) {
+      MetaStoreUtils.logAndThrowMetaException(e);
+    }
+    return null;
+  }
+
+  /**
+   * @param tbl_name
+   * @param db_name
+   * @param max_parts
+   * @return list of partitions
+   * @throws NoSuchObjectException
+   * @throws MetaException
+   * @throws TException
+   */
+  @Override
+  public List<Partition> listPartitions(String db_name, String tbl_name,
+      short max_parts) throws NoSuchObjectException, MetaException, TException {
+    List<Partition> parts = client.get_partitions(db_name, tbl_name, max_parts);
+    return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+  }
+
+  @Override
+  public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException {
+    return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+        client.get_partitions_pspec(dbName, tableName, maxParts)));
+  }
+
+  @Override
+  public List<Partition> listPartitions(String db_name, String tbl_name,
+      List<String> part_vals, short max_parts)
+      throws NoSuchObjectException, MetaException, TException {
+    List<Partition> parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts);
+    return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+  }
+
+  @Override
+  public List<Partition> listPartitionsWithAuthInfo(String db_name,
+      String tbl_name, short max_parts, String user_name, List<String> group_names)
+       throws NoSuchObjectException, MetaException, TException {
+    List<Partition> parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts,
+        user_name, group_names);
+    return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+  }
+
+  @Override
+  public List<Partition> listPartitionsWithAuthInfo(String db_name,
+      String tbl_name, List<String> part_vals, short max_parts,
+      String user_name, List<String> group_names) throws NoSuchObjectException,
+      MetaException, TException {
+    List<Partition> parts = client.get_partitions_ps_with_auth(db_name,
+        tbl_name, part_vals, max_parts, user_name, group_names);
+    return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+  }
+
+  /**
+   * Get list of partitions matching specified filter
+   * @param db_name the database name
+   * @param tbl_name the table name
+   * @param filter the filter string,
+   *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+   *    be done only on string partition keys.
+   * @param max_parts the maximum number of partitions to return,
+   *    all partitions are returned if -1 is passed
+   * @return list of partitions
+   * @throws MetaException
+   * @throws NoSuchObjectException
+   * @throws TException
+   */
+  @Override
+  public List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+      String filter, short max_parts) throws MetaException,
+         NoSuchObjectException, TException {
+    List<Partition> parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts);
+    return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+  }
+
+  @Override
+  public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+                                                       String filter, int max_parts) throws MetaException,
+         NoSuchObjectException, TException {
+    return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+        client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts)));
+  }
+
+  @Override
+  public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr,
+      String default_partition_name, short max_parts, List<Partition> result)
+          throws TException {
+    assert result != null;
+    PartitionsByExprRequest req = new PartitionsByExprRequest(
+        db_name, tbl_name, ByteBuffer.wrap(expr));
+    if (default_partition_name != null) {
+      req.setDefaultPartitionName(default_partition_name);
+    }
+    if (max_parts >= 0) {
+      req.setMaxParts(max_parts);
+    }
+    PartitionsByExprResult r;
+    try {
+      r = client.get_partitions_by_expr(req);
+    } catch (TApplicationException te) {
+      // TODO: backward compat for Hive <= 0.12. Can be removed later.
+      if (te.getType() != TApplicationException.UNKNOWN_METHOD
+          && te.getType() != TApplicationException.WRONG_METHOD_NAME) {
+        throw te;
+      }
+      throw new IncompatibleMetastoreException(
+          "Metastore doesn't support listPartitionsByExpr: " + te.getMessage());
+    }
+    if (fastpath) {
+      result.addAll(r.getPartitions());
+    } else {
+      r.setPartitions(filterHook.filterPartitions(r.getPartitions()));
+      // TODO: in these methods, do we really need to deepcopy?
+      deepCopyPartitions(r.getPartitions(), result);
+    }
+    return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst.
+  }
+
+  /**
+   * @param name
+   * @return the database
+   * @throws NoSuchObjectException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_database(java.lang.String)
+   */
+  @Override
+  public Database getDatabase(String name) throws NoSuchObjectException,
+      MetaException, TException {
+    Database d = client.get_database(name);
+    return fastpath ? d : deepCopy(filterHook.filterDatabase(d));
+  }
+
+  /**
+   * @param tbl_name
+   * @param db_name
+   * @param part_vals
+   * @return the partition
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
+   *      java.lang.String, java.util.List)
+   */
+  @Override
+  public Partition getPartition(String db_name, String tbl_name,
+      List<String> part_vals) throws NoSuchObjectException, MetaException, TException {
+    Partition p = client.get_partition(db_name, tbl_name, part_vals);
+    return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+  }
+
+  @Override
+  public List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+      List<String> part_names) throws NoSuchObjectException, MetaException, TException {
+    List<Partition> parts = client.get_partitions_by_names(db_name, tbl_name, part_names);
+    return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+  }
+
+  @Override
+  public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+      throws MetaException, TException, NoSuchObjectException {
+    return client.get_partition_values(request);
+  }
+
+  @Override
+  public Partition getPartitionWithAuthInfo(String db_name, String tbl_name,
+      List<String> part_vals, String user_name, List<String> group_names)
+      throws MetaException, UnknownTableException, NoSuchObjectException,
+      TException {
+    Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name,
+        group_names);
+    return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+  }
+
+  /**
+   * @param name
+   * @param dbname
+   * @return the table
+   * @throws NoSuchObjectException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String,
+   *      java.lang.String)
+   */
+  @Override
+  public Table getTable(String dbname, String name) throws MetaException,
+      TException, NoSuchObjectException {
+    GetTableRequest req = new GetTableRequest(dbname, name);
+    req.setCapabilities(version);
+    Table t = client.get_table_req(req).getTable();
+    return fastpath ? t : deepCopy(filterHook.filterTable(t));
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  @Deprecated
+  public Table getTable(String tableName) throws MetaException, TException,
+      NoSuchObjectException {
+    Table t = getTable(DEFAULT_DATABASE_NAME, tableName);
+    return fastpath ? t : filterHook.filterTable(t);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+      throws MetaException, InvalidOperationException, UnknownDBException, TException {
+    GetTablesRequest req = new GetTablesRequest(dbName);
+    req.setTblNames(tableNames);
+    req.setCapabilities(version);
+    List<Table> tabs = client.get_table_objects_by_name_req(req).getTables();
+    return fastpath ? tabs : deepCopyTables(filterHook.filterTables(tabs));
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
+      throws MetaException, TException, InvalidOperationException, UnknownDBException {
+    return filterHook.filterTableNames(dbName,
+        client.get_table_names_by_filter(dbName, filter, maxTables));
+  }
+
+  /**
+   * @param name name of the type to look up
+   * @return the type
+   * @throws NoSuchObjectException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String)
+   */
+  public Type getType(String name) throws NoSuchObjectException, MetaException, TException {
+    return deepCopy(client.get_type(name));
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public List<String> getTables(String dbname, String tablePattern) throws MetaException {
+    try {
+      return filterHook.filterTableNames(dbname, client.get_tables(dbname, tablePattern));
+    } catch (Exception e) {
+      MetaStoreUtils.logAndThrowMetaException(e);
+    }
+    return null;
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public List<String> getTables(String dbname, String tablePattern, TableType tableType) throws MetaException {
+    try {
+      return filterHook.filterTableNames(dbname, client.get_tables_by_type(dbname, tablePattern, tableType.toString()));
+    } catch (Exception e) {
+      MetaStoreUtils.logAndThrowMetaException(e);
+    }
+    return null;
+  }
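+
+  // Illustrative usage (sketch; the database name is hypothetical): listing
+  // only the views in a database.
+  //
+  //   List<String> views = msc.getTables("reporting", "*",
+  //       TableType.VIRTUAL_VIEW);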
+
+  @Override
+  public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+      throws MetaException {
+    try {
+      return filterNames(client.get_table_meta(dbPatterns, tablePatterns, tableTypes));
+    } catch (Exception e) {
+      MetaStoreUtils.logAndThrowMetaException(e);
+    }
+    return null;
+  }
+
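+  /**
+   * Groups the given metadata by database, runs each group of table names
+   * through the filter hook, and re-assembles the surviving entries in
+   * their original order.
+   */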
+  private List<TableMeta> filterNames(List<TableMeta> metas) throws MetaException {
+    Map<String, TableMeta> sources = new LinkedHashMap<>();
+    Map<String, List<String>> dbTables = new LinkedHashMap<>();
+    for (TableMeta meta : metas) {
+      sources.put(meta.getDbName() + "." + meta.getTableName(), meta);
+      dbTables.computeIfAbsent(meta.getDbName(), k -> new ArrayList<>())
+          .add(meta.getTableName());
+    }
+    List<TableMeta> filtered = new ArrayList<>();
+    for (Map.Entry<String, List<String>> entry : dbTables.entrySet()) {
+      for (String table : filterHook.filterTableNames(entry.getKey(), entry.getValue())) {
+        filtered.add(sources.get(entry.getKey() + "." + table));
+      }
+    }
+    return filtered;
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public List<String> getAllTables(String dbname) throws MetaException {
+    try {
+      return filterHook.filterTableNames(dbname, client.get_all_tables(dbname));
+    } catch (Exception e) {
+      MetaStoreUtils.logAndThrowMetaException(e);
+    }
+    return null;
+  }
+
+  @Override
+  public boolean tableExists(String databaseName, String tableName) throws MetaException,
+      TException, UnknownDBException {
+    try {
+      GetTableRequest req = new GetTableRequest(databaseName, tableName);
+      req.setCapabilities(version);
+      return filterHook.filterTable(client.get_table_req(req).getTable()) != null;
+    } catch (NoSuchObjectException e) {
+      return false;
+    }
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  @Deprecated
+  public boolean tableExists(String tableName) throws MetaException,
+      TException, UnknownDBException {
+    return tableExists(DEFAULT_DATABASE_NAME, tableName);
+  }
+
+  @Override
+  public List<String> listPartitionNames(String dbName, String tblName,
+      short max) throws NoSuchObjectException, MetaException, TException {
+    return filterHook.filterPartitionNames(dbName, tblName,
+        client.get_partition_names(dbName, tblName, max));
+  }
+
+  @Override
+  public List<String> listPartitionNames(String db_name, String tbl_name,
+      List<String> part_vals, short max_parts)
+      throws MetaException, TException, NoSuchObjectException {
+    return filterHook.filterPartitionNames(db_name, tbl_name,
+        client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts));
+  }
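+
+  // Illustrative usage (sketch, hypothetical names): listing at most 100
+  // partition names under a fixed value for the leading partition key.
+  //
+  //   List<String> names = msc.listPartitionNames("sales_db", "orders",
+  //       Arrays.asList("2017"), (short) 100);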
+
+  /**
+   * Get number of partitions matching specified filter
+   * @param db_name the database name
+   * @param tbl_name the table name
+   * @param filter the filter string,
+   *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+   *    be done only on string partition keys.
+   * @return number of partitions
+   * @throws MetaException
+   * @throws NoSuchObjectException
+   * @throws TException
+   */
+  @Override
+  public int getNumPartitionsByFilter(String db_name, String tbl_name,
+                                      String filter) throws MetaException,
+          NoSuchObjectException, TException {
+    return client.get_num_partitions_by_filter(db_name, tbl_name, filter);
+  }
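+
+  // Illustrative usage (sketch; table and partition key names are
+  // hypothetical):
+  //
+  //   int n = msc.getNumPartitionsByFilter("sales_db", "orders",
+  //       "ds = \"2017-11-21\"");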
+
+  @Override
+  public void alter_partition(String dbName, String tblName, Partition newPart)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partition_with_environment_context(dbName, tblName, newPart, null);
+  }
+
+  @Override
+  public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext);
+  }
+
+  @Override
+  public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partitions_with_environment_context(dbName, tblName, newParts, null);
+  }
+
+  @Override
+  public void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext);
+  }
+
+  @Override
+  public void alterDatabase(String dbName, Database db)
+      throws MetaException, NoSuchObjectException, TException {
+    client.alter_database(dbName, db);
+  }
+
+  /**
+   * @param db database the table lives in
+   * @param tableName name of the table
+   * @return the fields (columns) of the table
+   * @throws UnknownTableException
+   * @throws UnknownDBException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String,
+   *      java.lang.String)
+   */
+  @Override
+  public List<FieldSchema> getFields(String db, String tableName)
+      throws MetaException, TException, UnknownTableException,
+      UnknownDBException {
+    List<FieldSchema> fields = client.get_fields(db, tableName);
+    return fastpath ? fields : deepCopyFieldSchemas(fields);
+  }
+
+  /**
+   * Create an index.
+   * @param index the index object
+   * @param indexTable the table that stores the index data
+   * @throws InvalidObjectException
+   * @throws MetaException
+   * @throws NoSuchObjectException
+   * @throws TException
+   * @throws AlreadyExistsException
+   */
+  @Override
+  public void createIndex(Index index, Table indexTable) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException {
+    client.add_index(index, indexTable);
+  }
+
+  /**
+   * @param dbname database the base table lives in
+   * @param base_tbl_name name of the indexed base table
+   * @param idx_name name of the index to alter
+   * @param new_idx the new index definition
+   * @throws InvalidOperationException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_index(java.lang.String,
+   *      java.lang.String, java.lang.String, org.apache.hadoop.hive.metastore.api.Index)
+   */
+  @Override
+  public void alter_index(String dbname, String base_tbl_name, String idx_name, Index new_idx)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_index(dbname, base_tbl_name, idx_name, new_idx);
+  }
+
+  /**
+   * @param dbName database the base table lives in
+   * @param tblName name of the indexed base table
+   * @param indexName name of the index
+   * @throws MetaException
+   * @throws UnknownTableException
+   * @throws NoSuchObjectException
+   * @throws TException
+   */
+  @Override
+  public Index getIndex(String dbName, String tblName, String indexName)
+      throws MetaException, UnknownTableException, NoSuchObjectException,
+      TException {
+    return deepCopy(filterHook.filterIndex(client.get_index_by_name(dbName, tblName, indexName)));
+  }
+
+  /**
+   * List the index names of the given base table.
+   * @param dbName database the base table lives in
+   * @param tblName name of the base table
+   * @param max maximum number of index names to return
+   * @return the list of index names
+   * @throws MetaException
+   * @throws TException
+   */
+  @Override
+  public List<String> listIndexNames(String dbName, String tblName, short max)
+      throws MetaException, TException {
+    return filterHook.filterIndexNames(dbName, tblName, client.get_index_names(dbName, tblName, max));
+  }
+
+  /**
+   * List all the indexes of the given base table.
+   *
+   * @param dbName database the base table lives in
+   * @param tblName name of the base table
+   * @param max maximum number of indexes to return
+   * @return list of indexes
+   * @throws NoSuchObjectException
+   * @throws MetaException
+   * @throws TException
+   */
+  @Override
+  public List<Index> listIndexes(String dbName, String tblName, short max)
+      throws NoSuchObjectException, MetaException, TException {
+    return filterHook.filterIndexes(client.get_indexes(dbName, tblName, max));
+  }
+
+  @Override
+  public List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest req)
+    throws MetaException, NoSuchObjectException, TException {
+    return client.get_primary_keys(req).getPrimaryKeys();
+  }
+
+  @Override
+  public List<SQLForeignKey> getForeignKeys(ForeignKeysRequest req) throws MetaException,
+    NoSuchObjectException, TException {
+    return client.get_foreign_keys(req).getForeignKeys();
+  }
+
+  @Override
+  public List<SQLUniqueConstraint> getUniqueConstraints(UniqueConstraintsRequest req)
+    throws MetaException, NoSuchObjectException, TException {
+    return client.get_unique_constraints(req).getUniqueConstraints();
+  }
+
+  @Override
+  public List<SQLNotNullConstraint> getNotNullConstraints(NotNullConstraintsRequest req)
+    throws MetaException, NoSuchObjectException, TException {
+    return client.get_not_null_constraints(req).getNotNullConstraints();
+  }
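+
+  // Illustrative usage (sketch, hypothetical names): fetching the primary
+  // key columns declared on a table.
+  //
+  //   List<SQLPrimaryKey> pk = msc.getPrimaryKeys(
+  //       new PrimaryKeysRequest("sales_db", "orders"));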
+
+  /** {@inheritDoc} */
+  @Override
+  @Deprecated
+  // Use setPartitionColumnStatistics instead.
+  public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+    throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+    InvalidInputException {
+    return client.update_table_column_statistics(statsObj);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  @Deprecated
+  // Use setPartitionColumnStatistics instead.
+  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
+    throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+    InvalidInputException {
+    return client.update_partition_column_statistics(statsObj);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
+    throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+    InvalidInputException {
+    return client.set_aggr_stats_for(request);
+  }
+
+  @Override
+  public void flushCache() {
+    try {
+      client.flushCache();
+    } catch (TException e) {
+      // Not much we can do about it honestly
+      LOG.warn("Got error flushing the cache", e);
+    }
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+      List<String> colNames) throws NoSuchObjectException, MetaException, TException,
+      InvalidInputException, InvalidObjectException {
+    return client.get_table_statistics_req(
+        new TableStatsRequest(dbName, tableName, colNames)).getTableStats();
+  }
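+
+  // Illustrative usage (sketch; table and column names are hypothetical):
+  //
+  //   List<ColumnStatisticsObj> stats = msc.getTableColumnStatistics(
+  //       "default", "web_logs", Arrays.asList("ip", "bytes_sent"));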
+
+  /** {@inheritDoc} */
+  @Override
+  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+      String dbName, String tableName, List<String> partNames, List<String> colNames)
+          throws NoSuchObjectException, MetaException, TException {
+    return client.get_partitions_statistics_req(
+        new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats();
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
+    String colName) throws NoSuchObjectException, InvalidObjectException, MetaException,
+    TException, InvalidInputException {
+    return client.delete_partition_column_statistics(dbName, tableName, partName, colName);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName)
+    throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+    InvalidInputException {
+    return client.delete_table_column_statistics(dbName, tableName, colName);
+  }
+
+  /**
+   * @param db database the table lives in
+   * @param tableName name of the table
+   * @return the schema of the table, including partition columns
+   * @throws UnknownTableException
+   * @throws UnknownDBException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String,
+   *      java.lang.String)
+   */
+  @Override
+  public List<FieldSchema> getSchema(String db, String tableName)
+      throws MetaException, TException, UnknownTableException,
+      UnknownDBException {
+    EnvironmentContext envCxt = null;
+    String addedJars = MetastoreConf.getVar(conf, ConfVars.ADDED_JARS);
+    if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) {
+      Map<String, String> props = new HashMap<String, String>();
+      props.put("hive.added.jars.path", addedJars);
+      envCxt = new EnvironmentContext(props);
+    }
+
+    List<FieldSchema> fields = client.get_schema_with_environment_context(db, tableName, envCxt);
+    return fastpath ? fields : deepCopyFieldSchemas(fields);
+  }
+
+  @Override
+  public String getConfigValue(String name, String defaultValue)
+      throws TException, ConfigValSecurityException {
+    return client.get_config_value(name, defaultValue);
+  }
+
+  @Override
+  public Partition getPartition(String db, String tableName, String partName)
+      throws MetaException, TException, UnknownTableException, NoSuchObjectException {
+    Partition p = client.get_partition_by_name(db, tableName, partName);
+    return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+  }
+
+  public Partition appendPartitionByName(String dbName, String tableName, String partName)
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+    return appendPartitionByName(dbName, tableName, partName, null);
+  }
+
+  public Partition appendPartitionByName(String dbName, String tableName, String partName,
+      EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException,
+      MetaException, TException {
+    Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName,
+        partName, envContext);
+    return fastpath ? p : deepCopy(p);
+  }
+
+  public boolean dropPartitionByName(String dbName, String tableName, String partName,
+      boolean deleteData) throws NoSuchObjectException, MetaException, TException {
+    return dropPartitionByName(dbName, tableName, partName, deleteData, null);
+  }
+
+  public boolean dropPartitionByName(String dbName, String tableName, String partName,
+      boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException,
+      MetaException, TException {
+    return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName,
+        deleteData, envContext);
+  }
+
+  private HiveMetaHook getHook(Table tbl) throws MetaException {
+    if (hookLoader == null) {
+      return null;
+    }
+    return hookLoader.getHook(tbl);
+  }
+
+  @Override
+  public List<String> partitionNameToVals(String name) throws MetaException, TException {
+    return client.partition_name_to_vals(name);
+  }
+
+  @Override
+  public Map<String, String> partitionNameToSpec(String name) throws MetaException, TException{
+    return client.partition_name_to_spec(name);
+  }
+
+  /**
+   * @param partition the partition to copy, possibly null
+   * @return a defensive copy of the partition, or null if the input was null
+   */
+  private Partition deepCopy(Partition partition) {
+    Partition copy = null;
+    if (partition != null) {
+      copy = new Partition(partition);
+    }
+    return copy;
+  }
+
+  private Database deepCopy(Database database) {
+    Database copy = null;
+    if (database != null) {
+      copy = new Database(database);
+    }
+    return copy;
+  }
+
+  protected Table deepCopy(Table table) {
+    Table copy = null;
+    if (table != null) {
+      copy = new Table(table);
+    }
+    return copy;
+  }
+
+  private Index deepCopy(Index index) {
+    Index copy = null;
+    if (index != null) {
+      copy = new Index(index);
+    }
+    return copy;
+  }
+
+  private Type deepCopy(Type type) {
+    Type copy = null;
+    if (type != null) {
+      copy = new Type(type);
+    }
+    return copy;
+  }
+
+  private FieldSchema deepCopy(FieldSchema schema) {
+    FieldSchema copy = null;
+    if (schema != null) {
+      copy = new FieldSchema(schema);
+    }
+    return copy;
+  }
+
+  private Function deepCopy(Function func) {
+    Function copy = null;
+    if (func != null) {
+      copy = new Function(func);
+    }
+    return copy;
+  }
+
+  protected PrincipalPrivilegeSet deepCopy(PrincipalPrivilegeSet pps) {
+    PrincipalPrivilegeSet copy = null;
+    if (pps != null) {
+      copy = new PrincipalPrivilegeSet(pps);
+    }
+    return copy;
+  }
+
+  private List<Partition> deepCopyPartitions(List<Partition> partitions) {
+    return deepCopyPartitions(partitions, null);
+  }
+
+  private List<Partition> deepCopyPartitions(
+      Collection<Partition> src, List<Partition> dest) {
+    if (src == null) {
+      return dest;
+    }
+    if (dest == null) {
+      dest = new ArrayList<Partition>(src.size());
+    }
+    for (Partition part : src) {
+      dest.add(deepCopy(part));
+    }
+    return dest;
+  }
+
+  private List<Table> deepCopyTables(List<Table> tables) {
+    List<Table> copy = null;
+    if (tables != null) {
+      copy = new ArrayList<Table>();
+      for (Table tab : tables) {
+        copy.add(deepCopy(tab));
+      }
+    }
+    return copy;
+  }
+
+  protected List<FieldSchema> deepCopyFieldSchemas(List<FieldSchema> schemas) {
+    List<FieldSchema> copy = null;
+    if (schemas != null) {
+      copy = new ArrayList<FieldSchema>();
+      for (FieldSchema schema : schemas) {
+        copy.add(deepCopy(schema));
+      }
+    }
+    return copy;
+  }
+
+  @Override
+  public boolean dropIndex(String dbName, String tblName, String name,
+      boolean deleteData) throws NoSuchObjectException, MetaException,
+      TException {
+    return client.drop_index_by_name(dbName, tblName, name, deleteData);
+  }
+
+  @Override
+  public boolean grant_role(String roleName, String userName,
+      PrincipalType principalType, String grantor, PrincipalType grantorType,
+      boolean grantOption) throws MetaException, TException {
+    GrantRevokeRoleRequest req = new GrantRevokeRoleRequest();
+    req.setRequestType(GrantRevokeType.GRANT);
+    req.setRoleName(roleName);
+    req.setPrincipalName(userName);
+    req.setPrincipalType(principalType);
+    req.setGrantor(grantor);
+    req.setGrantorType(grantorType);
+    req.setGrantOption(grantOption);
+    GrantRevokeRoleResponse res = client.grant_revoke_role(req);
+    if (!res.isSetSuccess()) {
+      throw new MetaException("GrantRevokeResponse missing success field");
+    }
+    return res.isSuccess();
+  }
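+
+  // Illustrative usage (sketch; all principal names are hypothetical):
+  // granting role "analyst" to user "alice" without the grant option.
+  //
+  //   msc.grant_role("analyst", "alice", PrincipalType.USER,
+  //       "admin", PrincipalType.ROLE, false);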
+
+  @Override
+  public boolean create_role(Role role)
+      throws MetaException, TException {
+    return client.create_role(role);
+  }
+
+  @Override
+  public boolean drop_role(String roleName) throws MetaException, TException {
+    return client.drop_role(roleName);
+  }
+
+  @Override
+  public List<Role> list_roles(String principalName,
+      PrincipalType principalType) throws MetaException, TException {
+    return client.list_roles(principalName, principalType);
+  }
+
+  @Override
+  public List<String> listRoleNames() throws MetaException, TException {
+    return client.get_role_names();
+  }
+
+  @Override
+  public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest req)
+      throws MetaException, TException {
+    return client.get_principals_in_role(req);
+  }
+
+  @Override
+  public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(
+      GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException {
+    return client.get_role_grants_for_principal(getRolePrincReq);
+  }
+
+  @Override
+  public boolean grant_privileges(PrivilegeBag privileges)
+      throws MetaException, TException {
+    GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest();
+    req.setRequestType(GrantRevokeType.GRANT);
+    req.setPrivileges(privileges);
+    GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req);
+    if (!res.isSetSuccess()) {
+      throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+    }
+    return res.isSuccess();
+  }
+
+  @Override
+  public boolean revoke_role(String roleName, String userName,
+      PrincipalType principalType, boolean grantOption) throws MetaException, TException {
+    GrantRevokeRoleRequest req = new GrantRevokeRoleRequest();
+    req.setRequestType(GrantRevokeType.REVOKE);
+    req.setRoleName(roleName);
+    req.setPrincipalName(userName);
+    req.setPrincipalType(principalType);
+    req.setGrantOption(grantOption);
+    GrantRevokeRoleResponse res = client.grant_revoke_role(req);
+    if (!res.isSetSuccess()) {
+      throw new MetaException("GrantRevokeResponse missing success field");
+    }
+    return res.isSuccess();
+  }
+
+  @Override
+  public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) throws MetaException,
+      TException {
+    GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest();
+    req.setRequestType(GrantRevokeType.REVOKE);
+    req.setPrivileges(privileges);
+    req.setRevokeGrantOption(grantOption);
+    GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req);
+    if (!res.isSetSuccess()) {
+      throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+    }
+    return res.isSuccess();
+  }
+
+  @Override
+  public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
+      String userName, List<String> groupNames) throws MetaException,
+      TException {
+    return client.get_privilege_set(hiveObject, userName, groupNames);
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> list_privileges(String principalName,
+      PrincipalType principalType, HiveObjectRef hiveObject)
+      throws MetaException, TException {
+    return client.list_privileges(principalName, principalType, hiveObject);
+  }
+
+  public String getDelegationToken(String renewerKerberosPrincipalName) throws
+  MetaException, TException, IOException {
+    //a convenience method that makes the intended owner for the delegation
+    //token request the current user
+    String owner = SecurityUtils.getUser();
+    return getDelegationToken(owner, renewerKerberosPrincipalName);
+  }
+
+  @Override
+  public String getDelegationToken(String owner, String renewerKerberosPrincipalName) throws
+  MetaException, TException {
+    // This is expected to be a no-op, so we will return null when we use local metastore.
+    if (localMetaStore) {
+      return null;
+    }
+    return client.get_delegation_token(owner, renewerKerberosPrincipalName);
+  }
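+
+  // Illustrative usage (sketch; the renewer principal is hypothetical):
+  //
+  //   String tok = msc.getDelegationToken("hive/renewer@EXAMPLE.COM");
+  //   long expiry = msc.renewDelegationToken(tok);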
+
+  @Override
+  public long renewDelegationToken(String tokenStrForm) throws MetaException, TException {
+    if (localMetaStore) {
+      return 0;
+    }
+    return client.renew_delegation_token(tokenStrForm);
+  }
+
+  @Override
+  public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException {
+    if (localMetaStore) {
+      return;
+    }
+    client.cancel_delegation_token(tokenStrForm);
+  }
+
+  @Override
+  public boolean addToken(String tokenIdentifier, String delegationToken) throws TException {
+     return client.add_token(tokenIdentifier, delegationToken);
+  }
+
+  @Override
+  public boolean removeToken(String tokenIdentifier) throws TException {
+    return client.remove_token(tokenIdentifier);
+  }
+
+  @Override
+  public String getToken(String tokenIdentifier) throws TException {
+    return client.get_token(tokenIdentifier);
+  }
+
+  @Override
+  public List<String> getAllTokenIdentifiers() throws TException {
+    return client.get_all_token_identifiers();
+  }
+
+  @Override
+  public int addMasterKey(String key) throws MetaException, TException {
+    return client.add_master_key(key);
+  }
+
+  @Override
+  public void updateMasterKey(Integer seqNo, String key)
+      throws NoSuchObjectException, MetaException, TException {
+    client.update_master_key(seqNo, key);
+  }
+
+  @Override
+  public boolean removeMasterKey(Integer keySeq) throws TException {
+    return client.remove_master_key(keySeq);
+  }
+
+  @Override
+  public String[] getMasterKeys() throws TException {
+    List<String> keyList = client.get_master_keys();
+    return keyList.toArray(new String[keyList.size()]);
+  }
+
+  @Override
+  public ValidTxnList getValidTxns() throws TException {
+    return TxnUtils.createValidReadTxnList(client.get_open_txns(), 0);
+  }
+
+  @Override
+  public ValidTxnList getValidTxns(long currentTxn) throws TException {
+    return TxnUtils.createValidReadTxnList(client.get_open_txns(), currentTxn);
+  }
+
+  @Override
+  public long openTxn(String user) throws TException {
+    OpenTxnsResponse txns = openTxns(user, 1);
+    return txns.getTxn_ids().get(0);
+  }
+
+  @Override
+  public OpenTxnsResponse openTxns(String user, int numTxns) throws TException {
+    String hostname = null;
+    try {
+      hostname = InetAddress.getLocalHost().getHostName();
+    } catch (UnknownHostException e) {
+      LOG.error("Unable to resolve my host name: " + e.getMessage());
+      throw new RuntimeException(e);
+    }
+    return client.open_txns(new OpenTxnRequest(numTxns, user, hostname));
+  }
+
+  @Override
+  public void rollbackTxn(long txnid) throws NoSuchTxnException, TException {
+    client.abort_txn(new AbortTxnRequest(txnid));
+  }
+
+  @Override
+  public void commitTxn(long txnid)
+      throws NoSuchTxnException, TxnAbortedException, TException {
+    client.commit_txn(new CommitTxnRequest(txnid));
+  }
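+
+  // Illustrative usage (sketch; the user name is hypothetical): the basic
+  // transaction lifecycle exposed by this client.
+  //
+  //   long txnId = msc.openTxn("etl_user");
+  //   try {
+  //     // ... transactional work, heartbeating as needed ...
+  //     msc.commitTxn(txnId);
+  //   } catch (TException e) {
+  //     msc.rollbackTxn(txnId);
+  //     throw e;
+  //   }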
+
+  @Override
+  public GetOpenTxnsInfoResponse showTxns() throws TException {
+    return client.get_open_txns_info();
+  }
+
+  @Override
+  public void abortTxns(List<Long> txnids) throws NoSuchTxnException, TException {
+    client.abort_txns(new AbortTxnsRequest(txnids));
+  }
+
+  @Override
+  public LockResponse lock(LockRequest request)
+      throws NoSuchTxnException, TxnAbortedException, TException {
+    return client.lock(request);
+  }
+
+  @Override
+  public LockResponse checkLock(long lockid)
+      throws NoSuchTxnException, TxnAbortedException, NoSuchLockException,
+      TException {
+    return client.check_lock(new CheckLockRequest(lockid));
+  }
+
+  @Override
+  public void unlock(long lockid)
+      throws NoSuchLockException, TxnOpenException, TException {
+    client.unlock(new UnlockRequest(lockid));
+  }
+
+  @Override
+  @Deprecated
+  public ShowLocksResponse showLocks() throws TException {
+    return client.show_locks(new ShowLocksRequest());
+  }
+
+  @Override
+  public ShowLocksResponse showLocks(ShowLocksRequest request) throws TException {
+    return client.show_locks(request);
+  }
+
+  @Override
+  public void heartbeat(long txnid, long lockid)
+      throws NoSuchLockException, NoSuchTxnException, TxnAbortedException,
+      TException {
+    HeartbeatRequest hb = new HeartbeatRequest();
+    hb.setLockid(lockid);
+    hb.setTxnid(txnid);
+    client.heartbeat(hb);
+  }
+
+  @Override
+  public HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max)
+    throws NoSuchTxnException, TxnAbortedException, TException {
+    HeartbeatTxnRangeRequest rqst = new HeartbeatTxnRangeRequest(min, max);
+    return client.heartbeat_txn_range(rqst);
+  }
+
+  @Override
+  @Deprecated
+  public void compact(String dbname, String tableName, String partitionName,  CompactionType type)
+      throws TException {
+    CompactionRequest cr = new CompactionRequest();
+    if (dbname == null) {
+      cr.setDbname(DEFAULT_DATABASE_NAME);
+    } else {
+      cr.setDbname(dbname);
+    }
+    cr.setTablename(tableName);
+    if (partitionName != null) {
+      cr.setPartitionname(partitionName);
+    }
+    cr.setType(type);
+    client.compact(cr);
+  }
+
+  @Deprecated
+  @Override
+  public void compact(String dbname, String tableName, String partitionName, CompactionType type,
+                      Map<String, String> tblproperties) throws TException {
+    compact2(dbname, tableName, partitionName, type, tblproperties);
+  }
+
+  @Override
+  public CompactionResponse compact2(String dbname, String tableName, String partitionName, CompactionType type,
+                      Map<String, String> tblproperties) throws TException {
+    CompactionRequest cr = new CompactionRequest();
+    if (dbname == null) {
+      cr.setDbname(DEFAULT_DATABASE_NAME);
+    } else {
+      cr.setDbname(dbname);
+    }
+    cr.setTablename(tableName);
+    if (partitionName != null) {
+      cr.setPartitionname(partitionName);
+    }
+    cr.setType(type);
+    cr.setProperties(tblproperties);
+    return client.compact2(cr);
+  }
+
+  @Override
+  public ShowCompactResponse showCompactions() throws TException {
+    return client.show_compact(new ShowCompactRequest());
+  }
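+
+  // Illustrative usage (sketch, hypothetical names): requesting a minor
+  // compaction of one partition and then inspecting the compaction queue.
+  //
+  //   msc.compact2("sales_db", "orders", "year=2017/month=11",
+  //       CompactionType.MINOR, null);
+  //   ShowCompactResponse queue = msc.showCompactions();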
+
+  @Deprecated
+  @Override
+  public void addDynamicPartitions(long txnId, String dbName, String tableName,
+                                   List<String> partNames) throws TException {
+    client.add_dynamic_partitions(new AddDynamicPartitions(txnId, dbName, tableName, partNames));
+  }
+
+  @Override
+  public void addDynamicPartitions(long txnId, String dbName, String tableName,
+                                   List<String> partNames, DataOperationType operationType) throws TException {
+    AddDynamicPartitions adp = new AddDynamicPartitions(txnId, dbName, tableName, partNames);
+    adp.setOperationType(operationType);
+    client.add_dynamic_partitions(adp);
+  }
+
+  @Override
+  public void insertTable(Table table, boolean overwrite) throws MetaException {
+    boolean failed = true;
+    HiveMetaHook hook = getHook(table);
+    if (hook == null || !(hook instanceof DefaultHiveMetaHook)) {
+      return;
+    }
+    DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook;
+    try {
+      hiveMetaHook.commitInsertTable(table, overwrite);
+      failed = false;
+    } finally {
+      if (failed) {
+        hiveMetaHook.rollbackInsertTable(table, overwrite);
+      }
+    }
+  }
+
+  @InterfaceAudience.LimitedPrivate({"HCatalog"})
+  @Override
+  public NotificationEventResponse getNextNotification(long lastEventId, int maxEvents,
+                                                       NotificationFilter filter) throws TException {
+    NotificationEventRequest rqst = new NotificationEventRequest(lastEventId);
+    rqst.setMaxEvents(maxEvents);
+    NotificationEventResponse rsp = client.get_next_notification(rqst);
+    LOG.debug("Got back " + rsp.getEventsSize() + " events");
+    if (filter == null) {
+      return rsp;
+    } else {
+      NotificationEventResponse filtered = new NotificationEventResponse();
+      if (rsp != null && rsp.getEvents() != null) {
+        for (NotificationEvent e : rsp.getEvents()) {
+          if (filter.accept(e)) {
+            filtered.addToEvents(e);
+          }
+        }
+      }
+      return filtered;
+    }
+  }
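+
+  // Illustrative usage (sketch): polling for events past the last consumed
+  // id, keeping only CREATE_TABLE events via the filter callback.
+  //
+  //   long last = msc.getCurrentNotificationEventId().getEventId();
+  //   NotificationEventResponse rsp = msc.getNextNotification(last, 100,
+  //       e -> "CREATE_TABLE".equals(e.getEventType()));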
+
+  @InterfaceAudience.LimitedPrivate({"HCatalog"})
+  @Override
+  public CurrentNotificationEventId getCurrentNotificationEventId() throws TException {
+    return client.get_current_notificationEventId();
+  }
+
+  @InterfaceAudience.LimitedPrivate({"HCatalog"})
+  @Override
+  public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst)
+          throws TException {
+    return client.get_notification_events_count(rqst);
+  }
+
+  @InterfaceAudience.LimitedPrivate({"Apache Hive, HCatalog"})
+  @Override
+  public FireEventResponse fireListenerEvent(FireEventRequest rqst) throws TException {
+    return client.fire_listener_event(rqst);
+  }
+
+  /**
+   * Creates a synchronized wrapper for any {@link IMetaStoreClient}.
+   * This may be used by multi-threaded applications until we have
+   * fixed all reentrancy bugs.

<TRUNCATED>
