drill-commits mailing list archives

From j..@apache.org
Subject drill git commit: DRILL-4256: Create HiveConf per HiveStoragePlugin and reuse it wherever needed.
Date Wed, 20 Jan 2016 18:50:16 GMT
Repository: drill
Updated Branches:
  refs/heads/master 9cc7e116f -> 88ea7a25a


DRILL-4256: Create HiveConf per HiveStoragePlugin and reuse it wherever needed.

Creating new instances of HiveConf() is very costly, so we should avoid creating new ones as much as possible.
Also get rid of hiveConfigOverride and use the HiveConf in HiveStoragePlugin wherever we need the HiveConf.
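
The pattern the commit introduces is worth spelling out: build one HiveConf per storage plugin, applying the user's config overrides once at construction time, and let every consumer derive cheap per-task configs (e.g. a JobConf) from that shared instance. A minimal standalone sketch of that pattern follows; the class name and structure are illustrative only, not Drill's actual code.

import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.mapred.JobConf;

// Sketch only: one HiveConf per plugin instance, reused by all consumers.
public class SharedHiveConfSketch {

  private final HiveConf hiveConf;

  public SharedHiveConfSketch(Map<String, String> hiveConfigOverride) {
    // new HiveConf() re-parses hive-site.xml and other config resources,
    // which is the expensive step this commit avoids repeating.
    this.hiveConf = new HiveConf();
    for (Map.Entry<String, String> entry : hiveConfigOverride.entrySet()) {
      hiveConf.set(entry.getKey(), entry.getValue());
    }
  }

  // Accessor mirroring HiveStoragePlugin.getHiveConf() in the diff below.
  public HiveConf getHiveConf() {
    return hiveConf;
  }

  // Consumers build lightweight JobConfs from the shared HiveConf instead of
  // constructing a fresh HiveConf (cf. "new JobConf(hiveConf)" in the diff).
  public JobConf newJobConf() {
    return new JobConf(hiveConf);
  }
}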


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/88ea7a25
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/88ea7a25
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/88ea7a25

Branch: refs/heads/master
Commit: 88ea7a25a3ed6d04f1a12ce98d5211082dc18211
Parents: 9cc7e11
Author: vkorukanti <venki.korukanti@gmail.com>
Authored: Mon Jan 11 15:01:02 2016 -0800
Committer: vkorukanti <venki.korukanti@gmail.com>
Committed: Wed Jan 20 09:44:52 2016 -0800

----------------------------------------------------------------------
 .../planner/sql/HivePartitionDescriptor.java    |  2 +-
 ...onvertHiveParquetScanToDrillParquetScan.java | 21 +++++---
 .../store/hive/DrillHiveMetaStoreClient.java    | 54 ++++++++------------
 .../hive/HiveDrillNativeParquetSubScan.java     | 20 +++++---
 .../hive/HiveDrillNativeScanBatchCreator.java   | 15 +-----
 .../exec/store/hive/HiveMetadataProvider.java   |  9 ++--
 .../drill/exec/store/hive/HiveReadEntry.java    | 10 +---
 .../drill/exec/store/hive/HiveRecordReader.java | 14 ++---
 .../apache/drill/exec/store/hive/HiveScan.java  | 11 ++--
 .../exec/store/hive/HiveScanBatchCreator.java   | 10 ++--
 .../exec/store/hive/HiveStoragePlugin.java      | 25 ++++++++-
 .../drill/exec/store/hive/HiveSubScan.java      | 37 ++++++++++++--
 .../drill/exec/store/hive/HiveUtilities.java    | 24 ++-------
 .../store/hive/schema/HiveSchemaFactory.java    | 22 ++------
 14 files changed, 146 insertions(+), 128 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HivePartitionDescriptor.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HivePartitionDescriptor.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HivePartitionDescriptor.java
index 5009bf1..e1eb25e 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HivePartitionDescriptor.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HivePartitionDescriptor.java
@@ -105,7 +105,7 @@ public class HivePartitionDescriptor extends AbstractPartitionDescriptor {
       }
     }
 
-    HiveReadEntry newReadEntry = new HiveReadEntry(origReadEntry.table, newPartitions, origReadEntry.hiveConfigOverride);
+    HiveReadEntry newReadEntry = new HiveReadEntry(origReadEntry.table, newPartitions);
 
     return hiveScan.clone(newReadEntry);
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
index 722776b..a1933be 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.planner.sql.logical;
 
+import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.calcite.plan.RelOptRuleCall;
@@ -40,6 +41,7 @@ import org.apache.drill.exec.store.hive.HiveReadEntry;
 import org.apache.drill.exec.store.hive.HiveScan;
 import org.apache.drill.exec.store.hive.HiveTable.HivePartition;
 import org.apache.drill.exec.store.hive.HiveUtilities;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -86,17 +88,18 @@ public class ConvertHiveParquetScanToDrillParquetScan extends StoragePluginOptim
   @Override
   public boolean matches(RelOptRuleCall call) {
     final DrillScanRel scanRel = (DrillScanRel) call.rel(0);
-    final PlannerSettings settings = PrelUtil.getPlannerSettings(call.getPlanner());
 
     if (!(scanRel.getGroupScan() instanceof HiveScan) || ((HiveScan) scanRel.getGroupScan()).isNativeReader()) {
       return false;
     }
 
     final HiveScan hiveScan = (HiveScan) scanRel.getGroupScan();
+    final HiveConf hiveConf = hiveScan.getHiveConf();
     final Table hiveTable = hiveScan.hiveReadEntry.getTable();
 
     final Class<? extends InputFormat> tableInputFormat =
-        getInputFormatFromSD(MetaStoreUtils.getTableMetadata(hiveTable), hiveScan.hiveReadEntry, hiveTable.getSd());
+        getInputFormatFromSD(MetaStoreUtils.getTableMetadata(hiveTable), hiveScan.hiveReadEntry, hiveTable.getSd(),
+            hiveConf);
     if (tableInputFormat == null || !tableInputFormat.equals(MapredParquetInputFormat.class)) {
       return false;
     }
@@ -111,7 +114,8 @@ public class ConvertHiveParquetScanToDrillParquetScan extends StoragePluginOptim
     for (HivePartition partition : partitions) {
       final StorageDescriptor partitionSD = partition.getPartition().getSd();
       Class<? extends InputFormat> inputFormat = getInputFormatFromSD(
-          HiveUtilities.getPartitionMetadata(partition.getPartition(), hiveTable), hiveScan.hiveReadEntry, partitionSD);
+          HiveUtilities.getPartitionMetadata(partition.getPartition(), hiveTable), hiveScan.hiveReadEntry, partitionSD,
+          hiveConf);
       if (inputFormat == null || !inputFormat.equals(tableInputFormat)) {
         return false;
       }
@@ -139,11 +143,16 @@ public class ConvertHiveParquetScanToDrillParquetScan extends StoragePluginOptim
    * @return {@link InputFormat} class or null if a failure has occurred. Failure is logged as warning.
    */
   private Class<? extends InputFormat> getInputFormatFromSD(final Properties properties,
-      final HiveReadEntry hiveReadEntry, final StorageDescriptor sd) {
+      final HiveReadEntry hiveReadEntry, final StorageDescriptor sd, final HiveConf hiveConf) {
     final Table hiveTable = hiveReadEntry.getTable();
     try {
-      final JobConf job = new JobConf();
-      HiveUtilities.addConfToJob(job, properties, hiveReadEntry.hiveConfigOverride);
+      final String inputFormatName = sd.getInputFormat();
+      if (!Strings.isNullOrEmpty(inputFormatName)) {
+        return (Class<? extends InputFormat>) Class.forName(inputFormatName);
+      }
+
+      final JobConf job = new JobConf(hiveConf);
+      HiveUtilities.addConfToJob(job, properties);
       return HiveUtilities.getInputFormatClass(job, sd, hiveTable);
     } catch (final Exception e) {
       logger.warn("Failed to get InputFormat class from Hive table '{}.{}'. StorageDescriptor [{}]",

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/DrillHiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/DrillHiveMetaStoreClient.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/DrillHiveMetaStoreClient.java
index 8920b6a..17e3478 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/DrillHiveMetaStoreClient.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/DrillHiveMetaStoreClient.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.store.hive;
 
+import com.google.common.base.Strings;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
@@ -41,7 +42,6 @@ import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collections;
 import java.util.List;
-import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
@@ -58,8 +58,6 @@ public abstract class DrillHiveMetaStoreClient extends HiveMetaStoreClient {
   public final String HIVE_METASTORE_CACHE_EXPIRE_AFTER_WRITE = "write";
   public final String HIVE_METASTORE_CACHE_EXPIRE_AFTER_ACCESS = "access";
 
-  protected final Map<String, String> hiveConfigOverride;
-
   protected final LoadingCache<String, List<String>> databases;
   protected final LoadingCache<String, List<String>> tableNameLoader;
   protected final LoadingCache<TableName, HiveReadEntry> tableLoaders;
@@ -73,16 +71,12 @@ public abstract class DrillHiveMetaStoreClient extends HiveMetaStoreClient {
    * @param processUserMetaStoreClient MetaStoreClient of process user. Useful for generating the delegation tokens when
    *                                   SASL (KERBEROS or custom SASL implementations) is enabled.
    * @param hiveConf Conf including authorization configuration
-   * @param hiveConfigOverride
    * @param userName User who is trying to access the Hive metadata
-   * @param ignoreAuthzErrors When browsing info schema, we want to ignore permission denied errors. If a permission
-   *                          denied error occurs while accessing metadata for an object, it will not be shown in the
-   *                          info schema.
    * @return
    * @throws MetaException
    */
   public static DrillHiveMetaStoreClient createClientWithAuthz(final DrillHiveMetaStoreClient processUserMetaStoreClient,
-      final HiveConf hiveConf, final Map<String, String> hiveConfigOverride, final String userName) throws MetaException {
+      final HiveConf hiveConf, final String userName) throws MetaException {
     try {
       boolean delegationTokenGenerated = false;
 
@@ -118,7 +112,7 @@ public abstract class DrillHiveMetaStoreClient extends HiveMetaStoreClient {
       return ugiForRpc.doAs(new PrivilegedExceptionAction<DrillHiveMetaStoreClient>() {
         @Override
         public DrillHiveMetaStoreClient run() throws Exception {
-          return new HiveClientWithAuthzWithCaching(hiveConfForClient, hiveConfigOverride, ugiForRpc, userName);
+          return new HiveClientWithAuthzWithCaching(hiveConfForClient, ugiForRpc, userName);
         }
       });
     } catch (final Exception e) {
@@ -130,38 +124,36 @@ public abstract class DrillHiveMetaStoreClient extends HiveMetaStoreClient {
    * Create a DrillMetaStoreClient that can be shared across multiple users. This is created when impersonation is
    * disabled.
    * @param hiveConf
-   * @param hiveConfigOverride
    * @return
    * @throws MetaException
    */
-  public static DrillHiveMetaStoreClient createNonCloseableClientWithCaching(final HiveConf hiveConf,
-      final Map<String, String> hiveConfigOverride) throws MetaException {
-    return new NonCloseableHiveClientWithCaching(hiveConf, hiveConfigOverride);
+  public static DrillHiveMetaStoreClient createNonCloseableClientWithCaching(final HiveConf hiveConf)
+      throws MetaException {
+    return new NonCloseableHiveClientWithCaching(hiveConf);
   }
 
-  private DrillHiveMetaStoreClient(final HiveConf hiveConf, final Map<String, String> hiveConfigOverride)
-      throws MetaException {
+  private DrillHiveMetaStoreClient(final HiveConf hiveConf) throws MetaException {
     super(hiveConf);
 
     int hmsCacheTTL = 60; // default is 60 seconds
     boolean expireAfterWrite = true; // default is expire after write.
 
-    if (hiveConfigOverride.containsKey(HIVE_METASTORE_CACHE_TTL)) {
-      hmsCacheTTL = Integer.valueOf(hiveConfigOverride.get(HIVE_METASTORE_CACHE_TTL));
+    final String ttl = hiveConf.get(HIVE_METASTORE_CACHE_TTL);
+    if (!Strings.isNullOrEmpty(ttl)) {
+      hmsCacheTTL = Integer.valueOf(ttl);
       logger.warn("Hive metastore cache ttl is set to {} seconds.", hmsCacheTTL);
     }
 
-    if (hiveConfigOverride.containsKey(HIVE_METASTORE_CACHE_EXPIRE)) {
-      if (hiveConfigOverride.get(HIVE_METASTORE_CACHE_EXPIRE).equalsIgnoreCase(HIVE_METASTORE_CACHE_EXPIRE_AFTER_WRITE)) {
+    final String expiry = hiveConf.get(HIVE_METASTORE_CACHE_EXPIRE);
+    if (!Strings.isNullOrEmpty(expiry)) {
+      if (expiry.equalsIgnoreCase(HIVE_METASTORE_CACHE_EXPIRE_AFTER_WRITE)) {
         expireAfterWrite = true;
-      } else if (hiveConfigOverride.get(HIVE_METASTORE_CACHE_EXPIRE).equalsIgnoreCase(HIVE_METASTORE_CACHE_EXPIRE_AFTER_ACCESS)) {
+      } else if (expiry.equalsIgnoreCase(HIVE_METASTORE_CACHE_EXPIRE_AFTER_ACCESS)) {
         expireAfterWrite = false;
       }
       logger.warn("Hive metastore cache expire policy is set to {}", expireAfterWrite? "expireAfterWrite" : "expireAfterAccess");
     }
 
-    this.hiveConfigOverride = hiveConfigOverride;
-
     final CacheBuilder cacheBuilder = CacheBuilder
         .newBuilder();
 
@@ -226,7 +218,7 @@ public abstract class DrillHiveMetaStoreClient extends HiveMetaStoreClient {
 
   /** Helper method which gets table metadata. Retries once if the first call to fetch the metadata fails */
   protected static HiveReadEntry getHiveReadEntryHelper(final IMetaStoreClient mClient, final String dbName,
-      final String tableName, final Map<String, String> hiveConfigOverride) throws TException {
+      final String tableName) throws TException {
     Table t = null;
     try {
       t = mClient.getTable(dbName, tableName);
@@ -256,7 +248,7 @@ public abstract class DrillHiveMetaStoreClient extends HiveMetaStoreClient {
       hivePartitions = null;
     }
 
-    return new HiveReadEntry(new HiveTable(t), hivePartitions, hiveConfigOverride);
+    return new HiveReadEntry(new HiveTable(t), hivePartitions);
   }
 
   /**
@@ -269,10 +261,9 @@ public abstract class DrillHiveMetaStoreClient extends HiveMetaStoreClient {
     private final UserGroupInformation ugiForRpc;
     private HiveAuthorizationHelper authorizer;
 
-    private HiveClientWithAuthzWithCaching(final HiveConf hiveConf, final Map<String, String> hiveConfigOverride,
-        final UserGroupInformation ugiForRpc, final String userName)
-        throws TException {
-      super(hiveConf, hiveConfigOverride);
+    private HiveClientWithAuthzWithCaching(final HiveConf hiveConf, final UserGroupInformation ugiForRpc,
+        final String userName) throws TException {
+      super(hiveConf);
       this.ugiForRpc = ugiForRpc;
       this.authorizer = new HiveAuthorizationHelper(this, hiveConf, userName);
     }
@@ -355,9 +346,8 @@ public abstract class DrillHiveMetaStoreClient extends HiveMetaStoreClient {
    * HiveMetaStoreClient that provides a shared MetaStoreClient implementation with caching.
    */
   private static class NonCloseableHiveClientWithCaching extends DrillHiveMetaStoreClient {
-    private NonCloseableHiveClientWithCaching(final HiveConf hiveConf,
-        final Map<String, String> hiveConfigOverride) throws MetaException {
-      super(hiveConf, hiveConfigOverride);
+    private NonCloseableHiveClientWithCaching(final HiveConf hiveConf) throws MetaException {
+      super(hiveConf);
     }
 
     @Override
@@ -426,7 +416,7 @@ public abstract class DrillHiveMetaStoreClient extends HiveMetaStoreClient {
     @Override
     public HiveReadEntry load(TableName key) throws Exception {
       synchronized (DrillHiveMetaStoreClient.this) {
-        return getHiveReadEntryHelper(DrillHiveMetaStoreClient.this, key.getDatabaseName(), key.getTableName(), hiveConfigOverride);
+        return getHiveReadEntryHelper(DrillHiveMetaStoreClient.this, key.getDatabaseName(), key.getTableName());
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeParquetSubScan.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeParquetSubScan.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeParquetSubScan.java
index b37b258..1ae7b10 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeParquetSubScan.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeParquetSubScan.java
@@ -17,10 +17,13 @@
  */
 package org.apache.drill.exec.store.hive;
 
+import com.fasterxml.jackson.annotation.JacksonInject;
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonTypeName;
+import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.store.StoragePluginRegistry;
 
 import java.io.IOException;
 import java.util.List;
@@ -33,17 +36,20 @@ public class HiveDrillNativeParquetSubScan extends HiveSubScan {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveDrillNativeParquetSubScan.class);
 
   @JsonCreator
-  public HiveDrillNativeParquetSubScan(@JsonProperty("userName") String userName,
+  public HiveDrillNativeParquetSubScan(@JacksonInject StoragePluginRegistry registry,
+                                       @JsonProperty("userName") String userName,
                                        @JsonProperty("splits") List<String> splits,
                                        @JsonProperty("hiveReadEntry") HiveReadEntry hiveReadEntry,
                                        @JsonProperty("splitClasses") List<String> splitClasses,
-                                       @JsonProperty("columns") List<SchemaPath> columns)
-      throws IOException, ReflectiveOperationException {
-    super(userName, splits, hiveReadEntry, splitClasses, columns);
+                                       @JsonProperty("columns") List<SchemaPath> columns,
+                                       @JsonProperty("storagePluginName") String pluginName)
+      throws IOException, ExecutionSetupException, ReflectiveOperationException {
+    super(registry, userName, splits, hiveReadEntry, splitClasses, columns, pluginName);
   }
 
-  public HiveDrillNativeParquetSubScan(final HiveSubScan subScan) throws IOException, ReflectiveOperationException {
-    this(subScan.getUserName(), subScan.getSplits(), subScan.getHiveReadEntry(), subScan.getSplitClasses(),
-        subScan.getColumns());
+  public HiveDrillNativeParquetSubScan(final HiveSubScan subScan)
+      throws IOException, ExecutionSetupException, ReflectiveOperationException {
+    super(subScan.getUserName(), subScan.getSplits(), subScan.getHiveReadEntry(), subScan.getSplitClasses(),
+        subScan.getColumns(), subScan.getStoragePlugin());
   }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeScanBatchCreator.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeScanBatchCreator.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeScanBatchCreator.java
index 9f53971..ab321ba 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeScanBatchCreator.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeScanBatchCreator.java
@@ -38,7 +38,6 @@ import org.apache.drill.exec.store.RecordReader;
 import org.apache.drill.exec.store.parquet.ParquetDirectByteBufferAllocator;
 import org.apache.drill.exec.store.parquet.columnreaders.ParquetRecordReader;
 import org.apache.drill.exec.util.ImpersonationUtil;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -66,7 +65,6 @@ public class HiveDrillNativeScanBatchCreator implements BatchCreator<HiveDrillNa
     final List<InputSplit> splits = config.getInputSplits();
     final List<Partition> partitions = config.getPartitions();
     final List<SchemaPath> columns = config.getColumns();
-    final Map<String, String> hiveConfigOverride = config.getHiveReadEntry().hiveConfigOverride;
     final String partitionDesignator = context.getOptions()
         .getOption(ExecConstants.FILESYSTEM_PARTITION_COLUMN_LABEL).string_val;
 
@@ -100,7 +98,7 @@ public class HiveDrillNativeScanBatchCreator implements BatchCreator<HiveDrillNa
     int currentPartitionIndex = 0;
     final List<RecordReader> readers = Lists.newArrayList();
 
-    final Configuration conf = getConf(hiveConfigOverride);
+    final HiveConf conf = config.getHiveConf();
 
     // TODO: In future we can get this cache from Metadata cached on filesystem.
     final Map<String, ParquetMetadata> footerCache = Maps.newHashMap();
@@ -146,22 +144,13 @@ public class HiveDrillNativeScanBatchCreator implements BatchCreator<HiveDrillNa
     // If there are no readers created (which is possible when the table is empty or no row groups are matched),
     // create an empty RecordReader to output the schema
     if (readers.size() == 0) {
-      readers.add(new HiveRecordReader(table, null, null, columns, context, hiveConfigOverride,
+      readers.add(new HiveRecordReader(table, null, null, columns, context, conf,
         ImpersonationUtil.createProxyUgi(config.getUserName(), context.getQueryUserName())));
     }
 
     return new ScanBatch(config, context, oContext, readers.iterator(), partitionColumns, selectedPartitionColumns);
   }
 
-  private Configuration getConf(final Map<String, String> hiveConfigOverride) {
-    final HiveConf hiveConf = new HiveConf();
-    for(Entry<String, String> prop : hiveConfigOverride.entrySet()) {
-      hiveConf.set(prop.getKey(), prop.getValue());
-    }
-
-    return hiveConf;
-  }
-
   /**
    * Get the list of row group numbers for given file input split. Logic used here is same as how Hive's parquet input
    * format finds the row group numbers for input split.

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveMetadataProvider.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveMetadataProvider.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveMetadataProvider.java
index c1aa9fa..4006e44 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveMetadataProvider.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveMetadataProvider.java
@@ -26,6 +26,7 @@ import org.apache.drill.exec.util.ImpersonationUtil;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -58,15 +59,17 @@ public class HiveMetadataProvider {
   private final UserGroupInformation ugi;
   private final boolean isPartitionedTable;
   private final Map<Partition, List<InputSplitWrapper>> partitionInputSplitMap;
+  private final HiveConf hiveConf;
   private List<InputSplitWrapper> tableInputSplits;
 
   private final Stopwatch watch = new Stopwatch();
 
-  public HiveMetadataProvider(final String userName, final HiveReadEntry hiveReadEntry) {
+  public HiveMetadataProvider(final String userName, final HiveReadEntry hiveReadEntry, final HiveConf hiveConf) {
     this.hiveReadEntry = hiveReadEntry;
     this.ugi = ImpersonationUtil.createProxyUgi(userName);
     isPartitionedTable = hiveReadEntry.getTable().getPartitionKeysSize() > 0;
     partitionInputSplitMap = Maps.newHashMap();
+    this.hiveConf = hiveConf;
   }
 
   /**
@@ -238,8 +241,8 @@ public class HiveMetadataProvider {
       return ugi.doAs(new PrivilegedExceptionAction<List<InputSplitWrapper>>() {
         public List<InputSplitWrapper> run() throws Exception {
           final List<InputSplitWrapper> splits = Lists.newArrayList();
-          final JobConf job = new JobConf();
-          HiveUtilities.addConfToJob(job, properties, hiveReadEntry.hiveConfigOverride);
+          final JobConf job = new JobConf(hiveConf);
+          HiveUtilities.addConfToJob(job, properties);
           job.setInputFormat(HiveUtilities.getInputFormatClass(job, sd, hiveReadEntry.getTable()));
           final Path path = new Path(sd.getLocation());
           final FileSystem fs = path.getFileSystem(job);

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java
index 481b5c4..4df33ec 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java
@@ -17,15 +17,11 @@
  */
 package org.apache.drill.exec.store.hive;
 
-import java.util.ArrayList;
-import java.util.LinkedList;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.calcite.schema.Schema.TableType;
 
 import org.apache.drill.exec.store.hive.HiveTable.HivePartition;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
 
@@ -40,16 +36,13 @@ public class HiveReadEntry {
   public HiveTable table;
   @JsonProperty("partitions")
   public List<HiveTable.HivePartition> partitions;
-  @JsonProperty("hiveConfigOverride")
-  public Map<String, String> hiveConfigOverride;
 
   @JsonIgnore
   private List<Partition> partitionsUnwrapped = Lists.newArrayList();
 
   @JsonCreator
   public HiveReadEntry(@JsonProperty("table") HiveTable table,
-                       @JsonProperty("partitions") List<HiveTable.HivePartition> partitions,
-                       @JsonProperty("hiveConfigOverride") Map<String, String> hiveConfigOverride) {
+                       @JsonProperty("partitions") List<HiveTable.HivePartition> partitions) {
     this.table = table;
     this.partitions = partitions;
     if (partitions != null) {
@@ -57,7 +50,6 @@ public class HiveReadEntry {
         partitionsUnwrapped.add(part.getPartition());
       }
     }
-    this.hiveConfigOverride = hiveConfigOverride;
   }
 
   @JsonIgnore

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
index 73c126c..6fdca8f 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
@@ -40,6 +40,8 @@ import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.AbstractRecordReader;
 import org.apache.drill.exec.vector.AllocationHelper;
 import org.apache.drill.exec.vector.ValueVector;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Partition;
@@ -98,7 +100,7 @@ public class HiveRecordReader extends AbstractRecordReader {
   protected List<ValueVector> vectors = Lists.newArrayList();
   protected List<ValueVector> pVectors = Lists.newArrayList();
   protected boolean empty;
-  private Map<String, String> hiveConfigOverride;
+  private HiveConf hiveConf;
   private FragmentContext fragmentContext;
   private String defaultPartitionValue;
   private final UserGroupInformation proxyUgi;
@@ -106,13 +108,13 @@ public class HiveRecordReader extends AbstractRecordReader {
   protected static final int TARGET_RECORD_COUNT = 4000;
 
   public HiveRecordReader(Table table, Partition partition, InputSplit inputSplit, List<SchemaPath> projectedColumns,
-                          FragmentContext context, Map<String, String> hiveConfigOverride,
+                          FragmentContext context, final HiveConf hiveConf,
                           UserGroupInformation proxyUgi) throws ExecutionSetupException {
     this.table = table;
     this.partition = partition;
     this.inputSplit = inputSplit;
     this.empty = (inputSplit == null && partition == null);
-    this.hiveConfigOverride = hiveConfigOverride;
+    this.hiveConf = hiveConf;
     this.fragmentContext = context;
     this.proxyUgi = proxyUgi;
     this.managedBuffer = fragmentContext.getManagedBuffer().reallocIfNeeded(256);
@@ -120,17 +122,17 @@ public class HiveRecordReader extends AbstractRecordReader {
   }
 
   private void init() throws ExecutionSetupException {
-    final JobConf job = new JobConf();
+    final JobConf job = new JobConf(hiveConf);
 
     // Get the configured default val
-    defaultPartitionValue = HiveUtilities.getDefaultPartitionValue(hiveConfigOverride);
+    defaultPartitionValue = hiveConf.get(ConfVars.DEFAULTPARTITIONNAME.varname);
 
     try {
       final Properties tableProperties = MetaStoreUtils.getTableMetadata(table);
       final Properties partitionProperties =
           (partition == null) ?  tableProperties :
               HiveUtilities.getPartitionMetadata(partition, table);
-      HiveUtilities.addConfToJob(job, partitionProperties, hiveConfigOverride);
+      HiveUtilities.addConfToJob(job, partitionProperties);
 
       final SerDe tableSerDe = createSerDe(job, table.getSd().getSerdeInfo().getSerializationLib(), tableProperties);
       final StructObjectInspector tableOI = getStructOI(tableSerDe);

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
index 20c4e69..504d755 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
@@ -96,7 +96,7 @@ public class HiveScan extends AbstractGroupScan {
     this.columns = columns;
     this.storagePlugin = storagePlugin;
     if (metadataProvider == null) {
-      this.metadataProvider = new HiveMetadataProvider(userName, hiveReadEntry);
+      this.metadataProvider = new HiveMetadataProvider(userName, hiveReadEntry, storagePlugin.getHiveConf());
     } else {
       this.metadataProvider = metadataProvider;
     }
@@ -166,8 +166,8 @@ public class HiveScan extends AbstractGroupScan {
         parts = null;
       }
 
-      final HiveReadEntry subEntry = new HiveReadEntry(hiveReadEntry.table, parts, hiveReadEntry.hiveConfigOverride);
-      return new HiveSubScan(getUserName(), encodedInputSplits, subEntry, splitTypes, columns);
+      final HiveReadEntry subEntry = new HiveReadEntry(hiveReadEntry.table, parts);
+      return new HiveSubScan(getUserName(), encodedInputSplits, subEntry, splitTypes, columns, storagePlugin);
     } catch (IOException | ReflectiveOperationException e) {
       throw new ExecutionSetupException(e);
     }
@@ -290,6 +290,11 @@ public class HiveScan extends AbstractGroupScan {
   }
 
   @JsonIgnore
+  public HiveConf getHiveConf() {
+    return storagePlugin.getHiveConf();
+  }
+
+  @JsonIgnore
   public boolean isNativeReader() {
     return false;
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java
index aeddf28..eee7343 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java
@@ -26,6 +26,7 @@ import org.apache.drill.exec.physical.impl.ScanBatch;
 import org.apache.drill.exec.record.RecordBatch;
 import org.apache.drill.exec.store.RecordReader;
 import org.apache.drill.exec.util.ImpersonationUtil;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.mapred.InputSplit;
@@ -33,6 +34,7 @@ import org.apache.hadoop.mapred.InputSplit;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.security.UserGroupInformation;
 
+@SuppressWarnings("unused")
 public class HiveScanBatchCreator implements BatchCreator<HiveSubScan> {
 
   @Override
@@ -47,6 +49,8 @@ public class HiveScanBatchCreator implements BatchCreator<HiveSubScan> {
     final UserGroupInformation proxyUgi = ImpersonationUtil.createProxyUgi(config.getUserName(),
       context.getQueryUserName());
 
+    final HiveConf hiveConf = config.getHiveConf();
+
     // Native hive text record reader doesn't handle all types currently. For now use HiveRecordReader which uses
     // Hive InputFormat and SerDe classes to read the data.
     //if (table.getSd().getInputFormat().equals(TextInputFormat.class.getCanonicalName()) &&
@@ -60,16 +64,14 @@ public class HiveScanBatchCreator implements BatchCreator<HiveSubScan> {
     //} else {
       for (InputSplit split : splits) {
         readers.add(new HiveRecordReader(table,
-            (hasPartitions ? partitions.get(i++) : null),
-            split, config.getColumns(), context, config.getHiveReadEntry().hiveConfigOverride, proxyUgi));
+            (hasPartitions ? partitions.get(i++) : null), split, config.getColumns(), context, hiveConf, proxyUgi));
       }
     //}
 
     // If there are no readers created (which is possible when the table is empty), create an empty RecordReader to
     // output the schema
     if (readers.size() == 0) {
-      readers.add(new HiveRecordReader(table, null, null, config.getColumns(), context,
-          config.getHiveReadEntry().hiveConfigOverride, proxyUgi));
+      readers.add(new HiveRecordReader(table, null, null, config.getColumns(), context, hiveConf, proxyUgi));
     }
 
     return new ScanBatch(config, context, readers.iterator());

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
index c967c00..8f8fdba 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
@@ -19,6 +19,8 @@ package org.apache.drill.exec.store.hive;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 
 import com.google.common.collect.ImmutableSet;
@@ -41,6 +43,8 @@ import org.apache.drill.exec.store.hive.schema.HiveSchemaFactory;
 
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 
 public class HiveStoragePlugin extends AbstractStoragePlugin {
 
@@ -50,12 +54,18 @@ public class HiveStoragePlugin extends AbstractStoragePlugin {
   private final HiveSchemaFactory schemaFactory;
   private final DrillbitContext context;
   private final String name;
+  private final HiveConf hiveConf;
 
   public HiveStoragePlugin(HiveStoragePluginConfig config, DrillbitContext context, String name) throws ExecutionSetupException {
     this.config = config;
     this.context = context;
-    this.schemaFactory = new HiveSchemaFactory(this, name, config.getHiveConfigOverride());
     this.name = name;
+    this.hiveConf = createHiveConf(config.getHiveConfigOverride());
+    this.schemaFactory = new HiveSchemaFactory(this, name, hiveConf);
+  }
+
+  public HiveConf getHiveConf() {
+    return hiveConf;
   }
 
   public HiveStoragePluginConfig getConfig() {
@@ -92,7 +102,7 @@ public class HiveStoragePlugin extends AbstractStoragePlugin {
 
   @Override
   public Set<StoragePluginOptimizerRule> getLogicalOptimizerRules(OptimizerRulesContext optimizerContext) {
-    final String defaultPartitionValue = HiveUtilities.getDefaultPartitionValue(config.getHiveConfigOverride());
+    final String defaultPartitionValue = hiveConf.get(ConfVars.DEFAULTPARTITIONNAME.varname);
 
     ImmutableSet.Builder<StoragePluginOptimizerRule> ruleBuilder = ImmutableSet.builder();
 
@@ -111,4 +121,15 @@ public class HiveStoragePlugin extends AbstractStoragePlugin {
 
     return ImmutableSet.of();
   }
+
+  private static HiveConf createHiveConf(final Map<String, String> hiveConfigOverride) {
+    final HiveConf hiveConf = new HiveConf();
+    for(Entry<String, String> config : hiveConfigOverride.entrySet()) {
+      final String key = config.getKey();
+      final String value = config.getValue();
+      hiveConf.set(key, value);
+      logger.trace("HiveConfig Override {}={}", key, value);
+    }
+    return hiveConf;
+  }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
index 907539e..74b68a6 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
@@ -23,6 +23,7 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
+import com.fasterxml.jackson.annotation.JacksonInject;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.common.expression.SchemaPath;
@@ -35,6 +36,8 @@ import org.apache.drill.exec.physical.base.SubScan;
 import org.apache.drill.exec.physical.impl.ScanBatch;
 import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.RecordReader;
+import org.apache.drill.exec.store.StoragePluginRegistry;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.mapred.InputSplit;
@@ -58,17 +61,28 @@ public class HiveSubScan extends AbstractBase implements SubScan {
   protected Table table;
   @JsonIgnore
   protected List<Partition> partitions;
+  @JsonIgnore
+  protected HiveStoragePlugin storagePlugin;
 
   private List<String> splits;
   private List<String> splitClasses;
   protected List<SchemaPath> columns;
 
   @JsonCreator
-  public HiveSubScan(@JsonProperty("userName") String userName,
+  public HiveSubScan(@JacksonInject StoragePluginRegistry registry,
+                     @JsonProperty("userName") String userName,
                      @JsonProperty("splits") List<String> splits,
                      @JsonProperty("hiveReadEntry") HiveReadEntry hiveReadEntry,
                      @JsonProperty("splitClasses") List<String> splitClasses,
-                     @JsonProperty("columns") List<SchemaPath> columns) throws IOException, ReflectiveOperationException {
+                     @JsonProperty("columns") List<SchemaPath> columns,
+                     @JsonProperty("storagePluginName") String pluginName)
+      throws IOException, ExecutionSetupException, ReflectiveOperationException {
+    this(userName, splits, hiveReadEntry, splitClasses, columns, (HiveStoragePlugin)registry.getPlugin(pluginName));
+  }
+
+  public HiveSubScan(final String userName, final List<String> splits, final HiveReadEntry hiveReadEntry,
+      final List<String> splitClasses, final List<SchemaPath> columns, final HiveStoragePlugin plugin)
+    throws IOException, ReflectiveOperationException {
     super(userName);
     this.hiveReadEntry = hiveReadEntry;
     this.table = hiveReadEntry.getTable();
@@ -76,12 +90,24 @@ public class HiveSubScan extends AbstractBase implements SubScan {
     this.splits = splits;
     this.splitClasses = splitClasses;
     this.columns = columns;
+    this.storagePlugin = plugin;
 
     for (int i = 0; i < splits.size(); i++) {
       inputSplits.add(deserializeInputSplit(splits.get(i), splitClasses.get(i)));
     }
   }
 
+  @JsonProperty("storagePluginName")
+  @SuppressWarnings("unused")
+  public String getStoragePluginName() {
+    return storagePlugin.getName();
+  }
+
+  @JsonIgnore
+  public HiveStoragePlugin getStoragePlugin() {
+    return storagePlugin;
+  }
+
   public List<String> getSplits() {
     return splits;
   }
@@ -130,7 +156,7 @@ public class HiveSubScan extends AbstractBase implements SubScan {
   @Override
   public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) throws ExecutionSetupException {
     try {
-      return new HiveSubScan(getUserName(), splits, hiveReadEntry, splitClasses, columns);
+      return new HiveSubScan(getUserName(), splits, hiveReadEntry, splitClasses, columns, storagePlugin);
     } catch (IOException | ReflectiveOperationException e) {
       throw new ExecutionSetupException(e);
     }
@@ -145,4 +171,9 @@ public class HiveSubScan extends AbstractBase implements SubScan {
   public int getOperatorType() {
     return CoreOperatorType.HIVE_SUB_SCAN_VALUE;
   }
+
+  @JsonIgnore
+  public HiveConf getHiveConf() {
+    return storagePlugin.getHiveConf();
+  }
 }
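
A note on the constructor change above: the subscan no longer carries its plugin by value. It serializes only "storagePluginName" and relies on Jackson injecting the StoragePluginRegistry at deserialization time, from which the HiveStoragePlugin (and hence the shared HiveConf) is re-resolved via registry.getPlugin(pluginName). A hedged sketch of how such a @JacksonInject parameter gets satisfied, using Jackson's standard InjectableValues mechanism (the wrapper method is illustrative, not Drill's actual plan-deserialization code):

import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.drill.exec.store.StoragePluginRegistry;
import org.apache.drill.exec.store.hive.HiveSubScan;

// Illustrative only: registers the registry as an injectable value so that
// Jackson can pass it into HiveSubScan's @JacksonInject constructor argument.
public class SubScanDeserializationSketch {
  public static HiveSubScan deserialize(String subScanJson, StoragePluginRegistry registry)
      throws Exception {
    final ObjectMapper mapper = new ObjectMapper();
    mapper.setInjectableValues(
        new InjectableValues.Std().addValue(StoragePluginRegistry.class, registry));
    // Jackson reads "storagePluginName" from the JSON, injects the registry,
    // and the constructor re-resolves the plugin via registry.getPlugin(name).
    return mapper.readValue(subScanJson, HiveSubScan.class);
  }
}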

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
index 00597d8..e75afae 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
@@ -355,18 +355,6 @@ public class HiveUtilities {
     return null;
   }
 
-  public static String getDefaultPartitionValue(final Map<String, String> hiveConfigOverride) {
-    // Check if the default partition config in given Hive config override in Hive storage pluging definition.
-    String defaultPartitionValue = hiveConfigOverride.get(ConfVars.DEFAULTPARTITIONNAME.varname);
-    if (!Strings.isNullOrEmpty(defaultPartitionValue)) {
-      return defaultPartitionValue;
-    }
-
-    // Create a HiveConf and get the configured value. If any hive-site.xml file on the classpath has the property
-    // defined, it will be returned. Otherwise default value is returned.
-    return new HiveConf().getVar(ConfVars.DEFAULTPARTITIONNAME);
-  }
-
   /**
    * Utility method which gets table or partition {@link InputFormat} class. First it
    * tries to get the class name from given StorageDescriptor object. If it doesn't contain it tries to get it from
@@ -397,18 +385,12 @@ public class HiveUtilities {
    *
    * @param job {@link JobConf} instance.
    * @param properties New config properties
-   * @param hiveConfigOverride HiveConfig override.
+   * @param hiveConf HiveConf of Hive storage plugin
    */
-  public static void addConfToJob(final JobConf job, final Properties properties,
-      final Map<String, String> hiveConfigOverride) {
-    final HiveConf hiveConf = new HiveConf();
+  public static void addConfToJob(final JobConf job, final Properties properties) {
     for (Object obj : properties.keySet()) {
-      hiveConf.set((String) obj, (String) properties.get(obj));
-    }
-    for(Map.Entry<String, String> entry : hiveConfigOverride.entrySet()) {
-      hiveConf.set(entry.getKey(), entry.getValue());
+      job.set((String) obj, (String) properties.get(obj));
     }
-    job.addResource(hiveConf);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/drill/blob/88ea7a25/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
index 05ab3a7..5eae544 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
@@ -19,12 +19,10 @@ package org.apache.drill.exec.store.hive.schema;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.base.Stopwatch;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
@@ -60,33 +58,22 @@ public class HiveSchemaFactory implements SchemaFactory {
   private final LoadingCache<String, DrillHiveMetaStoreClient> metaStoreClientLoadingCache;
 
   private final HiveStoragePlugin plugin;
-  private final Map<String, String> hiveConfigOverride;
   private final String schemaName;
   private final HiveConf hiveConf;
   private final boolean isDrillImpersonationEnabled;
   private final boolean isHS2DoAsSet;
 
-  public HiveSchemaFactory(final HiveStoragePlugin plugin, final String name, final Map<String, String> hiveConfigOverride) throws ExecutionSetupException {
+  public HiveSchemaFactory(final HiveStoragePlugin plugin, final String name, final HiveConf hiveConf) throws ExecutionSetupException {
     this.schemaName = name;
     this.plugin = plugin;
 
-    this.hiveConfigOverride = hiveConfigOverride;
-    hiveConf = new HiveConf();
-    if (hiveConfigOverride != null) {
-      for (Map.Entry<String, String> entry : hiveConfigOverride.entrySet()) {
-        final String property = entry.getKey();
-        final String value = entry.getValue();
-        hiveConf.set(property, value);
-        logger.trace("HiveConfig Override {}={}", property, value);
-      }
-    }
-
+    this.hiveConf = hiveConf;
     isHS2DoAsSet = hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS);
     isDrillImpersonationEnabled = plugin.getContext().getConfig().getBoolean(ExecConstants.IMPERSONATION_ENABLED);
 
     try {
       processUserMetastoreClient =
-          DrillHiveMetaStoreClient.createNonCloseableClientWithCaching(hiveConf, hiveConfigOverride);
+          DrillHiveMetaStoreClient.createNonCloseableClientWithCaching(hiveConf);
     } catch (MetaException e) {
       throw new ExecutionSetupException("Failure setting up Hive metastore client.", e);
     }
@@ -105,8 +92,7 @@ public class HiveSchemaFactory implements SchemaFactory {
         .build(new CacheLoader<String, DrillHiveMetaStoreClient>() {
           @Override
           public DrillHiveMetaStoreClient load(String userName) throws Exception {
-            return DrillHiveMetaStoreClient.createClientWithAuthz(processUserMetastoreClient, hiveConf,
-                HiveSchemaFactory.this.hiveConfigOverride, userName);
+            return DrillHiveMetaStoreClient.createClientWithAuthz(processUserMetastoreClient, hiveConf, userName);
           }
         });
   }

