hive-commits mailing list archives

From: ser...@apache.org
Subject: svn commit: r1673969 [2/19] - in /hive/branches/llap: ./ beeline/src/java/org/apache/hive/beeline/ bin/ cli/src/java/org/apache/hadoop/hive/cli/ cli/src/test/org/apache/hadoop/hive/cli/ common/ common/src/java/org/apache/hadoop/hive/common/jsonexplain/...
Date: Wed, 15 Apr 2015 22:04:07 GMT
Modified: hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java (original)
+++ hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java Wed Apr 15 22:04:00 2015
@@ -29,7 +29,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
@@ -44,7 +43,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hive.hcatalog.common.HCatConstants;
 import org.apache.hive.hcatalog.messaging.MessageFactory;
 
-import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
 /**
@@ -146,11 +144,9 @@ public class DbNotificationListener exte
     NotificationEvent event = new NotificationEvent(0, now(),
         HCatConstants.HCAT_ALTER_TABLE_EVENT,
         msgFactory.buildAlterTableMessage(before, after).toString());
-    if (event != null) {
-      event.setDbName(after.getDbName());
-      event.setTableName(after.getTableName());
-      enqueue(event);
-    }
+    event.setDbName(after.getDbName());
+    event.setTableName(after.getTableName());
+    enqueue(event);
   }
 
   /**
@@ -162,7 +158,7 @@ public class DbNotificationListener exte
     Table t = partitionEvent.getTable();
     NotificationEvent event = new NotificationEvent(0, now(),
         HCatConstants.HCAT_ADD_PARTITION_EVENT,
-        msgFactory.buildAddPartitionMessage(t, partitionEvent.getPartitions()).toString());
+        msgFactory.buildAddPartitionMessage(t, partitionEvent.getPartitionIterator()).toString());
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
     enqueue(event);
@@ -192,11 +188,9 @@ public class DbNotificationListener exte
     NotificationEvent event = new NotificationEvent(0, now(),
         HCatConstants.HCAT_ALTER_PARTITION_EVENT,
         msgFactory.buildAlterPartitionMessage(before, after).toString());
-    if (event != null) {
-      event.setDbName(before.getDbName());
-      event.setTableName(before.getTableName());
-      enqueue(event);
-    }
+    event.setDbName(before.getDbName());
+    event.setTableName(before.getTableName());
+    enqueue(event);
   }
 
   /**

Modified: hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java (original)
+++ hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java Wed Apr 15 22:04:00 2015
@@ -21,8 +21,6 @@ package org.apache.hive.hcatalog.listene
 
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
 
 import javax.jms.Connection;
 import javax.jms.ConnectionFactory;
@@ -130,15 +128,14 @@ public class NotificationListener extend
     // and message selector string as "HCAT_EVENT = HCAT_ADD_PARTITION"
     if (partitionEvent.getStatus()) {
       Table table = partitionEvent.getTable();
-      List<Partition> partitions = partitionEvent.getPartitions();
       String topicName = getTopicName(table);
       if (topicName != null && !topicName.equals("")) {
-        send(messageFactory.buildAddPartitionMessage(table, partitions), topicName);
+        send(messageFactory.buildAddPartitionMessage(table, partitionEvent.getPartitionIterator()), topicName);
       } else {
         LOG.info("Topic name not found in metastore. Suppressing HCatalog notification for "
-            + partitions.get(0).getDbName()
+            + partitionEvent.getTable().getDbName()
             + "."
-            + partitions.get(0).getTableName()
+            + partitionEvent.getTable().getTableName()
             + " To enable notifications for this table, please do alter table set properties ("
             + HCatConstants.HCAT_MSGBUS_TOPIC_NAME
             + "=<dbname>.<tablename>) or whatever you want topic name to be.");

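Both listeners above now consume partitions through AddPartitionEvent.getPartitionIterator() rather than the removed getPartitions() list, so a large add-partition event no longer has to be materialized in memory all at once. A minimal sketch of the consuming pattern, assuming a caller that only needs to walk the partitions once (the logPartitions helper is illustrative, not part of this commit):

    import java.util.Iterator;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;

    // Walk the partitions lazily; nothing forces them into a List.
    static void logPartitions(AddPartitionEvent event) {
      Iterator<Partition> it = event.getPartitionIterator();
      while (it.hasNext()) {
        Partition p = it.next();
        System.out.println(p.getDbName() + "." + p.getTableName()
            + " -> " + p.getValues());
      }
    }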
Modified: hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java (original)
+++ hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java Wed Apr 15 22:04:00 2015
@@ -20,16 +20,14 @@
 package org.apache.hive.hcatalog.messaging;
 
 import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hive.hcatalog.messaging.json.JSONMessageFactory;
 
+import java.util.Iterator;
 import java.util.List;
 
 /**
@@ -140,20 +138,10 @@ public abstract class MessageFactory {
     /**
      * Factory method for AddPartitionMessage.
      * @param table The Table to which the partitions are added.
-     * @param partitions The set of Partitions being added.
+     * @param partitions An iterator over the set of Partitions being added.
      * @return AddPartitionMessage instance.
      */
-    public abstract AddPartitionMessage buildAddPartitionMessage(Table table, List<Partition> partitions);
-
-  /**
-   * Factory method for AddPartitionMessage.
-   * @param table The Table to which the partitions are added.
-   * @param partitionSpec The set of Partitions being added.
-   * @return AddPartitionMessage instance.
-   */
-  @InterfaceAudience.LimitedPrivate({"Hive"})
-  @InterfaceStability.Evolving
-  public abstract AddPartitionMessage buildAddPartitionMessage(Table table, PartitionSpecProxy partitionSpec);
+  public abstract AddPartitionMessage buildAddPartitionMessage(Table table, Iterator<Partition> partitions);
 
   /**
    * Factory method for building AlterPartitionMessage

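The two AddPartitionMessage factory overloads (List<Partition> and PartitionSpecProxy) collapse above into a single Iterator<Partition> signature. A caller that still holds an eager list can adapt with List.iterator(); a rough sketch, assuming table, p1 and p2 are an existing metastore Table and Partitions, and msgFactory is whatever MessageFactory instance the caller already uses:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hive.hcatalog.messaging.AddPartitionMessage;

    // Adapt an existing List<Partition> to the new iterator-based API.
    List<Partition> parts = Arrays.asList(p1, p2);
    AddPartitionMessage msg =
        msgFactory.buildAddPartitionMessage(table, parts.iterator());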
Modified: hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java (original)
+++ hive/branches/llap/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java Wed Apr 15 22:04:00 2015
@@ -19,14 +19,14 @@
 
 package org.apache.hive.hcatalog.messaging.json;
 
+import com.google.common.base.Function;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hive.hcatalog.messaging.AddPartitionMessage;
 import org.apache.hive.hcatalog.messaging.AlterPartitionMessage;
 import org.apache.hive.hcatalog.messaging.AlterTableMessage;
@@ -39,7 +39,12 @@ import org.apache.hive.hcatalog.messagin
 import org.apache.hive.hcatalog.messaging.MessageDeserializer;
 import org.apache.hive.hcatalog.messaging.MessageFactory;
 
-import java.util.*;
+import javax.annotation.Nullable;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
 
 /**
  * The JSON implementation of the MessageFactory. Constructs JSON implementations of
@@ -98,17 +103,9 @@ public class JSONMessageFactory extends
   }
 
   @Override
-  public AddPartitionMessage buildAddPartitionMessage(Table table, List<Partition> partitions) {
+  public AddPartitionMessage buildAddPartitionMessage(Table table, Iterator<Partition> partitionsIterator) {
     return new JSONAddPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
-        table.getTableName(), getPartitionKeyValues(table, partitions), now());
-  }
-
-  @Override
-  @InterfaceAudience.LimitedPrivate({"Hive"})
-  @InterfaceStability.Evolving
-  public AddPartitionMessage buildAddPartitionMessage(Table table, PartitionSpecProxy partitionSpec) {
-    return new JSONAddPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
-        table.getTableName(), getPartitionKeyValues(table, partitionSpec), now());
+        table.getTableName(), getPartitionKeyValues(table, partitionsIterator), now());
   }
 
   @Override
@@ -142,22 +139,12 @@ public class JSONMessageFactory extends
     return partitionKeys;
   }
 
-  private static List<Map<String, String>> getPartitionKeyValues(Table table, List<Partition> partitions) {
-    List<Map<String, String>> partitionList = new ArrayList<Map<String, String>>(partitions.size());
-    for (Partition partition : partitions)
-      partitionList.add(getPartitionKeyValues(table, partition));
-    return partitionList;
-  }
-
-  @InterfaceAudience.LimitedPrivate({"Hive"})
-  @InterfaceStability.Evolving
-  private static List<Map<String, String>> getPartitionKeyValues(Table table, PartitionSpecProxy partitionSpec) {
-    List<Map<String, String>> partitionList = new ArrayList<Map<String, String>>();
-    PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
-    while (iterator.hasNext()) {
-      Partition partition = iterator.next();
-      partitionList.add(getPartitionKeyValues(table, partition));
-    }
-    return partitionList;
+  private static List<Map<String, String>> getPartitionKeyValues(final Table table, Iterator<Partition> iterator) {
+    return Lists.newArrayList(Iterators.transform(iterator, new Function<Partition, Map<String, String>>() {
+      @Override
+      public Map<String, String> apply(@Nullable Partition partition) {
+        return getPartitionKeyValues(table, partition);
+      }
+    }));
   }
 }

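JSONMessageFactory above swaps the two hand-rolled copy loops for Guava's Iterators.transform, which wraps the source iterator lazily; Lists.newArrayList then drains it into a concrete list in a single pass. The same idiom in isolation, with toy data:

    import com.google.common.base.Function;
    import com.google.common.collect.Iterators;
    import com.google.common.collect.Lists;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    // transform() is lazy: nothing is computed until
    // Lists.newArrayList drains the iterator.
    Iterator<Integer> lengths = Iterators.transform(
        Arrays.asList("us", "uk", "in").iterator(),
        new Function<String, Integer>() {
          @Override
          public Integer apply(String region) {
            return region.length();
          }
        });
    List<Integer> materialized = Lists.newArrayList(lengths); // [2, 2, 2]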
Modified: hive/branches/llap/hcatalog/webhcat/java-client/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/java-client/pom.xml?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/webhcat/java-client/pom.xml (original)
+++ hive/branches/llap/hcatalog/webhcat/java-client/pom.xml Wed Apr 15 22:04:00 2015
@@ -46,6 +46,11 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.hive.hcatalog</groupId>
+      <artifactId>hive-hcatalog-server-extensions</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-exec</artifactId>
       <version>${project.version}</version>

Modified: hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java (original)
+++ hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java Wed Apr 15 22:04:00 2015
@@ -18,6 +18,7 @@
  */
 package org.apache.hive.hcatalog.api;
 
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
@@ -27,6 +28,7 @@ import org.apache.hadoop.hive.common.cla
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hive.hcatalog.api.repl.ReplicationTask;
 import org.apache.hive.hcatalog.common.HCatException;
 import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
 
@@ -379,6 +381,24 @@ public abstract class HCatClient {
     throws HCatException;
 
   /**
+   * Drops partition(s) that match the specified (and possibly partial) partition specification.
+   * A partial partition-specification is one where not all partition-keys have associated values. For example,
+   * for a table ('myDb.myTable') with 2 partition keys (dt string, region string),
+   * if for each dt ('20120101', '20120102', etc.) there can exist 3 regions ('us', 'uk', 'in'), then,
+   *  1. Complete partition spec: dropPartitions('myDb', 'myTable', {dt='20120101', region='us'}) would drop 1 partition.
+   *  2. Partial  partition spec: dropPartitions('myDb', 'myTable', {dt='20120101'}) would drop all 3 partitions,
+   *                              with dt='20120101' (i.e. region = 'us', 'uk' and 'in').
+   * @param dbName The database name.
+   * @param tableName The table name.
+   * @param partitionSpec The partition specification, {[col_name,value],[col_name2,value2]}.
+   * @param ifExists Hive returns an error if the partition specified does not exist, unless ifExists is set to true.
+   * @param deleteData Whether to delete the underlying data.
+   * @throws HCatException,ConnectionFailureException
+   */
+   public abstract void dropPartitions(String dbName, String tableName,
+                    Map<String, String> partitionSpec, boolean ifExists, boolean deleteData)
+    throws HCatException;
+  /**
    * List partitions by filter.
    *
    * @param dbName The database name.
@@ -467,6 +487,23 @@ public abstract class HCatClient {
    */
   public abstract String getMessageBusTopicName(String dbName, String tableName) throws HCatException;
 
+
+  /**
+   * Get an iterator that iterates over a list of replication tasks needed to replicate all the
+   * events that have taken place for a given db/table.
+   * @param lastEventId : The last event id that was processed for this reader. The returned
+   *                    replication tasks will start from this point forward
+   * @param maxEvents : Maximum number of events to consider for generating the
+   *                  replication tasks. If < 1, then all available events will be considered.
+   * @param dbName : The database whose events we are interested in.
+   * @param tableName : The table whose events we are interested in; if null,
+   *                  this function behaves as if it were running at the db level.
+   * @return an iterator over replication tasks that can be processed one by one.
+   * @throws HCatException
+   */
+  public abstract Iterator<ReplicationTask> getReplicationTasks(
+      long lastEventId, int maxEvents, String dbName, String tableName) throws HCatException;
+
   /**
    * Get a list of notifications
    * @param lastEventId The last event id that was consumed by this reader.  The returned

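Putting the two new HCatClient methods together, a rough usage sketch based on the javadoc above (client is an assumed, already-created HCatClient; a maxEvents value below 1 requests all available events per the contract above):

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;
    import org.apache.hive.hcatalog.api.repl.ReplicationTask;

    // Partial spec: drops all regions under dt='20120101', keeping the
    // underlying data (deleteData = false).
    Map<String, String> spec = new HashMap<String, String>();
    spec.put("dt", "20120101");
    client.dropPartitions("myDb", "myTable", spec, true, false);

    // All events (maxEvents < 1) for myDb.myTable, from the beginning.
    Iterator<ReplicationTask> tasks =
        client.getReplicationTasks(0, 0, "myDb", "myTable");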
Modified: hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java (original)
+++ hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java Wed Apr 15 22:04:00 2015
@@ -21,9 +21,11 @@ package org.apache.hive.hcatalog.api;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.base.Function;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.commons.lang.StringUtils;
@@ -63,6 +65,8 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hive.hcatalog.api.repl.HCatReplicationTaskIterator;
+import org.apache.hive.hcatalog.api.repl.ReplicationTask;
 import org.apache.hive.hcatalog.common.HCatConstants;
 import org.apache.hive.hcatalog.common.HCatException;
 import org.apache.hive.hcatalog.common.HCatUtil;
@@ -72,6 +76,8 @@ import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.annotation.Nullable;
+
 /**
  * The HCatClientHMSImpl is the Hive Metastore client based implementation of
  * HCatClient.
@@ -567,33 +573,35 @@ public class HCatClientHMSImpl extends H
         && "TRUE".equalsIgnoreCase(table.getParameters().get("EXTERNAL"));
   }
 
-  private void dropPartitionsUsingExpressions(Table table, Map<String, String> partitionSpec, boolean ifExists)
-    throws SemanticException, TException {
+  private void dropPartitionsUsingExpressions(Table table, Map<String, String> partitionSpec,
+                                              boolean ifExists, boolean deleteData)
+      throws SemanticException, TException {
     LOG.info("HCatClient: Dropping partitions using partition-predicate Expressions.");
     ExprNodeGenericFuncDesc partitionExpression = new ExpressionBuilder(table, partitionSpec).build();
     ObjectPair<Integer, byte[]> serializedPartitionExpression =
         new ObjectPair<Integer, byte[]>(partitionSpec.size(),
             Utilities.serializeExpressionToKryo(partitionExpression));
     hmsClient.dropPartitions(table.getDbName(), table.getTableName(), Arrays.asList(serializedPartitionExpression),
-        !isExternal(table),  // Delete data?
-        false,               // Ignore Protection?
-        ifExists,            // Fail if table doesn't exist?
-        false);              // Need results back?
+        deleteData && !isExternal(table),  // Delete data?
+        false,                             // Ignore Protection?
+        ifExists,                          // Fail if table doesn't exist?
+        false);                            // Need results back?
   }
 
   private void dropPartitionsIteratively(String dbName, String tableName,
-                                         Map<String, String> partitionSpec, boolean ifExists) throws HCatException, TException {
+                                         Map<String, String> partitionSpec, boolean ifExists, boolean deleteData)
+      throws HCatException, TException {
     LOG.info("HCatClient: Dropping partitions iteratively.");
     List<Partition> partitions = hmsClient.listPartitionsByFilter(dbName, tableName,
         getFilterString(partitionSpec), (short) -1);
     for (Partition partition : partitions) {
-      dropPartition(partition, ifExists);
+      dropPartition(partition, ifExists, deleteData);
     }
   }
 
   @Override
   public void dropPartitions(String dbName, String tableName,
-                 Map<String, String> partitionSpec, boolean ifExists)
+                 Map<String, String> partitionSpec, boolean ifExists, boolean deleteData)
     throws HCatException {
     LOG.info("HCatClient dropPartitions(db=" + dbName + ",table=" + tableName + ", partitionSpec: ["+ partitionSpec + "]).");
     try {
@@ -602,17 +610,17 @@ public class HCatClientHMSImpl extends H
 
       if (hiveConfig.getBoolVar(HiveConf.ConfVars.METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS)) {
         try {
-          dropPartitionsUsingExpressions(table, partitionSpec, ifExists);
+          dropPartitionsUsingExpressions(table, partitionSpec, ifExists, deleteData);
         }
         catch (SemanticException parseFailure) {
           LOG.warn("Could not push down partition-specification to back-end, for dropPartitions(). Resorting to iteration.",
               parseFailure);
-          dropPartitionsIteratively(dbName, tableName, partitionSpec, ifExists);
+          dropPartitionsIteratively(dbName, tableName, partitionSpec, ifExists, deleteData);
         }
       }
       else {
         // Not using expressions.
-        dropPartitionsIteratively(dbName, tableName, partitionSpec, ifExists);
+        dropPartitionsIteratively(dbName, tableName, partitionSpec, ifExists, deleteData);
       }
     } catch (NoSuchObjectException e) {
       throw new ObjectNotFoundException(
@@ -627,10 +635,16 @@ public class HCatClientHMSImpl extends H
     }
   }
 
-  private void dropPartition(Partition partition, boolean ifExists)
+  @Override
+  public void dropPartitions(String dbName, String tableName,
+                             Map<String, String> partitionSpec, boolean ifExists) throws HCatException {
+    dropPartitions(dbName, tableName, partitionSpec, ifExists, true);
+  }
+
+  private void dropPartition(Partition partition, boolean ifExists, boolean deleteData)
     throws HCatException, MetaException, TException {
     try {
-      hmsClient.dropPartition(partition.getDbName(), partition.getTableName(), partition.getValues());
+      hmsClient.dropPartition(partition.getDbName(), partition.getTableName(), partition.getValues(), deleteData);
     } catch (NoSuchObjectException e) {
       if (!ifExists) {
         throw new ObjectNotFoundException(
@@ -965,18 +979,27 @@ public class HCatClientHMSImpl extends H
   }
 
   @Override
+  public Iterator<ReplicationTask> getReplicationTasks(
+      long lastEventId, int maxEvents, String dbName, String tableName) throws HCatException {
+    return new HCatReplicationTaskIterator(this, lastEventId, maxEvents, dbName, tableName);
+  }
+
+  @Override
   public List<HCatNotificationEvent> getNextNotification(long lastEventId, int maxEvents,
                                                          IMetaStoreClient.NotificationFilter filter)
       throws HCatException {
     try {
-      List<HCatNotificationEvent> events = new ArrayList<HCatNotificationEvent>();
       NotificationEventResponse rsp = hmsClient.getNextNotification(lastEventId, maxEvents, filter);
       if (rsp != null && rsp.getEvents() != null) {
-        for (NotificationEvent event : rsp.getEvents()) {
-          events.add(new HCatNotificationEvent(event));
-        }
+        return Lists.transform(rsp.getEvents(), new Function<NotificationEvent, HCatNotificationEvent>() {
+          @Override
+          public HCatNotificationEvent apply(@Nullable NotificationEvent notificationEvent) {
+            return new HCatNotificationEvent(notificationEvent);
+          }
+        });
+      } else {
+        return new ArrayList<HCatNotificationEvent>();
       }
-      return events;
     } catch (TException e) {
       throw new ConnectionFailureException("TException while getting notifications", e);
     }

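One subtlety in the getNextNotification() rewrite above: Guava's Lists.transform returns a lazy view, so the wrapping Function runs again on every access rather than once up front. A self-contained illustration of that behavior, and of copying the view when a stable, fully materialized list is wanted:

    import com.google.common.base.Function;
    import com.google.common.collect.Lists;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Lists.transform returns a view: apply() re-runs on every get().
    List<Integer> view = Lists.transform(Arrays.asList(1, 2, 3),
        new Function<Integer, Integer>() {
          @Override
          public Integer apply(Integer i) {
            return i * 10;
          }
        });
    // Copying the view materializes it once.
    List<Integer> copy = new ArrayList<Integer>(view); // [10, 20, 30]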
Modified: hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatNotificationEvent.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatNotificationEvent.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatNotificationEvent.java (original)
+++ hive/branches/llap/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatNotificationEvent.java Wed Apr 15 22:04:00 2015
@@ -32,6 +32,8 @@ public class HCatNotificationEvent {
   private String tableName;
   private String message;
 
+  public enum Scope { DB, TABLE, UNKNOWN };
+
   HCatNotificationEvent(NotificationEvent event) {
     eventId = event.getEventId();
     eventTime = event.getEventTime();
@@ -45,6 +47,20 @@ public class HCatNotificationEvent {
     return eventId;
   }
 
+  public Scope getEventScope() {
+    // Eventually, we want this to be a richer description covering
+    // DB, TABLE, ROLE, etc. scopes. For now, we have a trivial
+    // implementation with only DB and TABLE scopes, determined by
+    // whether or not the tableName is null.
+    if (dbName != null){
+      if (tableName != null){
+        return Scope.TABLE;
+      }
+      return Scope.DB;
+    }
+    return Scope.UNKNOWN;
+  }
+
   public int getEventTime() {
     return eventTime;
   }

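The new Scope accessor lets consumers branch on event granularity without null-checking dbName/tableName themselves. A small dispatch sketch (handleDbEvent, handleTableEvent and LOG are illustrative placeholders, not part of this commit):

    // Illustrative dispatch on HCatNotificationEvent.getEventScope().
    switch (event.getEventScope()) {
      case DB:
        handleDbEvent(event);    // dbName set, tableName null
        break;
      case TABLE:
        handleTableEvent(event); // both dbName and tableName set
        break;
      default:                   // UNKNOWN: dbName was null
        LOG.warn("Unknown scope for event " + event.getEventId());
    }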
Modified: hive/branches/llap/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java (original)
+++ hive/branches/llap/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java Wed Apr 15 22:04:00 2015
@@ -18,18 +18,24 @@
  */
 package org.apache.hive.hcatalog.api;
 
+import java.io.IOException;
 import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
+import com.google.common.base.Function;
+import com.google.common.collect.Iterables;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStore;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 import org.apache.hadoop.hive.ql.WindowsPathUtil;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
@@ -42,12 +48,17 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
 import org.apache.hadoop.mapred.TextInputFormat;
+import org.apache.hive.hcatalog.api.repl.Command;
+import org.apache.hive.hcatalog.api.repl.ReplicationTask;
+import org.apache.hive.hcatalog.api.repl.ReplicationUtils;
+import org.apache.hive.hcatalog.api.repl.StagingDirectoryProvider;
 import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
 import org.apache.hive.hcatalog.common.HCatConstants;
 import org.apache.hive.hcatalog.common.HCatException;
 import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hive.hcatalog.data.schema.HCatFieldSchema.Type;
 import org.apache.hive.hcatalog.NoExitSecurityManager;
+import org.apache.hive.hcatalog.listener.DbNotificationListener;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -63,6 +74,8 @@ import static org.junit.Assert.assertArr
 
 import org.apache.hadoop.util.Shell;
 
+import javax.annotation.Nullable;
+
 public class TestHCatClient {
   private static final Logger LOG = LoggerFactory.getLogger(TestHCatClient.class);
   private static final String msPort = "20101";
@@ -71,6 +84,8 @@ public class TestHCatClient {
   private static final String replicationTargetHCatPort = "20102";
   private static HiveConf replicationTargetHCatConf;
   private static SecurityManager securityManager;
+  private static boolean useExternalMS = false;
+  private static boolean useExternalMSForReplication = false;
 
   private static class RunMS implements Runnable {
 
@@ -101,18 +116,28 @@ public class TestHCatClient {
 
   @AfterClass
   public static void tearDown() throws Exception {
-    LOG.info("Shutting down metastore.");
-    System.setSecurityManager(securityManager);
+    if (!useExternalMS) {
+      LOG.info("Shutting down metastore.");
+      System.setSecurityManager(securityManager);
+    }
   }
 
   @BeforeClass
   public static void startMetaStoreServer() throws Exception {
 
     hcatConf = new HiveConf(TestHCatClient.class);
+    String metastoreUri = System.getProperty(HiveConf.ConfVars.METASTOREURIS.varname);
+    if (metastoreUri != null) {
+      hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUri);
+      useExternalMS = true;
+      return;
+    }
     if (Shell.WINDOWS) {
       WindowsPathUtil.convertPathsFromWindowsToHdfs(hcatConf);
     }
 
+    System.setProperty(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
+        DbNotificationListener.class.getName()); // turn on db notification listener on metastore
     Thread t = new Thread(new RunMS(msPort));
     t.start();
     Thread.sleep(10000);
@@ -162,8 +187,12 @@ public class TestHCatClient {
     assertTrue(testDb.getProperties().size() == 0);
     String warehouseDir = System
       .getProperty("test.warehouse.dir", "/user/hive/warehouse");
-    String expectedDir = warehouseDir.replaceFirst("pfile:///", "pfile:/");
-    assertEquals(expectedDir + "/" + db + ".db", testDb.getLocation());
+    if (useExternalMS) {
+      assertTrue(testDb.getLocation().matches(".*" + "/" + db + ".db"));
+    } else {
+      String expectedDir = warehouseDir.replaceFirst("pfile:///", "pfile:/");
+      assertEquals(expectedDir + "/" + db + ".db", testDb.getLocation());
+    }
     ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
     cols.add(new HCatFieldSchema("id", Type.INT, "id comment"));
     cols.add(new HCatFieldSchema("value", Type.STRING, "value comment"));
@@ -213,7 +242,7 @@ public class TestHCatClient {
     assertEquals("checking " + serdeConstants.SERIALIZATION_NULL_FORMAT, Character.toString('\006'),
       table2.getSerdeParams().get(serdeConstants.SERIALIZATION_NULL_FORMAT));
     
-    assertEquals((expectedDir + "/" + db + ".db/" + tableTwo).toLowerCase(), table2.getLocation().toLowerCase());
+    assertTrue(table2.getLocation().toLowerCase().matches(".*" + ("/" + db + ".db/" + tableTwo).toLowerCase()));
 
     HCatCreateTableDesc tableDesc3 = HCatCreateTableDesc.create(db,
       tableThree, cols).fileFormat("orcfile").build();
@@ -372,7 +401,7 @@ public class TestHCatClient {
       .ifNotExists(true).location("/tmp/" + dbName).build();
     client.createDatabase(dbDesc);
     HCatDatabase newDB = client.getDatabase(dbName);
-    assertTrue(newDB.getLocation().equalsIgnoreCase("file:/tmp/" + dbName));
+    assertTrue(newDB.getLocation().matches(".*/tmp/" + dbName));
     client.close();
   }
 
@@ -792,6 +821,113 @@ public class TestHCatClient {
     }
   }
 
+  /**
+   * Test for event-based replication scenario
+   *
+   * Does not test whether replication actually happened; merely verifies that we can consume a
+   * replication-task iterator appropriately, calling all the functions expected of the interface, without errors.
+   */
+  @Test
+  public void testReplicationTaskIter() throws Exception {
+
+    HCatClient sourceMetastore = HCatClient.create(new Configuration(hcatConf));
+
+    List<HCatNotificationEvent> notifs = sourceMetastore.getNextNotification(
+        0, 0, new IMetaStoreClient.NotificationFilter() {
+      @Override
+      public boolean accept(NotificationEvent event) {
+        return true;
+      }
+    });
+    for(HCatNotificationEvent n : notifs){
+      LOG.info("notif from dblistener:" + n.getEventId()
+          + ":" + n.getEventTime() + ",t:" + n.getEventType() + ",o:" + n.getDbName() + "." + n.getTableName());
+    }
+
+    Iterator<ReplicationTask> taskIter = sourceMetastore.getReplicationTasks(0, 0, "mydb", null);
+    while(taskIter.hasNext()){
+      ReplicationTask task = taskIter.next();
+      HCatNotificationEvent n = task.getEvent();
+      LOG.info("notif from tasks:" + n.getEventId()
+          + ":" + n.getEventTime() + ",t:" + n.getEventType() + ",o:" + n.getDbName() + "." + n.getTableName()
+          + ",s:" + n.getEventScope());
+      LOG.info("task :" + task.getClass().getName());
+      if (task.needsStagingDirs()){
+        StagingDirectoryProvider provider = new StagingDirectoryProvider() {
+          @Override
+          public String getStagingDirectory(String key) {
+            LOG.info("getStagingDirectory(" + key + ") called!");
+            return "/tmp/" + key.replaceAll(" ","_");
+          }
+        };
+        task
+            .withSrcStagingDirProvider(provider)
+            .withDstStagingDirProvider(provider);
+      }
+      if (task.isActionable()){
+        LOG.info("task was actionable!");
+        Function<Command, String> commandDebugPrinter = new Function<Command, String>() {
+          @Override
+          public String apply(@Nullable Command cmd) {
+            StringBuilder sb = new StringBuilder();
+            String serializedCmd = null;
+            try {
+              serializedCmd = ReplicationUtils.serializeCommand(cmd);
+            } catch (IOException e) {
+              e.printStackTrace();
+              throw new RuntimeException(e);
+            }
+            sb.append("SERIALIZED:"+serializedCmd+"\n");
+            Command command = null;
+            try {
+              command = ReplicationUtils.deserializeCommand(serializedCmd);
+            } catch (IOException e) {
+              e.printStackTrace();
+              throw new RuntimeException(e);
+            }
+            sb.append("CMD:[" + command.getClass().getName() + "]\n");
+            sb.append("EVENTID:[" +command.getEventId()+"]\n");
+            for (String s : command.get()) {
+              sb.append("CMD:" + s);
+              sb.append("\n");
+            }
+            sb.append("Retriable:" + command.isRetriable() + "\n");
+            sb.append("Undoable:" + command.isUndoable() + "\n");
+            if (command.isUndoable()) {
+              for (String s : command.getUndo()) {
+                sb.append("UNDO:" + s);
+                sb.append("\n");
+              }
+            }
+            List<String> locns = command.cleanupLocationsPerRetry();
+            sb.append("cleanupLocationsPerRetry entries :" + locns.size());
+            for (String s : locns){
+              sb.append("RETRY_CLEANUP:"+s);
+              sb.append("\n");
+            }
+            locns = command.cleanupLocationsAfterEvent();
+            sb.append("cleanupLocationsAfterEvent entries :" + locns.size());
+            for (String s : locns){
+              sb.append("AFTER_EVENT_CLEANUP:"+s);
+              sb.append("\n");
+            }
+            return sb.toString();
+          }
+        };
+        LOG.info("On src:");
+        for (String s : Iterables.transform(task.getSrcWhCommands(), commandDebugPrinter)){
+          LOG.info(s);
+        }
+        LOG.info("On dest:");
+        for (String s : Iterables.transform(task.getDstWhCommands(), commandDebugPrinter)){
+          LOG.info(s);
+        }
+      } else {
+        LOG.info("task was not actionable.");
+      }
+    }
+  }
+
   /**
    * Test for detecting schema-changes for an HCatalog table, across 2 different HCat instances.
    * A table is created with the same schema on 2 HCat instances. The table-schema is modified on the source HCat

Modified: hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java (original)
+++ hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java Wed Apr 15 22:04:00 2015
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.collect.Lists;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.hive.cli.CliSessionState;
@@ -296,7 +297,8 @@ public class TestMetaStoreEventListener
     AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
     assert partEvent.getStatus();
     Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011");
-    validateAddPartition(part, partEvent.getPartitions().get(0));
+    Partition partAdded = partEvent.getPartitionIterator().next();
+    validateAddPartition(part, partAdded);
     validateTableInAddPartition(tbl, partEvent.getTable());
     validateAddPartition(part, prePartEvent.getPartitions().get(0));
 
@@ -313,11 +315,12 @@ public class TestMetaStoreEventListener
     hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3));
     ++listSize;
     AddPartitionEvent multiplePartitionEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
-    assertEquals("Unexpected number of partitions in event!", 3, multiplePartitionEvent.getPartitions().size());
     assertEquals("Unexpected table value.", table, multiplePartitionEvent.getTable());
-    assertEquals("Unexpected partition value.", partition1.getValues(), multiplePartitionEvent.getPartitions().get(0).getValues());
-    assertEquals("Unexpected partition value.", partition2.getValues(), multiplePartitionEvent.getPartitions().get(1).getValues());
-    assertEquals("Unexpected partition value.", partition3.getValues(), multiplePartitionEvent.getPartitions().get(2).getValues());
+    List<Partition> multiParts = Lists.newArrayList(multiplePartitionEvent.getPartitionIterator());
+    assertEquals("Unexpected number of partitions in event!", 3, multiParts.size());
+    assertEquals("Unexpected partition value.", partition1.getValues(), multiParts.get(0).getValues());
+    assertEquals("Unexpected partition value.", partition2.getValues(), multiParts.get(1).getValues());
+    assertEquals("Unexpected partition value.", partition3.getValues(), multiParts.get(2).getValues());
 
     driver.run(String.format("alter table %s touch partition (%s)", tblName, "b='2011'"));
     listSize++;
@@ -352,7 +355,8 @@ public class TestMetaStoreEventListener
 
     AddPartitionEvent appendPartEvent =
         (AddPartitionEvent)(notifyList.get(listSize-1));
-    validateAddPartition(newPart, appendPartEvent.getPartitions().get(0));
+    Partition partAppended = appendPartEvent.getPartitionIterator().next();
+    validateAddPartition(newPart, partAppended);
 
     PreAddPartitionEvent preAppendPartEvent =
         (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));

Modified: hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java (original)
+++ hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java Wed Apr 15 22:04:00 2015
@@ -97,7 +97,7 @@ public class TestHiveAuthorizerCheckInvo
         + " (i int, j int, k string) partitioned by (city string, `date` string) ");
     runCmd("create database " + dbName);
     // Need a separate table for ACID testing since it has to be bucketed and it has to be Acid
-    runCmd("create table " + acidTableName + " (i int, j int) clustered by (i) into 2 buckets " +
+    runCmd("create table " + acidTableName + " (i int, j int, k int) clustered by (k) into 2 buckets " +
         "stored as orc TBLPROPERTIES ('transactional'='true')");
   }
 
@@ -273,7 +273,7 @@ public class TestHiveAuthorizerCheckInvo
     List<HivePrivilegeObject> inputs = io.getLeft();
     assertEquals(1, inputs.size());
     tableObj = inputs.get(0);
-    assertEquals(1, tableObj.getColumns().size());
+    assertEquals(2, tableObj.getColumns().size());
     assertEquals("j", tableObj.getColumns().get(0));
   }
 

Modified: hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java (original)
+++ hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java Wed Apr 15 22:04:00 2015
@@ -371,7 +371,7 @@ public class TestCompactor {
     executeStatementOnDriver("insert into " + tblName + " partition (ds) values (1, 'fred', " +
         "'today'), (2, 'wilma', 'yesterday')", driver);
 
-    executeStatementOnDriver("update " + tblName + " set a = 3", driver);
+    executeStatementOnDriver("update " + tblName + " set b = 'fred' where a = 1", driver);
 
     executeStatementOnDriver("delete from " + tblName + " where b = 'fred'", driver);
 

Modified: hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java (original)
+++ hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java Wed Apr 15 22:04:00 2015
@@ -50,6 +50,8 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
+import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.TableType;
@@ -106,7 +108,7 @@ public class TestJdbcDriver2 {
   public static void setUpBeforeClass() throws SQLException, ClassNotFoundException{
     Class.forName(driverName);
     Connection con1 = getConnection("default");
-    System.setProperty(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_VERBOSE.varname, "" + true);
+    System.setProperty(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose");
 
     Statement stmt1 = con1.createStatement();
     assertNotNull("Statement is null", stmt1);
@@ -893,6 +895,54 @@ public class TestJdbcDriver2 {
     assertFalse(res.next());
   }
 
+  @Test
+  public void testIntervalTypes() throws Exception {
+    Statement stmt = con.createStatement();
+
+    // Since interval types not currently supported as table columns, need to create them
+    // as expressions.
+    ResultSet res = stmt.executeQuery(
+        "select case when c17 is null then null else interval '1' year end as col1,"
+        + " c17 -  c17 as col2 from " + dataTypeTableName + " order by col1");
+    ResultSetMetaData meta = res.getMetaData();
+
+    assertEquals("col1", meta.getColumnLabel(1));
+    assertEquals(java.sql.Types.OTHER, meta.getColumnType(1));
+    assertEquals("interval_year_month", meta.getColumnTypeName(1));
+    assertEquals(11, meta.getColumnDisplaySize(1));
+    assertEquals(11, meta.getPrecision(1));
+    assertEquals(0, meta.getScale(1));
+    assertEquals(HiveIntervalYearMonth.class.getName(), meta.getColumnClassName(1));
+
+    assertEquals("col2", meta.getColumnLabel(2));
+    assertEquals(java.sql.Types.OTHER, meta.getColumnType(2));
+    assertEquals("interval_day_time", meta.getColumnTypeName(2));
+    assertEquals(29, meta.getColumnDisplaySize(2));
+    assertEquals(29, meta.getPrecision(2));
+    assertEquals(0, meta.getScale(2));
+    assertEquals(HiveIntervalDayTime.class.getName(), meta.getColumnClassName(2));
+
+    // row 1 - results should be null
+    assertTrue(res.next());
+    // skip the last (partitioning) column since it is always non-null
+    for (int i = 1; i < meta.getColumnCount(); i++) {
+      assertNull("Column " + i + " should be null", res.getObject(i));
+    }
+
+    // row 2 - results should be null
+    assertTrue(res.next());
+    for (int i = 1; i < meta.getColumnCount(); i++) {
+      assertNull("Column " + i + " should be null", res.getObject(i));
+    }
+
+    // row 3
+    assertTrue(res.next());
+    assertEquals("1-0", res.getString(1));
+    assertEquals(1, ((HiveIntervalYearMonth) res.getObject(1)).getYears());
+    assertEquals("0 00:00:00.000000000", res.getString(2));
+    assertEquals(0, ((HiveIntervalDayTime) res.getObject(2)).getDays());
+  }
+
   private void doTestSelectAll(String tableName, int maxRows, int fetchSize) throws Exception {
     boolean isPartitionTable = tableName.equals(partitionedTableName);
 

Modified: hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java (original)
+++ hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java Wed Apr 15 22:04:00 2015
@@ -266,7 +266,7 @@ public class TestJdbcWithMiniHS2 {
 
     // Set some conf parameters
     String hiveConf = "hive.cli.print.header=true;hive.server2.async.exec.shutdown.timeout=20;"
-        + "hive.server2.async.exec.threads=30;hive.server2.thrift.http.max.worker.threads=15";
+        + "hive.server2.async.exec.threads=30;hive.server2.thrift.max.worker.threads=15";
     // Set some conf vars
     String hiveVar = "stab=salesTable;icol=customerID";
     String jdbcUri = miniHS2.getJdbcURL() + "?" + hiveConf + "#" + hiveVar;
@@ -284,7 +284,7 @@ public class TestJdbcWithMiniHS2 {
     verifyConfProperty(stmt, "hive.cli.print.header", "true");
     verifyConfProperty(stmt, "hive.server2.async.exec.shutdown.timeout", "20");
     verifyConfProperty(stmt, "hive.server2.async.exec.threads", "30");
-    verifyConfProperty(stmt, "hive.server2.thrift.http.max.worker.threads",
+    verifyConfProperty(stmt, "hive.server2.thrift.max.worker.threads",
         "15");
     verifyConfProperty(stmt, "stab", "salesTable");
     verifyConfProperty(stmt, "icol", "customerID");

Modified: hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java (original)
+++ hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java Wed Apr 15 22:04:00 2015
@@ -155,7 +155,7 @@ public class TestSSL {
         cause = cause.getCause();
       }
       Assert.assertEquals("org.apache.http.NoHttpResponseException", cause.getClass().getName());
-      Assert.assertEquals("The target server failed to respond", cause.getMessage());
+      Assert.assertTrue(cause.getMessage().contains("failed to respond"));
     }
     miniHS2.stop();
   }

Modified: hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java (original)
+++ hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java Wed Apr 15 22:04:00 2015
@@ -160,7 +160,7 @@ public class TestThriftHttpCLIService ex
     String httpUrl = transportMode + "://" + host + ":" + port +
         "/" + thriftHttpPath + "/";
     httpClient.addRequestInterceptor(
-        new HttpBasicAuthInterceptor(USERNAME, PASSWORD));
+        new HttpBasicAuthInterceptor(USERNAME, PASSWORD, null, null));
     return new THttpClient(httpUrl, httpClient);
   }
 

Modified: hive/branches/llap/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/src/test/resources/testconfiguration.properties?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/llap/itests/src/test/resources/testconfiguration.properties Wed Apr 15 22:04:00 2015
@@ -29,6 +29,7 @@ minimr.query.files=auto_sortmerge_join_1
   list_bucket_dml_10.q,\
   load_fs2.q,\
   load_hdfs_file_with_space_in_the_name.q,\
+  non_native_window_udf.q, \
   optrstat_groupby.q,\
   parallel_orderby.q,\
   ql_rewrite_gbtoidx.q,\
@@ -180,9 +181,11 @@ minitez.query.files.shared=alter_merge_2
   update_where_non_partitioned.q,\
   update_where_partitioned.q,\
   update_two_cols.q,\
+  vector_aggregate_9.q,\
   vector_between_in.q,\
   vector_bucket.q,\
   vector_cast_constant.q,\
+  vector_char_2.q,\
   vector_char_4.q,\
   vector_char_mapjoin1.q,\
   vector_char_simple.q,\
@@ -190,6 +193,7 @@ minitez.query.files.shared=alter_merge_2
   vector_coalesce_2.q,\
   vector_count_distinct.q,\
   vector_data_types.q,\
+  vector_date_1.q,\
   vector_decimal_1.q,\
   vector_decimal_10_0.q,\
   vector_decimal_2.q,\
@@ -203,6 +207,8 @@ minitez.query.files.shared=alter_merge_2
   vector_decimal_mapjoin.q,\
   vector_decimal_math_funcs.q,\
   vector_decimal_precision.q,\
+  vector_decimal_round.q,\
+  vector_decimal_round_2.q,\
   vector_decimal_trailing.q,\
   vector_decimal_udf.q,\
   vector_decimal_udf2.q,\
@@ -210,8 +216,12 @@ minitez.query.files.shared=alter_merge_2
   vector_elt.q,\
   vector_groupby_3.q,\
   vector_groupby_reduce.q,\
+  vector_if_expr.q,\
+  vector_interval_1.q,\
+  vector_interval_2.q,\
   vector_left_outer_join.q,\
   vector_mapjoin_reduce.q,\
+  vector_multi_insert.q,\
   vector_non_string_partition.q,\
   vector_orderby_5.q,\
   vector_partition_diff_num_cols.q,\
@@ -287,6 +297,8 @@ minitez.query.files=bucket_map_join_tez1
   bucket_map_join_tez2.q,\
   dynamic_partition_pruning.q,\
   dynamic_partition_pruning_2.q,\
+  explainuser_1.q,\
+  explainuser_2.q,\
   hybridhashjoin.q,\
   mapjoin_decimal.q,\
   lvj_mapjoin.q,\
@@ -302,6 +314,7 @@ minitez.query.files=bucket_map_join_tez1
   tez_schema_evolution.q,\
   tez_union.q,\
   tez_union2.q,\
+  tez_union_view.q,\
   tez_union_decimal.q,\
   tez_union_group_by.q,\
   tez_smb_main.q,\

Modified: hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java (original)
+++ hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java Wed Apr 15 22:04:00 2015
@@ -44,6 +44,8 @@ import java.util.Calendar;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
+import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hive.service.cli.TableSchema;
 import org.apache.hive.service.cli.Type;
 
@@ -443,6 +445,10 @@ public abstract class HiveBaseResultSet
         return new BigDecimal((String)value);
       case DATE_TYPE:
         return Date.valueOf((String) value);
+      case INTERVAL_YEAR_MONTH_TYPE:
+        return HiveIntervalYearMonth.valueOf((String) value);
+      case INTERVAL_DAY_TIME_TYPE:
+        return HiveIntervalDayTime.valueOf((String) value);
       case ARRAY_TYPE:
       case MAP_TYPE:
       case STRUCT_TYPE:

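HiveBaseResultSet above maps the new interval types by parsing the server's string representation with each type's valueOf(); the literal formats are the ones asserted in TestJdbcDriver2 ("1-0" for year-month, "0 00:00:00.000000000" for day-time). The same conversion, shown standalone:

    import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
    import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;

    // Parse the string forms the JDBC driver receives from the server.
    HiveIntervalYearMonth iym = HiveIntervalYearMonth.valueOf("1-0");
    assert iym.getYears() == 1;

    HiveIntervalDayTime idt =
        HiveIntervalDayTime.valueOf("0 00:00:00.000000000");
    assert idt.getDays() == 0;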
Modified: hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java (original)
+++ hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java Wed Apr 15 22:04:00 2015
@@ -50,10 +50,11 @@ import java.util.concurrent.TimeUnit;
 import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
 
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.protocol.HttpContext;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
 import org.apache.hive.service.auth.HiveAuthFactory;
 import org.apache.hive.service.auth.KerberosSaslHelper;
@@ -73,9 +74,17 @@ import org.apache.hive.service.cli.thrif
 import org.apache.hive.service.cli.thrift.TRenewDelegationTokenResp;
 import org.apache.hive.service.cli.thrift.TSessionHandle;
 import org.apache.http.HttpRequestInterceptor;
-import org.apache.http.conn.scheme.Scheme;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.CookieStore;
+import org.apache.http.client.ServiceUnavailableRetryStrategy;
+import org.apache.http.config.Registry;
+import org.apache.http.config.RegistryBuilder;
+import org.apache.http.conn.socket.ConnectionSocketFactory;
 import org.apache.http.conn.ssl.SSLSocketFactory;
-import org.apache.http.impl.client.DefaultHttpClient;
+import org.apache.http.impl.client.BasicCookieStore;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.impl.conn.BasicHttpClientConnectionManager;
 import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.transport.THttpClient;
@@ -173,6 +182,7 @@ public class HiveConnection implements j
     supportedProtocols.add(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V5);
     supportedProtocols.add(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6);
     supportedProtocols.add(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V7);
+    supportedProtocols.add(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8);
 
     // open client session
     openSession();
@@ -235,7 +245,7 @@ public class HiveConnection implements j
   }
 
   private TTransport createHttpTransport() throws SQLException, TTransportException {
-    DefaultHttpClient httpClient;
+    CloseableHttpClient httpClient;
     boolean useSsl = isSslConnection();
     // Create an http client from the configs
     httpClient = getHttpClient(useSsl);
@@ -259,35 +269,76 @@ public class HiveConnection implements j
     return transport;
   }
 
-  private DefaultHttpClient getHttpClient(Boolean useSsl) throws SQLException {
-    DefaultHttpClient httpClient = new DefaultHttpClient();
+  private CloseableHttpClient getHttpClient(Boolean useSsl) throws SQLException {
+    boolean isCookieEnabled = sessConfMap.get(JdbcConnectionParams.COOKIE_AUTH) == null ||
+      (!JdbcConnectionParams.COOKIE_AUTH_FALSE.equalsIgnoreCase(
+      sessConfMap.get(JdbcConnectionParams.COOKIE_AUTH)));
+    String cookieName = sessConfMap.get(JdbcConnectionParams.COOKIE_NAME) == null ?
+      JdbcConnectionParams.DEFAULT_COOKIE_NAMES_HS2 :
+      sessConfMap.get(JdbcConnectionParams.COOKIE_NAME);
+    CookieStore cookieStore = isCookieEnabled ? new BasicCookieStore() : null;
+    HttpClientBuilder httpClientBuilder;
     // Request interceptor for any request pre-processing logic
     HttpRequestInterceptor requestInterceptor;
-    // If Kerberos
+
+    // Configure http client for kerberos/password-based authentication
     if (isKerberosAuthMode()) {
       /**
        * Add an interceptor which sets the appropriate header in the request.
       * It performs the Kerberos authentication and gets the final service ticket
       * to send to the server before every request.
       * In https mode, the entire exchange is encrypted.
-       * TODO: Optimize this with a mix of kerberos + using cookie.
        */
       requestInterceptor =
           new HttpKerberosRequestInterceptor(sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL),
-              host, getServerHttpUrl(useSsl), assumeSubject);
+              host, getServerHttpUrl(useSsl), assumeSubject, cookieStore, cookieName);
     }
     else {
       /**
        * Add an interceptor to pass username/password in the header.
       * In https mode, the entire exchange is encrypted.
        */
-      requestInterceptor = new HttpBasicAuthInterceptor(getUserName(), getPassword());
+      requestInterceptor = new HttpBasicAuthInterceptor(getUserName(), getPassword(),
+                                                        cookieStore, cookieName);
+    }
+    // Configure http client for cookie-based authentication
+    if (isCookieEnabled) {
+      // Create an http client that retries once when the server returns a 401 status code.
+      httpClientBuilder = HttpClients.custom().setServiceUnavailableRetryStrategy(
+          new ServiceUnavailableRetryStrategy() {
+            @Override
+            public boolean retryRequest(
+                final HttpResponse response,
+                final int executionCount,
+                final HttpContext context) {
+              int statusCode = response.getStatusLine().getStatusCode();
+              boolean ret = statusCode == 401 && executionCount <= 1;
+
+              // Set the context attribute to true, which the request interceptor
+              // interprets as a signal to resend the credentials.
+              if (ret) {
+                context.setAttribute(Utils.HIVE_SERVER2_RETRY_KEY,
+                    Utils.HIVE_SERVER2_RETRY_TRUE);
+              }
+              return ret;
+            }
+
+            @Override
+            public long getRetryInterval() {
+              // Retry immediately.
+              return 0;
+            }
+          });
+    } else {
+      httpClientBuilder = HttpClientBuilder.create();
     }
-    // Configure httpClient for SSL
+    // Add the request interceptor to the client builder
+    httpClientBuilder.addInterceptorFirst(requestInterceptor);
+    // Configure http client for SSL
     if (useSsl) {
       String sslTrustStorePath = sessConfMap.get(JdbcConnectionParams.SSL_TRUST_STORE);
       String sslTrustStorePassword = sessConfMap.get(
-          JdbcConnectionParams.SSL_TRUST_STORE_PASSWORD);
+        JdbcConnectionParams.SSL_TRUST_STORE_PASSWORD);
       KeyStore sslTrustStore;
       SSLSocketFactory socketFactory;
       /**
@@ -311,21 +362,25 @@ public class HiveConnection implements j
           // Pick trust store config from the given path
           sslTrustStore = KeyStore.getInstance(JdbcConnectionParams.SSL_TRUST_STORE_TYPE);
           sslTrustStore.load(new FileInputStream(sslTrustStorePath),
-              sslTrustStorePassword.toCharArray());
+            sslTrustStorePassword.toCharArray());
           socketFactory = new SSLSocketFactory(sslTrustStore);
         }
         socketFactory.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
-        Scheme sslScheme = new Scheme("https", 443, socketFactory);
-        httpClient.getConnectionManager().getSchemeRegistry().register(sslScheme);
+
+        final Registry<ConnectionSocketFactory> registry =
+          RegistryBuilder.<ConnectionSocketFactory>create()
+          .register("https", socketFactory)
+          .build();
+
+        httpClientBuilder.setConnectionManager(new BasicHttpClientConnectionManager(registry));
       }
       catch (Exception e) {
         String msg =  "Could not create an https connection to " +
-            jdbcUriString + ". " + e.getMessage();
+          jdbcUriString + ". " + e.getMessage();
         throw new SQLException(msg, " 08S01", e);
       }
     }
-    httpClient.addRequestInterceptor(requestInterceptor);
-    return httpClient;
+    return httpClientBuilder.build();
   }
 
   /**

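The rewritten getHttpClient() is driven by two new client parameters defined in Utils.JdbcConnectionParams below: cookieAuth (enabled unless set to false) and cookieName (defaulting to hive.server2.auth). A hedged sketch of connection URLs exercising both paths; transportMode and httpPath are the usual HTTP-mode parameters and are assumed here for illustration:

    public class CookieAuthUrls {
      public static void main(String[] args) {
        // Default: cookie auth on, cookie name hive.server2.auth; after the first
        // authenticated request the client replays the server's cookie.
        String withCookies =
            "jdbc:hive2://host:10001/default;transportMode=http;httpPath=cliservice";
        // Opt out: credentials (or a Kerberos ticket) accompany every request.
        String withoutCookies = withCookies + ";cookieAuth=false";
        // Override the cookie name when the server issues a custom one.
        String customName = withCookies + ";cookieAuth=true;cookieName=hive.server2.auth";
        System.out.println(withCookies + "\n" + withoutCookies + "\n" + customName);
      }
    }

On a 401 the retry strategy marks the HttpContext and retries once; the interceptors then attach fresh credentials, so an expired cookie costs one extra round trip rather than a failed query.
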
Modified: hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveResultSetMetaData.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveResultSetMetaData.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveResultSetMetaData.java (original)
+++ hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HiveResultSetMetaData.java Wed Apr 15 22:04:00 2015
@@ -21,6 +21,7 @@ package org.apache.hive.jdbc;
 import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
 import java.util.List;
+import org.apache.hive.service.cli.Type;
 
 /**
  * HiveResultSetMetaData.
@@ -43,9 +44,13 @@ public class HiveResultSetMetaData imple
     throw new SQLException("Method not supported");
   }
 
+  private Type getHiveType(int column) throws SQLException {
+    return JdbcColumn.typeStringToHiveType(columnTypes.get(toZeroIndex(column)));
+  }
+
   public String getColumnClassName(int column) throws SQLException {
-    int columnType = getColumnType(column);
-    return JdbcColumn.columnClassName(columnType, columnAttributes.get(toZeroIndex(column)));
+    return JdbcColumn.columnClassName(getHiveType(column),
+        columnAttributes.get(toZeroIndex(column)));
   }
 
   public int getColumnCount() throws SQLException {
@@ -53,9 +58,8 @@ public class HiveResultSetMetaData imple
   }
 
   public int getColumnDisplaySize(int column) throws SQLException {
-    int columnType = getColumnType(column);
-
-    return JdbcColumn.columnDisplaySize(columnType, columnAttributes.get(toZeroIndex(column)));
+    return JdbcColumn.columnDisplaySize(getHiveType(column),
+        columnAttributes.get(toZeroIndex(column)));
   }
 
   public String getColumnLabel(int column) throws SQLException {
@@ -79,15 +83,13 @@ public class HiveResultSetMetaData imple
   }
 
   public int getPrecision(int column) throws SQLException {
-    int columnType = getColumnType(column);
-
-    return JdbcColumn.columnPrecision(columnType, columnAttributes.get(toZeroIndex(column)));
+    return JdbcColumn.columnPrecision(getHiveType(column),
+        columnAttributes.get(toZeroIndex(column)));
   }
 
   public int getScale(int column) throws SQLException {
-    int columnType = getColumnType(column);
-
-    return JdbcColumn.columnScale(columnType, columnAttributes.get(toZeroIndex(column)));
+    return JdbcColumn.columnScale(getHiveType(column),
+        columnAttributes.get(toZeroIndex(column)));
   }
 
   public String getSchemaName(int column) throws SQLException {

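Since metadata now resolves through the Hive Type enum, interval columns report a concrete class name and precision instead of falling through to a generic mapping. A small sketch; the result set is assumed to come from the interval query sketched after HiveBaseResultSet above:

    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;

    public class IntervalMetadataSketch {
      static void describe(ResultSet rs) throws Exception {
        ResultSetMetaData md = rs.getMetaData();
        // Reports org.apache.hadoop.hive.common.type.HiveIntervalYearMonth
        // for an interval_year_month column.
        System.out.println(md.getColumnClassName(1));
        // 11 for year-month intervals (-yyyyyyy-mm), 29 for day-time intervals,
        // per the JdbcColumn precision table in this commit.
        System.out.println(md.getPrecision(1));
      }
    }
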
Modified: hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HttpBasicAuthInterceptor.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HttpBasicAuthInterceptor.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HttpBasicAuthInterceptor.java (original)
+++ hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HttpBasicAuthInterceptor.java Wed Apr 15 22:04:00 2015
@@ -25,6 +25,8 @@ import org.apache.http.HttpException;
 import org.apache.http.HttpRequest;
 import org.apache.http.HttpRequestInterceptor;
 import org.apache.http.auth.UsernamePasswordCredentials;
+import org.apache.http.client.CookieStore;
+import org.apache.http.client.protocol.ClientContext;
 import org.apache.http.impl.auth.AuthSchemeBase;
 import org.apache.http.impl.auth.BasicScheme;
 import org.apache.http.protocol.HttpContext;
@@ -37,20 +39,42 @@ import org.apache.http.protocol.HttpCont
 public class HttpBasicAuthInterceptor implements HttpRequestInterceptor {
   UsernamePasswordCredentials credentials;
   AuthSchemeBase authScheme;
+  CookieStore cookieStore;
+  boolean isCookieEnabled;
+  String cookieName;
 
-  public HttpBasicAuthInterceptor(String username, String password) {
+  public HttpBasicAuthInterceptor(String username, String password, CookieStore cookieStore,
+                           String cn) {
     if(username != null){
       credentials = new UsernamePasswordCredentials(username, password);
     }
     authScheme = new BasicScheme();
+    this.cookieStore = cookieStore;
+    isCookieEnabled = (cookieStore != null);
+    cookieName = cn;
   }
 
   @Override
   public void process(HttpRequest httpRequest, HttpContext httpContext)
       throws HttpException, IOException {
-    Header basicAuthHeader = authScheme.authenticate(
-        credentials, httpRequest, httpContext);
-    httpRequest.addHeader(basicAuthHeader);
+    if (isCookieEnabled) {
+      httpContext.setAttribute(ClientContext.COOKIE_STORE, cookieStore);
+    }
+    // Add the authentication details under the following scenarios:
+    // 1. Cookie Authentication is disabled OR
+    // 2. The first time when the request is sent OR
+    // 3. The server returns a 401, which sometimes means the cookie has expired
+    if (!isCookieEnabled || ((httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY) == null &&
+        (cookieStore == null || (cookieStore != null &&
+        Utils.needToSendCredentials(cookieStore, cookieName)))) ||
+        (httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY) != null &&
+         httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY).
+         equals(Utils.HIVE_SERVER2_RETRY_TRUE)))) {
+      Header basicAuthHeader = authScheme.authenticate(credentials, httpRequest, httpContext);
+      httpRequest.addHeader(basicAuthHeader);
+    }
+    if (isCookieEnabled) {
+      httpContext.setAttribute(Utils.HIVE_SERVER2_RETRY_KEY, Utils.HIVE_SERVER2_RETRY_FALSE);
+    }
   }
-
 }

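The compound condition in process() is dense; the decision it encodes can be restated as below. This is a hypothetical helper for readability only, not part of the patch; note that isCookieEnabled already implies cookieStore != null, which the original condition re-checks redundantly:

    private boolean shouldSendCredentials(HttpContext ctx) {
      if (!isCookieEnabled) {
        return true; // cookie auth disabled: authenticate every request
      }
      Object retryFlag = ctx.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY);
      if (retryFlag == null) {
        // First request on this context: send credentials only if no
        // matching cookie is stored yet.
        return Utils.needToSendCredentials(cookieStore, cookieName);
      }
      // The 401 retry strategy flagged this context: the cookie has
      // (probably) expired, so re-authenticate.
      return Utils.HIVE_SERVER2_RETRY_TRUE.equals(retryFlag);
    }
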
Modified: hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java (original)
+++ hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java Wed Apr 15 22:04:00 2015
@@ -25,6 +25,8 @@ import org.apache.hive.service.auth.Http
 import org.apache.http.HttpException;
 import org.apache.http.HttpRequest;
 import org.apache.http.HttpRequestInterceptor;
+import org.apache.http.client.CookieStore;
+import org.apache.http.client.protocol.ClientContext;
 import org.apache.http.protocol.HttpContext;
 
 /**
@@ -40,31 +42,59 @@ public class HttpKerberosRequestIntercep
   String host;
   String serverHttpUrl;
   boolean assumeSubject;
+  CookieStore cookieStore;
+  boolean isCookieEnabled;
+  String cookieName;
 
   // A fair reentrant lock
   private static ReentrantLock kerberosLock = new ReentrantLock(true);
 
   public HttpKerberosRequestInterceptor(String principal, String host,
-      String serverHttpUrl, boolean assumeSubject) {
+      String serverHttpUrl, boolean assumeSubject, CookieStore cs, String cn) {
     this.principal = principal;
     this.host = host;
     this.serverHttpUrl = serverHttpUrl;
     this.assumeSubject = assumeSubject;
+    this.cookieStore = cs;
+    isCookieEnabled = (cs != null);
+    cookieName = cn;
   }
 
   @Override
   public void process(HttpRequest httpRequest, HttpContext httpContext)
       throws HttpException, IOException {
     String kerberosAuthHeader;
+
     try {
       // Generate the service ticket for sending to the server.
       // Locking ensures the tokens are unique in case of concurrent requests
       kerberosLock.lock();
-      kerberosAuthHeader = HttpAuthUtils.getKerberosServiceTicket(
-          principal, host, serverHttpUrl, assumeSubject);
-      // Set the session key token (Base64 encoded) in the headers
-      httpRequest.addHeader(HttpAuthUtils.AUTHORIZATION + ": " +
-          HttpAuthUtils.NEGOTIATE + " ", kerberosAuthHeader);
+      // If cookie-based authentication is allowed, generate the ticket only when necessary:
+      // either when there are no server-side cookies in the cookie store that can be
+      // sent back, or when the server returns a 401 error code indicating that the
+      // previous cookie has expired.
+      if (isCookieEnabled) {
+        httpContext.setAttribute(ClientContext.COOKIE_STORE, cookieStore);
+      }
+      // Generate the kerberos ticket under the following scenarios:
+      // 1. Cookie Authentication is disabled OR
+      // 2. The first time when the request is sent OR
+      // 3. The server returns a 401, which sometimes means the cookie has expired
+      if (!isCookieEnabled || ((httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY) == null &&
+          (cookieStore == null || (cookieStore != null &&
+          Utils.needToSendCredentials(cookieStore, cookieName)))) ||
+          (httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY) != null &&
+          httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY).
+          equals(Utils.HIVE_SERVER2_RETRY_TRUE)))) {
+        kerberosAuthHeader = HttpAuthUtils.getKerberosServiceTicket(
+            principal, host, serverHttpUrl, assumeSubject);
+        // Set the session key token (Base64 encoded) in the headers
+        httpRequest.addHeader(HttpAuthUtils.AUTHORIZATION + ": " +
+            HttpAuthUtils.NEGOTIATE + " ", kerberosAuthHeader);
+      }
+      if (isCookieEnabled) {
+        httpContext.setAttribute(Utils.HIVE_SERVER2_RETRY_KEY, Utils.HIVE_SERVER2_RETRY_FALSE);
+      }
     } catch (Exception e) {
       throw new HttpException(e.getMessage(), e);
     }

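The Kerberos interceptor follows the same cookie/retry decision as the basic-auth one, with ticket generation additionally serialized behind the fair ReentrantLock. The standard shape of that idiom, sketched on the assumption that the unlock sits in a finally block later in the method (the hunk above ends before it):

    kerberosLock.lock(); // fair lock: waiting threads acquire in FIFO order
    try {
      // generate and attach the service ticket
    } finally {
      kerberosLock.unlock();
    }
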
Modified: hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java (original)
+++ hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java Wed Apr 15 22:04:00 2015
@@ -18,7 +18,10 @@
 
 package org.apache.hive.jdbc;
 
+import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
+import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hive.service.cli.Type;
 
 import java.math.BigInteger;
 import java.sql.Date;
@@ -64,10 +67,12 @@ public class JdbcColumn {
     return type;
   }
 
-  static String columnClassName(int columnType, JdbcColumnAttributes columnAttributes)
+  static String columnClassName(Type hiveType, JdbcColumnAttributes columnAttributes)
       throws SQLException {
-    // according to hiveTypeToSqlType possible options are:
+    int columnType = hiveTypeToSqlType(hiveType);
     switch(columnType) {
+      case Types.NULL:
+        return "null";
       case Types.BOOLEAN:
         return Boolean.class.getName();
       case Types.CHAR:
@@ -93,7 +98,17 @@ public class JdbcColumn {
         return BigInteger.class.getName();
       case Types.BINARY:
         return byte[].class.getName();
-      case Types.JAVA_OBJECT:
+      case Types.OTHER:
+      case Types.JAVA_OBJECT: {
+        switch (hiveType) {
+          case INTERVAL_YEAR_MONTH_TYPE:
+            return HiveIntervalYearMonth.class.getName();
+          case INTERVAL_DAY_TIME_TYPE:
+            return HiveIntervalDayTime.class.getName();
+          default:
+            return String.class.getName();
+        }
+      }
       case Types.ARRAY:
       case Types.STRUCT:
         return String.class.getName();
@@ -102,45 +117,61 @@ public class JdbcColumn {
     }
   }
 
-  public static int hiveTypeToSqlType(String type) throws SQLException {
+  static Type typeStringToHiveType(String type) throws SQLException {
     if ("string".equalsIgnoreCase(type)) {
-      return Types.VARCHAR;
+      return Type.STRING_TYPE;
     } else if ("varchar".equalsIgnoreCase(type)) {
-      return Types.VARCHAR;
+      return Type.VARCHAR_TYPE;
     } else if ("char".equalsIgnoreCase(type)) {
-      return Types.CHAR;
+      return Type.CHAR_TYPE;
     } else if ("float".equalsIgnoreCase(type)) {
-      return Types.FLOAT;
+      return Type.FLOAT_TYPE;
     } else if ("double".equalsIgnoreCase(type)) {
-      return Types.DOUBLE;
+      return Type.DOUBLE_TYPE;
     } else if ("boolean".equalsIgnoreCase(type)) {
-      return Types.BOOLEAN;
+      return Type.BOOLEAN_TYPE;
     } else if ("tinyint".equalsIgnoreCase(type)) {
-      return Types.TINYINT;
+      return Type.TINYINT_TYPE;
     } else if ("smallint".equalsIgnoreCase(type)) {
-      return Types.SMALLINT;
+      return Type.SMALLINT_TYPE;
     } else if ("int".equalsIgnoreCase(type)) {
-      return Types.INTEGER;
+      return Type.INT_TYPE;
     } else if ("bigint".equalsIgnoreCase(type)) {
-      return Types.BIGINT;
+      return Type.BIGINT_TYPE;
     } else if ("date".equalsIgnoreCase(type)) {
-      return Types.DATE;
+      return Type.DATE_TYPE;
     } else if ("timestamp".equalsIgnoreCase(type)) {
-      return Types.TIMESTAMP;
+      return Type.TIMESTAMP_TYPE;
+    } else if ("interval_year_month".equalsIgnoreCase(type)) {
+      return Type.INTERVAL_YEAR_MONTH_TYPE;
+    } else if ("interval_day_time".equalsIgnoreCase(type)) {
+      return Type.INTERVAL_DAY_TIME_TYPE;
     } else if ("decimal".equalsIgnoreCase(type)) {
-      return Types.DECIMAL;
+      return Type.DECIMAL_TYPE;
     } else if ("binary".equalsIgnoreCase(type)) {
-      return Types.BINARY;
+      return Type.BINARY_TYPE;
     } else if ("map".equalsIgnoreCase(type)) {
-      return Types.JAVA_OBJECT;
+      return Type.MAP_TYPE;
     } else if ("array".equalsIgnoreCase(type)) {
-      return Types.ARRAY;
+      return Type.ARRAY_TYPE;
     } else if ("struct".equalsIgnoreCase(type)) {
-      return Types.STRUCT;
+      return Type.STRUCT_TYPE;
     }
     throw new SQLException("Unrecognized column type: " + type);
   }
 
+  public static int hiveTypeToSqlType(Type hiveType) throws SQLException {
+    return hiveType.toJavaSQLType();
+  }
+
+  public static int hiveTypeToSqlType(String type) throws SQLException {
+    if ("void".equalsIgnoreCase(type) || "null".equalsIgnoreCase(type)) {
+      return Types.NULL;
+    } else {
+      return hiveTypeToSqlType(typeStringToHiveType(type));
+    }
+  }
+
   static String getColumnTypeName(String type) throws SQLException {
     // we need to convert the Hive type to the SQL type name
     // TODO: this would be better handled in an enum
@@ -168,11 +199,15 @@ public class JdbcColumn {
       return serdeConstants.TIMESTAMP_TYPE_NAME;
     } else if ("date".equalsIgnoreCase(type)) {
       return serdeConstants.DATE_TYPE_NAME;
+    } else if ("interval_year_month".equalsIgnoreCase(type)) {
+      return serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME;
+    } else if ("interval_day_time".equalsIgnoreCase(type)) {
+      return serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME;
     } else if ("decimal".equalsIgnoreCase(type)) {
       return serdeConstants.DECIMAL_TYPE_NAME;
     } else if ("binary".equalsIgnoreCase(type)) {
       return serdeConstants.BINARY_TYPE_NAME;
-    } else if ("void".equalsIgnoreCase(type)) {
+    } else if ("void".equalsIgnoreCase(type) || "null".equalsIgnoreCase(type)) {
       return serdeConstants.VOID_TYPE_NAME;
     } else if (type.equalsIgnoreCase("map")) {
       return serdeConstants.MAP_TYPE_NAME;
@@ -185,26 +220,27 @@ public class JdbcColumn {
     throw new SQLException("Unrecognized column type: " + type);
   }
 
-  static int columnDisplaySize(int columnType, JdbcColumnAttributes columnAttributes)
+  static int columnDisplaySize(Type hiveType, JdbcColumnAttributes columnAttributes)
       throws SQLException {
     // according to hiveTypeToSqlType possible options are:
+    int columnType = hiveTypeToSqlType(hiveType);
     switch(columnType) {
     case Types.BOOLEAN:
-      return columnPrecision(columnType, columnAttributes);
+      return columnPrecision(hiveType, columnAttributes);
     case Types.CHAR:
     case Types.VARCHAR:
-      return columnPrecision(columnType, columnAttributes);
+      return columnPrecision(hiveType, columnAttributes);
     case Types.BINARY:
       return Integer.MAX_VALUE; // hive has no max limit for binary
     case Types.TINYINT:
     case Types.SMALLINT:
     case Types.INTEGER:
     case Types.BIGINT:
-      return columnPrecision(columnType, columnAttributes) + 1; // allow +/-
+      return columnPrecision(hiveType, columnAttributes) + 1; // allow +/-
     case Types.DATE:
       return 10;
     case Types.TIMESTAMP:
-      return columnPrecision(columnType, columnAttributes);
+      return columnPrecision(hiveType, columnAttributes);
 
     // see http://download.oracle.com/javase/6/docs/api/constant-values.html#java.lang.Float.MAX_EXPONENT
     case Types.FLOAT:
@@ -213,8 +249,10 @@ public class JdbcColumn {
     case Types.DOUBLE:
       return 25; // e.g. -(17#).e-####
     case Types.DECIMAL:
-      return columnPrecision(columnType, columnAttributes) + 2;  // '-' sign and '.'
+      return columnPrecision(hiveType, columnAttributes) + 2;  // '-' sign and '.'
+    case Types.OTHER:
     case Types.JAVA_OBJECT:
+      return columnPrecision(hiveType, columnAttributes);
     case Types.ARRAY:
     case Types.STRUCT:
       return Integer.MAX_VALUE;
@@ -223,8 +261,9 @@ public class JdbcColumn {
     }
   }
 
-  static int columnPrecision(int columnType, JdbcColumnAttributes columnAttributes)
+  static int columnPrecision(Type hiveType, JdbcColumnAttributes columnAttributes)
       throws SQLException {
+    int columnType = hiveTypeToSqlType(hiveType);
     // according to hiveTypeToSqlType possible options are:
     switch(columnType) {
     case Types.BOOLEAN:
@@ -255,7 +294,19 @@ public class JdbcColumn {
       return 29;
     case Types.DECIMAL:
       return columnAttributes.precision;
-    case Types.JAVA_OBJECT:
+    case Types.OTHER:
+    case Types.JAVA_OBJECT: {
+      switch (hiveType) {
+        case INTERVAL_YEAR_MONTH_TYPE:
+          // -yyyyyyy-mm  : should be more than enough
+          return 11;
+        case INTERVAL_DAY_TIME_TYPE:
+          // -ddddddddd hh:mm:ss.nnnnnnnnn
+          return 29;
+        default:
+          return Integer.MAX_VALUE;
+      }
+    }
     case Types.ARRAY:
     case Types.STRUCT:
       return Integer.MAX_VALUE;
@@ -264,8 +315,9 @@ public class JdbcColumn {
     }
   }
 
-  static int columnScale(int columnType, JdbcColumnAttributes columnAttributes)
+  static int columnScale(Type hiveType, JdbcColumnAttributes columnAttributes)
       throws SQLException {
+    int columnType = hiveTypeToSqlType(hiveType);
     // according to hiveTypeToSqlType possible options are:
     switch(columnType) {
     case Types.BOOLEAN:
@@ -286,6 +338,7 @@ public class JdbcColumn {
       return 9;
     case Types.DECIMAL:
       return columnAttributes.scale;
+    case Types.OTHER:
     case Types.JAVA_OBJECT:
     case Types.ARRAY:
     case Types.STRUCT:

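SQL type derivation is now two-step: type string to Type enum (typeStringToHiveType), then Type enum to java.sql.Types (Type.toJavaSQLType). A quick sketch; mapping the interval types to Types.OTHER is an assumption inferred from the Types.OTHER switch arms added above:

    import java.sql.Types;
    import org.apache.hive.jdbc.JdbcColumn;

    public class TypeMappingSketch {
      public static void main(String[] args) throws Exception {
        // "void"/"null" now short-circuit to Types.NULL.
        System.out.println(JdbcColumn.hiveTypeToSqlType("void") == Types.NULL);
        // Intervals resolve through Type.INTERVAL_DAY_TIME_TYPE.toJavaSQLType(),
        // assumed here to be Types.OTHER.
        System.out.println(JdbcColumn.hiveTypeToSqlType("interval_day_time") == Types.OTHER);
        // Pre-existing mappings are unchanged.
        System.out.println(JdbcColumn.hiveTypeToSqlType("string") == Types.VARCHAR);
      }
    }
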
Modified: hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/Utils.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/Utils.java?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/Utils.java (original)
+++ hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/Utils.java Wed Apr 15 22:04:00 2015
@@ -34,6 +34,8 @@ import org.apache.commons.logging.LogFac
 import org.apache.hive.service.cli.HiveSQLException;
 import org.apache.hive.service.cli.thrift.TStatus;
 import org.apache.hive.service.cli.thrift.TStatusCode;
+import org.apache.http.client.CookieStore;
+import org.apache.http.cookie.Cookie;
 
 public class Utils {
   public static final Log LOG = LogFactory.getLog(Utils.class.getName());
@@ -56,6 +58,11 @@ public class Utils {
 
   private static final String URI_HIVE_PREFIX = "hive2:";
 
+  // Set to true by the retry strategy registered via setServiceUnavailableRetryStrategy() when the server returns a 401.
+  static final String HIVE_SERVER2_RETRY_KEY = "hive.server2.retryserver";
+  static final String HIVE_SERVER2_RETRY_TRUE = "true";
+  static final String HIVE_SERVER2_RETRY_FALSE = "false";
+
   public static class JdbcConnectionParams {
     // Note on client side parameter naming convention:
     // Prefer using a shorter camelCase param name instead of using the same name as the
@@ -98,6 +105,11 @@ public class Utils {
     // Default namespace value on ZooKeeper.
     // This value is used if the param "zooKeeperNamespace" is not specified in the JDBC Uri.
     static final String ZOOKEEPER_DEFAULT_NAMESPACE = "hiveserver2";
+    static final String COOKIE_AUTH = "cookieAuth";
+    static final String COOKIE_AUTH_FALSE = "false";
+    static final String COOKIE_NAME = "cookieName";
+    // The default value of the cookie name when CookieAuth=true
+    static final String DEFAULT_COOKIE_NAMES_HS2 = "hive.server2.auth";
 
     // Non-configurable params:
     // Currently supports JKS keystore format
@@ -560,4 +572,28 @@ public class Utils {
     }
     return version;
   }
+
+  /**
+   * Iterates through the cookies in the cookie store and tries to match them against
+   * cookieName. If there is a match, the cookie store already holds a valid cookie
+   * and the client does not need to send credentials for validation.
+   * @param cookieStore the cookie store
+   * @param cookieName name of the cookie which needs to be validated
+   * @return true if the client needs to send credentials to the server,
+   * false otherwise
+   */
+  static boolean needToSendCredentials(CookieStore cookieStore, String cookieName) {
+    if (cookieName == null || cookieStore == null) {
+      return true;
+    }
+
+    List<Cookie> cookies = cookieStore.getCookies();
+
+    for (Cookie c : cookies) {
+      if (c.getName().equals(cookieName)) {
+        return false;
+      }
+    }
+    return true;
+  }
 }

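needToSendCredentials() is package-private, so any direct caller must live in org.apache.hive.jdbc; the interceptors above are its intended users. A minimal behavioral sketch using HttpClient's stock cookie classes:

    package org.apache.hive.jdbc; // required: the method is package-private

    import org.apache.http.client.CookieStore;
    import org.apache.http.impl.client.BasicCookieStore;
    import org.apache.http.impl.cookie.BasicClientCookie;

    public class CookieCheckSketch {
      public static void main(String[] args) {
        CookieStore store = new BasicCookieStore();
        // Empty store: nothing to replay, so credentials must be sent.
        System.out.println(Utils.needToSendCredentials(store, "hive.server2.auth")); // true
        store.addCookie(new BasicClientCookie("hive.server2.auth", "opaque-token"));
        // A cookie with the expected name is present: rely on cookie replay.
        System.out.println(Utils.needToSendCredentials(store, "hive.server2.auth")); // false
      }
    }
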
Modified: hive/branches/llap/metastore/if/hive_metastore.thrift
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/if/hive_metastore.thrift?rev=1673969&r1=1673968&r2=1673969&view=diff
==============================================================================
--- hive/branches/llap/metastore/if/hive_metastore.thrift (original)
+++ hive/branches/llap/metastore/if/hive_metastore.thrift Wed Apr 15 22:04:00 2015
@@ -368,13 +368,25 @@ struct DecimalColumnStatsData {
 4: required i64 numDVs
 }
 
+struct Date {
+1: required i64 daysSinceEpoch
+}
+
+struct DateColumnStatsData {
+1: optional Date lowValue,
+2: optional Date highValue,
+3: required i64 numNulls,
+4: required i64 numDVs
+}
+
 union ColumnStatisticsData {
 1: BooleanColumnStatsData booleanStats,
 2: LongColumnStatsData longStats,
 3: DoubleColumnStatsData doubleStats,
 4: StringColumnStatsData stringStats,
 5: BinaryColumnStatsData binaryStats,
-6: DecimalColumnStatsData decimalStats
+6: DecimalColumnStatsData decimalStats,
+7: DateColumnStatsData dateStats
 }
 
 struct ColumnStatisticsObj {

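On the Java side these structs compile to classes in org.apache.hadoop.hive.metastore.api. A hedged sketch of filling the new union arm, assuming the usual Thrift-generated API (required fields in the constructor, optional fields via setters, one set*Stats method per union field):

    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
    import org.apache.hadoop.hive.metastore.api.Date;
    import org.apache.hadoop.hive.metastore.api.DateColumnStatsData;

    public class DateStatsSketch {
      public static void main(String[] args) {
        DateColumnStatsData dateStats = new DateColumnStatsData(0L, 365L); // numNulls, numDVs
        dateStats.setLowValue(new Date(16436L));  // 2015-01-01 as days since the epoch
        dateStats.setHighValue(new Date(16800L)); // hypothetical upper bound
        ColumnStatisticsData data = new ColumnStatisticsData();
        data.setDateStats(dateStats); // selects union field 7
        System.out.println(data);
      }
    }
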

