hive-commits mailing list archives

From hashut...@apache.org
Subject svn commit: r1526201 [8/27] - in /hive/branches/vectorization: ./ beeline/src/java/org/apache/hive/beeline/ beeline/src/test/org/apache/hive/beeline/src/test/ bin/ bin/ext/ common/src/java/org/apache/hadoop/hive/conf/ contrib/src/test/results/clientpos...
Date Wed, 25 Sep 2013 15:05:01 GMT
Modified: hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (original)
+++ hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java Wed Sep 25 15:04:49 2013
@@ -43,7 +43,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
-import org.apache.hadoop.hive.metastore.parser.FilterParser;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LogicalOperator;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
@@ -78,6 +78,9 @@ class MetaStoreDirectSql {
    */
   public List<Partition> getPartitionsViaSqlFilter(
       String dbName, String tblName, List<String> partNames, Integer max) throws MetaException {
+    if (partNames.isEmpty()) {
+      return new ArrayList<Partition>();
+    }
     String list = repeat(",?", partNames.size()).substring(1);
     return getPartitionsViaSqlFilterInternal(dbName, tblName, null,
         "and PARTITIONS.PART_NAME in (" + list + ")", partNames, new ArrayList<String>(), max);
@@ -86,15 +89,18 @@ class MetaStoreDirectSql {
   /**
    * Gets partitions by using direct SQL queries.
    * @param table The table.
-   * @param parser The parsed filter from which the SQL filter will be generated.
+   * @param tree The expression tree from which the SQL filter will be derived.
    * @param max The maximum number of partitions to return.
-   * @return List of partitions.
+   * @return List of partitions. Null if SQL filter cannot be derived.
    */
   public List<Partition> getPartitionsViaSqlFilter(
-      Table table, FilterParser parser, Integer max) throws MetaException {
+      Table table, ExpressionTree tree, Integer max) throws MetaException {
+    assert tree != null;
     List<String> params = new ArrayList<String>(), joins = new ArrayList<String>();
-    String sqlFilter = (parser == null) ? null
-        : PartitionFilterGenerator.generateSqlFilter(table, parser.tree, params, joins);
+    String sqlFilter = PartitionFilterGenerator.generateSqlFilter(table, tree, params, joins);
+    if (sqlFilter == null) {
+      return null; // Cannot make SQL filter to push down.
+    }
     return getPartitionsViaSqlFilterInternal(table.getDbName(), table.getTableName(),
         isViewTable(table), sqlFilter, params, joins, max);
   }
@@ -135,12 +141,12 @@ class MetaStoreDirectSql {
    * @param tblName Metastore table name.
    * @param isView Whether table is a view. Can be passed as null if not immediately
    *               known, then this method will get it only if necessary.
-   * @param sqlFilter SQL filter to use. Better be SQL92-compliant. Can be null.
+   * @param sqlFilter SQL filter to use. Better be SQL92-compliant.
    * @param paramsForFilter params for ?-s in SQL filter text. Params must be in order.
    * @param joinsForFilter if the filter needs additional join statement, they must be in
    *                       this list. Better be SQL92-compliant.
    * @param max The maximum number of partitions to return.
-   * @return List of partition objects. FieldSchema is currently not populated.
+   * @return List of partition objects.
    */
   private List<Partition> getPartitionsViaSqlFilterInternal(String dbName, String tblName,
       Boolean isView, String sqlFilter, List<String> paramsForFilter,
@@ -163,8 +169,8 @@ class MetaStoreDirectSql {
         "select PARTITIONS.PART_ID from PARTITIONS"
       + "  inner join TBLS on PARTITIONS.TBL_ID = TBLS.TBL_ID "
       + "  inner join DBS on TBLS.DB_ID = DBS.DB_ID "
-      + join(joinsForFilter, ' ') + " where TBLS.TBL_NAME = ? and DBS.NAME = ?"
-      + ((sqlFilter == null) ? "" : " " + sqlFilter) + orderForFilter;
+      + join(joinsForFilter, ' ') + " where TBLS.TBL_NAME = ? and DBS.NAME = ? "
+      + (sqlFilter == null ? "" : sqlFilter) + orderForFilter;
     Object[] params = new Object[paramsForFilter.size() + 2];
     params[0] = tblName;
     params[1] = dbName;
@@ -539,17 +545,18 @@ class MetaStoreDirectSql {
     return rv;
   }
 
-  private static class PartitionFilterGenerator implements TreeVisitor {
+  private static class PartitionFilterGenerator extends TreeVisitor {
     private final Table table;
-    private final StringBuilder filterBuffer;
+    private final FilterBuilder filterBuffer;
     private final List<String> params;
     private final List<String> joins;
 
-    private PartitionFilterGenerator(Table table, List<String> params, List<String> joins) {
+    private PartitionFilterGenerator(
+        Table table, List<String> params, List<String> joins) {
       this.table = table;
       this.params = params;
       this.joins = joins;
-      this.filterBuffer = new StringBuilder();
+      this.filterBuffer = new FilterBuilder(false);
     }
 
     /**
@@ -566,37 +573,53 @@ class MetaStoreDirectSql {
         return "";
       }
       PartitionFilterGenerator visitor = new PartitionFilterGenerator(table, params, joins);
-      tree.getRoot().accept(visitor);
+      tree.accept(visitor);
+      if (visitor.filterBuffer.hasError()) {
+        LOG.info("Unable to push down SQL filter: " + visitor.filterBuffer.getErrorMessage());
+        return null;
+      }
+
       // Some joins might be null (see processNode for LeafNode), clean them up.
       for (int i = 0; i < joins.size(); ++i) {
         if (joins.get(i) != null) continue;
         joins.remove(i--);
       }
-      return "and (" + visitor.filterBuffer.toString() + ")";
+      return "and (" + visitor.filterBuffer.getFilter() + ")";
+    }
+
+    @Override
+    protected void beginTreeNode(TreeNode node) throws MetaException {
+      filterBuffer.append(" (");
     }
 
     @Override
-    public void visit(TreeNode node) throws MetaException {
-      assert node != null && node.getLhs() != null && node.getRhs() != null;
-      filterBuffer.append (" (");
-      node.getLhs().accept(this);
+    protected void midTreeNode(TreeNode node) throws MetaException {
       filterBuffer.append((node.getAndOr() == LogicalOperator.AND) ? " and " : " or ");
-      node.getRhs().accept(this);
-      filterBuffer.append (") ");
+    }
+
+    @Override
+    protected void endTreeNode(TreeNode node) throws MetaException {
+      filterBuffer.append(") ");
+    }
+
+    @Override
+    protected boolean shouldStop() {
+      return filterBuffer.hasError();
     }
 
     @Override
     public void visit(LeafNode node) throws MetaException {
       if (node.operator == Operator.LIKE) {
-        // ANSI92 supports || for concatenation (we need to concat '%'-s to the parameter),
-        // but it doesn't work on all RDBMSes, e.g. on MySQL by default. So don't use it for now.
-        throw new MetaException("LIKE is not supported for SQL filter pushdown");
+        filterBuffer.setError("LIKE is not supported for SQL filter pushdown");
+        return;
       }
       int partColCount = table.getPartitionKeys().size();
-      int partColIndex = node.getPartColIndexForFilter(table);
+      int partColIndex = node.getPartColIndexForFilter(table, filterBuffer);
+      if (filterBuffer.hasError()) return;
 
-      String valueAsString = node.getFilterPushdownParam(table, partColIndex);
       // Add parameters linearly; we are traversing leaf nodes LTR, so they would match correctly.
+      String valueAsString = node.getFilterPushdownParam(table, partColIndex, filterBuffer);
+      if (filterBuffer.hasError()) return;
       params.add(valueAsString);
 
       if (joins.isEmpty()) {
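
For context, the key pattern in this hunk is that PartitionFilterGenerator no longer throws MetaException out of the visitor; it records the problem on a FilterBuilder, and the caller checks hasError() to decide whether SQL pushdown is possible. A minimal, self-contained sketch of that pattern (simplified stand-ins, not the Hive classes themselves):

    // Simplified FilterBuilder: accumulate filter text, or record an error
    // instead of throwing, so the caller can fall back to another code path.
    public class FilterBuilderSketch {
      static class FilterBuilder {
        private final StringBuilder result = new StringBuilder();
        private String errorMessage;
        FilterBuilder append(String part) { result.append(part); return this; }
        void setError(String msg) { errorMessage = msg; }
        boolean hasError() { return errorMessage != null; }
        String getFilter() { return result.toString(); }
        String getErrorMessage() { return errorMessage; }
      }

      public static void main(String[] args) {
        FilterBuilder fb = new FilterBuilder();
        fb.append("PARTITIONS.PART_NAME = ?");
        boolean sawLikeNode = true; // pretend the expression tree contained LIKE
        if (sawLikeNode) {
          fb.setError("LIKE is not supported for SQL filter pushdown");
        }
        if (fb.hasError()) {
          System.out.println("fall back to JDO: " + fb.getErrorMessage());
        } else {
          System.out.println("push down: and (" + fb.getFilter() + ")");
        }
      }
    }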

Modified: hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Wed Sep 25 15:04:49 2013
@@ -55,7 +55,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
@@ -66,6 +65,7 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
+import org.apache.hadoop.util.ReflectionUtils;
 
 public class MetaStoreUtils {
 
@@ -174,11 +174,10 @@ public class MetaStoreUtils {
    */
   static public Deserializer getDeserializer(Configuration conf,
       Properties schema) throws MetaException {
-    String lib = schema
-        .getProperty(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_LIB);
     try {
-      Deserializer deserializer = SerDeUtils.lookupDeserializer(lib);
-      (deserializer).initialize(conf, schema);
+      Deserializer deserializer = ReflectionUtils.newInstance(conf.getClassByName(
+      schema.getProperty(serdeConstants.SERIALIZATION_LIB)).asSubclass(Deserializer.class), conf);
+      deserializer.initialize(conf, schema);
       return deserializer;
     } catch (Exception e) {
       LOG.error("error in initSerDe: " + e.getClass().getName() + " "
@@ -214,7 +213,8 @@ public class MetaStoreUtils {
       return null;
     }
     try {
-      Deserializer deserializer = SerDeUtils.lookupDeserializer(lib);
+      Deserializer deserializer = ReflectionUtils.newInstance(conf.getClassByName(lib).
+        asSubclass(Deserializer.class), conf);
       deserializer.initialize(conf, MetaStoreUtils.getTableMetadata(table));
       return deserializer;
     } catch (RuntimeException e) {
@@ -250,7 +250,8 @@ public class MetaStoreUtils {
       org.apache.hadoop.hive.metastore.api.Table table) throws MetaException {
     String lib = part.getSd().getSerdeInfo().getSerializationLib();
     try {
-      Deserializer deserializer = SerDeUtils.lookupDeserializer(lib);
+      Deserializer deserializer = ReflectionUtils.newInstance(conf.getClassByName(lib).
+        asSubclass(Deserializer.class), conf);
       deserializer.initialize(conf, MetaStoreUtils.getPartitionMetadata(part, table));
       return deserializer;
     } catch (RuntimeException e) {
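
The hunks above all swap SerDeUtils.lookupDeserializer for reflective instantiation via conf.getClassByName(lib).asSubclass(Deserializer.class) and ReflectionUtils.newInstance. A rough stand-alone equivalent using plain java.lang reflection (the Deserializer interface and property name below are illustrative stand-ins, not the real serde2 types):

    import java.util.Properties;

    public class ReflectiveLookupSketch {
      // Illustrative stand-in for org.apache.hadoop.hive.serde2.Deserializer.
      public interface Deserializer { void initialize(Properties schema); }

      public static class MySerDe implements Deserializer {
        public void initialize(Properties schema) {
          System.out.println("initialized with " + schema.size() + " properties");
        }
      }

      public static void main(String[] args) throws Exception {
        Properties schema = new Properties();
        schema.setProperty("serialization.lib", MySerDe.class.getName());
        String lib = schema.getProperty("serialization.lib");
        // Mirrors conf.getClassByName(lib).asSubclass(Deserializer.class)
        // followed by ReflectionUtils.newInstance(clazz, conf) in the diff.
        Deserializer d =
            Class.forName(lib).asSubclass(Deserializer.class).newInstance();
        d.initialize(schema);
      }
    }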

Modified: hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Wed Sep 25 15:04:49 2013
@@ -28,6 +28,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -55,6 +56,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -120,10 +122,17 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
 import org.apache.hadoop.hive.metastore.model.MType;
 import org.apache.hadoop.hive.metastore.model.MVersionTable;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor;
 import org.apache.hadoop.hive.metastore.parser.FilterLexer;
 import org.apache.hadoop.hive.metastore.parser.FilterParser;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.thrift.TException;
 import org.datanucleus.store.rdbms.exceptions.MissingTableException;
 
 /**
@@ -161,6 +170,7 @@ public class ObjectStore implements RawS
   private boolean isInitialized = false;
   private PersistenceManager pm = null;
   private MetaStoreDirectSql directSql = null;
+  private PartitionExpressionProxy expressionProxy = null;
   private Configuration hiveConf;
   int openTrasactionCalls = 0;
   private Transaction currentTransaction = null;
@@ -202,6 +212,7 @@ public class ObjectStore implements RawS
       // most recent instance of the pmf
       pm = null;
       directSql = null;
+      expressionProxy = null;
       openTrasactionCalls = 0;
       currentTransaction = null;
       transactionStatus = TXN_STATUS.NO_STATE;
@@ -234,9 +245,30 @@ public class ObjectStore implements RawS
     pm = getPersistenceManager();
     isInitialized = pm != null;
     if (isInitialized) {
+      expressionProxy = createExpressionProxy(hiveConf);
       directSql = new MetaStoreDirectSql(pm);
     }
-    return;
+  }
+
+  /**
+   * Creates the proxy used to evaluate expressions. This is here to prevent circular
+   * dependency - ql -&gt; metastore client &lt;-&gt; metastore server -&gt; ql. If server and
+   * client are split, this can be removed.
+   * @param conf Configuration.
+   * @return The partition expression proxy.
+   */
+  private static PartitionExpressionProxy createExpressionProxy(Configuration conf) {
+    String className = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS);
+    try {
+      @SuppressWarnings("unchecked")
+      Class<? extends PartitionExpressionProxy> clazz =
+          (Class<? extends PartitionExpressionProxy>)MetaStoreUtils.getClass(className);
+      return (PartitionExpressionProxy)MetaStoreUtils.newInstance(
+          clazz, new Class<?>[0], new Object[0]);
+    } catch (MetaException e) {
+      LOG.error("Error loading PartitionExpressionProxy", e);
+      throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage());
+    }
   }
 
   /**
@@ -1493,13 +1525,22 @@ public class ObjectStore implements RawS
   }
 
 
-  private List<Partition> convertToParts(List<MPartition> mparts)
+  private List<Partition> convertToParts(List<MPartition> mparts) throws MetaException {
+    return convertToParts(mparts, null);
+  }
+
+  private List<Partition> convertToParts(List<MPartition> src, List<Partition> dest)
       throws MetaException {
-    List<Partition> parts = new ArrayList<Partition>(mparts.size());
-    for (MPartition mp : mparts) {
-      parts.add(convertToPart(mp));
+    if (src == null) {
+      return dest;
     }
-    return parts;
+    if (dest == null) {
+      dest = new ArrayList<Partition>(src.size());
+    }
+    for (MPartition mp : src) {
+      dest.add(convertToPart(mp));
+    }
+    return dest;
   }
 
   private List<Partition> convertToParts(String dbName, String tblName, List<MPartition> mparts)
@@ -1514,27 +1555,12 @@ public class ObjectStore implements RawS
   // TODO:pc implement max
   public List<String> listPartitionNames(String dbName, String tableName,
       short max) throws MetaException {
-    List<String> pns = new ArrayList<String>();
+    List<String> pns = null;
     boolean success = false;
     try {
       openTransaction();
       LOG.debug("Executing getPartitionNames");
-      dbName = dbName.toLowerCase().trim();
-      tableName = tableName.toLowerCase().trim();
-      Query q = pm.newQuery(
-          "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
-          + "where table.database.name == t1 && table.tableName == t2 "
-          + "order by partitionName asc");
-      q.declareParameters("java.lang.String t1, java.lang.String t2");
-      q.setResult("partitionName");
-
-      if(max > 0) {
-        q.setRange(0, max);
-      }
-      Collection names = (Collection) q.execute(dbName, tableName);
-      for (Iterator i = names.iterator(); i.hasNext();) {
-        pns.add((String) i.next());
-      }
+      pns = getPartitionNamesNoTxn(dbName, tableName, max);
       success = commitTransaction();
     } finally {
       if (!success) {
@@ -1544,6 +1570,27 @@ public class ObjectStore implements RawS
     return pns;
   }
 
+  private List<String> getPartitionNamesNoTxn(String dbName, String tableName, short max) {
+    List<String> pns = new ArrayList<String>();
+    dbName = dbName.toLowerCase().trim();
+    tableName = tableName.toLowerCase().trim();
+    Query q = pm.newQuery(
+        "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+        + "where table.database.name == t1 && table.tableName == t2 "
+        + "order by partitionName asc");
+    q.declareParameters("java.lang.String t1, java.lang.String t2");
+    q.setResult("partitionName");
+
+    if(max > 0) {
+      q.setRange(0, max);
+    }
+    Collection names = (Collection) q.execute(dbName, tableName);
+    for (Iterator i = names.iterator(); i.hasNext();) {
+      pns.add((String) i.next());
+    }
+    return pns;
+  }
+
   /**
    * Retrieves a Collection of partition-related results from the database that match
    *  the partial specification given for a specific table.
@@ -1704,6 +1751,8 @@ public class ObjectStore implements RawS
       List<String> partNames, boolean allowSql, boolean allowJdo)
           throws MetaException, NoSuchObjectException {
     assert allowSql || allowJdo;
+    dbName = dbName.toLowerCase();
+    tblName = tblName.toLowerCase();
     boolean doTrace = LOG.isDebugEnabled();
     boolean doUseDirectSql = canUseDirectSql(allowSql);
 
@@ -1723,19 +1772,224 @@ public class ObjectStore implements RawS
       }
 
       if (!doUseDirectSql) {
-        results = getPartitionsViaOrm(dbName, tblName, partNames);
+        results = getPartitionsViaOrmFilter(dbName, tblName, partNames);
       }
       success = commitTransaction();
       if (doTrace) {
         LOG.debug(results.size() + " partition retrieved using " + (doUseDirectSql ? "SQL" : "ORM")
             + " in " + ((System.nanoTime() - start) / 1000000.0) + "ms");
       }
-      return results;
     } finally {
       if (!success) {
         rollbackTransaction();
       }
     }
+    return results;
+  }
+
+  @Override
+  public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
+      String defaultPartitionName, short maxParts, Set<Partition> result) throws TException {
+    return getPartitionsByExprInternal(
+        dbName, tblName, expr, defaultPartitionName, maxParts, result, true, true);
+  }
+
+  protected boolean getPartitionsByExprInternal(String dbName, String tblName,
+      byte[] expr, String defaultPartitionName, short maxParts, Set<Partition> result,
+      boolean allowSql, boolean allowJdo) throws TException {
+    assert allowSql || allowJdo;
+    assert result != null;
+    dbName = dbName.toLowerCase();
+    tblName = tblName.toLowerCase();
+
+    // We will try pushdown first, so make the filter. This will also validate the expression;
+    // if serialization fails we will throw an incompatible metastore error to the client.
+    String filter = null;
+    try {
+      filter = expressionProxy.convertExprToFilter(expr);
+    } catch (MetaException ex) {
+      throw new IMetaStoreClient.IncompatibleMetastoreException(ex.getMessage());
+    }
+
+    // Make a tree out of the filter.
+    // TODO: this is all pretty ugly. The only reason we need all these transformations
+    //       is to maintain support for simple filters for HCat users that query metastore.
+    //       If forcing everyone to use thick client is out of the question, maybe we could
+    //       parse the filter into standard hive expressions and not all this separate tree
+    //       Filter.g stuff. That way this method and ...ByFilter would just be merged.
+    ExpressionTree exprTree = makeExpressionTree(filter);
+
+    boolean doUseDirectSql = allowSql && isDirectSqlEnabled(maxParts);
+    boolean doTrace = LOG.isDebugEnabled();
+    List<Partition> partitions = null;
+    boolean hasUnknownPartitions = false;
+    boolean success = false;
+    try {
+      long start = doTrace ? System.nanoTime() : 0;
+      openTransaction();
+      Table table = ensureGetTable(dbName, tblName);
+      if (doUseDirectSql) {
+        try {
+          if (exprTree != null) {
+            // We have some sort of expression tree, try SQL filter pushdown.
+            partitions = directSql.getPartitionsViaSqlFilter(table, exprTree, null);
+          }
+          if (partitions == null) {
+            // We couldn't do SQL filter pushdown. Get names via normal means.
+            List<String> partNames = new LinkedList<String>();
+            hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(
+                table, expr, defaultPartitionName, maxParts, partNames);
+            partitions = directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames, null);
+          }
+        } catch (Exception ex) {
+          handleDirectSqlError(allowJdo, ex);
+          doUseDirectSql = false;
+          table = ensureGetTable(dbName, tblName); // Get again, detached on rollback.
+        }
+      }
+
+      if (!doUseDirectSql) {
+        assert partitions == null;
+        if (exprTree != null) {
+          // We have some sort of expression tree, try JDOQL filter pushdown.
+          partitions = getPartitionsViaOrmFilter(table, exprTree, maxParts, false);
+        }
+        if (partitions == null) {
+          // We couldn't do JDOQL filter pushdown. Get names via normal means.
+          List<String> partNames = new ArrayList<String>();
+          hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(
+              table, expr, defaultPartitionName, maxParts, partNames);
+          partitions = getPartitionsViaOrmFilter(dbName, tblName, partNames);
+        }
+      }
+      success = commitTransaction();
+      if (doTrace) {
+        double time = ((System.nanoTime() - start) / 1000000.0);
+        LOG.debug(partitions.size() + " partition retrieved using "
+          + (doUseDirectSql ? "SQL" : "ORM") + " in " + time + "ms");
+      }
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    result.addAll(partitions);
+    return hasUnknownPartitions;
+  }
+
+  private boolean isDirectSqlEnabled(short maxParts) {
+    // There's no portable SQL limit. It doesn't make a lot of sense w/o offset anyway.
+    return (maxParts < 0)
+        && HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL);
+  }
+
+  private class LikeChecker extends ExpressionTree.TreeVisitor {
+    private boolean hasLike;
+
+    public boolean hasLike() {
+      return hasLike;
+    }
+
+    @Override
+    protected boolean shouldStop() {
+      return hasLike;
+    }
+
+    @Override
+    protected void visit(LeafNode node) throws MetaException {
+      hasLike = hasLike || (node.operator == Operator.LIKE);
+    }
+  }
+
+  /**
+   * Makes expression tree out of expr.
+   * @param filter Filter.
+   * @return Expression tree. Null if there was an error.
+   */
+  private ExpressionTree makeExpressionTree(String filter) throws MetaException {
+    // TODO: ExprNodeDesc is an expression tree, we could just use that and be rid of Filter.g.
+    if (filter == null || filter.isEmpty()) {
+      return ExpressionTree.EMPTY_TREE;
+    }
+    LOG.debug("Filter specified is " + filter);
+    ExpressionTree tree = null;
+    try {
+      tree = getFilterParser(filter).tree;
+    } catch (MetaException ex) {
+      LOG.info("Unable to make the expression tree from expression string ["
+          + filter + "]" + ex.getMessage()); // Don't log the stack, this is normal.
+    }
+    if (tree == null) {
+      return null;
+    }
+    // We suspect that LIKE pushdown into JDO is invalid; see HIVE-5134. Check for like here.
+    LikeChecker lc = new LikeChecker();
+    tree.accept(lc);
+    return lc.hasLike() ? null : tree;
+  }
+
+  /**
+   * Gets the partition names from a table, pruned using an expression.
+   * @param table Table.
+   * @param expr Expression.
+   * @param defaultPartName Default partition name from job config, if any.
+   * @param maxParts Maximum number of partition names to return.
+   * @param result The resulting names.
+   * @return Whether the result contains any unknown partitions.
+   */
+  private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
+      String defaultPartName, short maxParts, List<String> result) throws MetaException {
+    result.addAll(getPartitionNamesNoTxn(
+        table.getDbName(), table.getTableName(), maxParts));
+    List<String> columnNames = new ArrayList<String>();
+    for (FieldSchema fs : table.getPartitionKeys()) {
+      columnNames.add(fs.getName());
+    }
+    if (defaultPartName == null || defaultPartName.isEmpty()) {
+      defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME);
+    }
+    return expressionProxy.filterPartitionsByExpr(
+        columnNames, expr, defaultPartName, result);
+  }
+
+  /**
+   * Gets partition names from the table via ORM (JDOQL) filter pushdown.
+   * @param table The table.
+   * @param tree The expression tree from which JDOQL filter will be made.
+   * @param maxParts Maximum number of partitions to return.
+   * @param isValidatedFilter Whether the filter was pre-validated for JDOQL pushdown by a client
+   *   (old hive client or non-hive one); if it was and we fail to create a filter, we will throw.
+   * @return Resulting partitions. Can be null if isValidatedFilter is false, and
+   *         there was error deriving the JDO filter.
+   */
+  private List<Partition> getPartitionsViaOrmFilter(Table table, ExpressionTree tree,
+      short maxParts, boolean isValidatedFilter) throws MetaException {
+    Map<String, Object> params = new HashMap<String, Object>();
+    String jdoFilter = makeQueryFilterString(
+        table.getDbName(), table, tree, params, isValidatedFilter);
+    if (jdoFilter == null) {
+      assert !isValidatedFilter;
+      return null;
+    }
+    Query query = pm.newQuery(MPartition.class, jdoFilter);
+    if (maxParts >= 0) {
+      // User specified a row limit, set it on the Query
+      query.setRange(0, maxParts);
+    }
+
+    String parameterDeclaration = makeParameterDeclarationStringObj(params);
+    query.declareParameters(parameterDeclaration);
+    query.setOrdering("partitionName ascending");
+
+    @SuppressWarnings("unchecked")
+    List<MPartition> mparts = (List<MPartition>) query.executeWithMap(params);
+
+    LOG.debug("Done executing query for getPartitionsViaOrmFilter");
+    pm.retrieveAll(mparts); // TODO: why is this inconsistent with what we get by names?
+    LOG.debug("Done retrieving all objects for getPartitionsViaOrmFilter");
+    List<Partition> results = convertToParts(mparts);
+    query.closeAll();
+    return results;
   }
 
   private void handleDirectSqlError(boolean allowJdo, Exception ex) throws MetaException {
@@ -1750,8 +2004,18 @@ public class ObjectStore implements RawS
     openTransaction();
   }
 
-  private List<Partition> getPartitionsViaOrm(
+  /**
+   * Gets partition names from the table via ORM (JDOQL) name filter.
+   * @param dbName Database name.
+   * @param tblName Table name.
+   * @param partNames Partition names to get the objects for.
+   * @return Resulting partitions.
+   */
+  private List<Partition> getPartitionsViaOrmFilter(
       String dbName, String tblName, List<String> partNames) throws MetaException {
+    if (partNames.isEmpty()) {
+      return new ArrayList<Partition>();
+    }
     StringBuilder sb = new StringBuilder(
         "table.tableName == t1 && table.database.name == t2 && (");
     int n = 0;
@@ -1778,6 +2042,7 @@ public class ObjectStore implements RawS
     query.declareParameters(parameterDeclaration);
     query.setOrdering("partitionName ascending");
 
+    @SuppressWarnings("unchecked")
     List<MPartition> mparts = (List<MPartition>) query.executeWithMap(params);
     // pm.retrieveAll(mparts); // retrieveAll is pessimistic. some fields may not be needed
     List<Partition> results = convertToParts(dbName, tblName, mparts);
@@ -1798,39 +2063,43 @@ public class ObjectStore implements RawS
     assert allowSql || allowJdo;
     boolean doTrace = LOG.isDebugEnabled();
     boolean doUseDirectSql = canUseDirectSql(allowSql);
+
     dbName = dbName.toLowerCase();
     tblName = tblName.toLowerCase();
-    List<Partition> results = null;
-    FilterParser parser = null;
-    if (filter != null && filter.length() != 0) {
-      LOG.debug("Filter specified is " + filter);
-      parser = getFilterParser(filter);
-    }
+    ExpressionTree tree = (filter != null && !filter.isEmpty())
+        ? getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE;
 
+    List<Partition> results = null;
     boolean success = false;
     try {
       long start = doTrace ? System.nanoTime() : 0;
       openTransaction();
-      MTable mtable = ensureGetMTable(dbName, tblName);
+      Table table = ensureGetTable(dbName, tblName);
       if (doUseDirectSql) {
         try {
-          Table table = convertToTable(mtable);
           Integer max = (maxParts < 0) ? null : (int)maxParts;
-          results = directSql.getPartitionsViaSqlFilter(table, parser, max);
+          results = directSql.getPartitionsViaSqlFilter(table, tree, max);
+          if (results == null) {
+            // Cannot push down SQL filter. The message has been logged internally.
+            // This is not an error so don't roll back, just go to JDO.
+            doUseDirectSql = false;
+          }
         } catch (Exception ex) {
           handleDirectSqlError(allowJdo, ex);
           doUseDirectSql = false;
           start = doTrace ? System.nanoTime() : 0;
-          mtable = ensureGetMTable(dbName, tblName); // detached on rollback, get again
+          table = ensureGetTable(dbName, tblName); // detached on rollback, get again
         }
       }
+
       if (!doUseDirectSql) {
-        results = convertToParts(listMPartitionsByFilterNoTxn(
-            mtable, dbName, tblName, parser, maxParts));
+        results = getPartitionsViaOrmFilter(table, tree, maxParts, true);
       }
       success = commitTransaction();
-      LOG.info(results.size() + " partitions retrieved using " + (doUseDirectSql ? "SQL" : "ORM")
-          + (doTrace ? (" in " + ((System.nanoTime() - start) / 1000000.0) + "ms") : ""));
+      if (doTrace) {
+        LOG.debug(results.size() + " partition retrieved using " + (doUseDirectSql ? "SQL" : "ORM")
+            + " in " + ((System.nanoTime() - start) / 1000000.0) + "ms");
+      }
       return results;
     } finally {
       if (!success) {
@@ -1849,13 +2118,20 @@ public class ObjectStore implements RawS
       && !isActiveTransaction();
   }
 
-  private MTable ensureGetMTable(String dbName, String tblName) throws NoSuchObjectException {
+  /**
+   * Gets the table object for a given table, throws if anything goes wrong.
+   * @param dbName Database name.
+   * @param tblName Table name.
+   * @return Table object.
+   */
+  private Table ensureGetTable(
+      String dbName, String tblName) throws NoSuchObjectException, MetaException {
     MTable mtable = getMTable(dbName, tblName);
     if (mtable == null) {
       throw new NoSuchObjectException("Specified database/table does not exist : "
           + dbName + "." + tblName);
     }
-    return mtable;
+    return convertToTable(mtable);
   }
 
   private FilterParser getFilterParser(String filter) throws MetaException {
@@ -1881,56 +2157,55 @@ public class ObjectStore implements RawS
   }
 
   /**
-   * Makes a JDO query filter string
-   * if mtable is not null, generates the query to filter over partitions in a table.
-   * if mtable is null, generates the query to filter over tables in a database
+   * Makes a JDO query filter string for tables or partitions.
+   * @param dbName Database name.
+   * @param table Table. If null, the query returned is over tables in a database.
+   *   If not null, the query returned is over partitions in a table.
+   * @param filter The filter from which JDOQL filter will be made.
+   * @param params Parameters for the filter. Some parameters may be added here.
+   * @return Resulting filter.
    */
-  private String makeQueryFilterString(MTable mtable, String filter,
+  private String makeQueryFilterString(String dbName, MTable mtable, String filter,
       Map<String, Object> params) throws MetaException {
-    FilterParser parser =
-        (filter != null && filter.length() != 0) ? getFilterParser(filter) : null;
-    return makeQueryFilterString(mtable, parser, params);
+    ExpressionTree tree = (filter != null && !filter.isEmpty())
+        ? getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE;
+    return makeQueryFilterString(dbName, convertToTable(mtable), tree, params, true);
   }
 
   /**
-   * Makes a JDO query filter string
-   * if mtable is not null, generates the query to filter over partitions in a table.
-   * if mtable is null, generates the query to filter over tables in a database
+   * Makes a JDO query filter string for tables or partitions.
+   * @param dbName Database name.
+   * @param table Table. If null, the query returned is over tables in a database.
+   *   If not null, the query returned is over partitions in a table.
+   * @param tree The expression tree from which JDOQL filter will be made.
+   * @param params Parameters for the filter. Some parameters may be added here.
+   * @param isValidatedFilter Whether the filter was pre-validated for JDOQL pushdown
+   *   by the client; if it was and we fail to create a filter, we will throw.
+   * @return Resulting filter. Can be null if isValidatedFilter is false, and there was error.
    */
-  private String makeQueryFilterString(MTable mtable, FilterParser parser,
-      Map<String, Object> params)
-      throws MetaException {
-
-    StringBuilder queryBuilder = new StringBuilder();
-    if (mtable != null) {
+  private String makeQueryFilterString(String dbName, Table table, ExpressionTree tree,
+      Map<String, Object> params, boolean isValidatedFilter) throws MetaException {
+    assert tree != null;
+    FilterBuilder queryBuilder = new FilterBuilder(isValidatedFilter);
+    if (table != null) {
       queryBuilder.append("table.tableName == t1 && table.database.name == t2");
+      params.put("t1", table.getTableName());
+      params.put("t2", table.getDbName());
     } else {
       queryBuilder.append("database.name == dbName");
+      params.put("dbName", dbName);
     }
 
-    if (parser != null) {
-      String jdoFilter;
-      if (mtable != null) {
-        Table table = convertToTable(mtable);
-        jdoFilter = parser.tree.generateJDOFilter(table, params);
-      } else {
-        jdoFilter = parser.tree.generateJDOFilter(null, params);
-      }
-      LOG.debug("jdoFilter = " + jdoFilter);
-
-      if( jdoFilter.trim().length() > 0 ) {
-        queryBuilder.append(" && ( ");
-        queryBuilder.append(jdoFilter.trim());
-        queryBuilder.append(" )");
-      }
+    tree.generateJDOFilterFragment(table, params, queryBuilder);
+    if (queryBuilder.hasError()) {
+      assert !isValidatedFilter;
+      LOG.info("JDO filter pushdown cannot be used: " + queryBuilder.getErrorMessage());
+      return null;
     }
-    return queryBuilder.toString();
-  }
-
-  private String makeTableQueryFilterString(String filter,
-      Map<String, Object> params)
-      throws MetaException {
-    return makeQueryFilterString(null, filter, params);
+    String jdoFilter = queryBuilder.getFilter();
+    LOG.debug("jdoFilter = " + jdoFilter);
+    return jdoFilter;
   }
 
   private String makeParameterDeclarationString(Map<String, String> params) {
@@ -1954,38 +2229,6 @@ public class ObjectStore implements RawS
     return paramDecl.toString();
   }
 
-  private List<MPartition> listMPartitionsByFilterNoTxn(MTable mtable, String dbName,
-      String tableName, FilterParser parser, short maxParts)
-          throws MetaException, NoSuchObjectException {
-    List<MPartition> mparts = null;
-    LOG.debug("Executing listMPartitionsByFilterNoTxn");
-    Map<String, Object> params = new HashMap<String, Object>();
-    String queryFilterString = makeQueryFilterString(mtable, parser, params);
-
-    Query query = pm.newQuery(MPartition.class,
-        queryFilterString);
-
-    if( maxParts >= 0 ) {
-      //User specified a row limit, set it on the Query
-      query.setRange(0, maxParts);
-    }
-
-    LOG.debug("JDOQL filter is " + queryFilterString);
-
-    params.put("t1", tableName.trim());
-    params.put("t2", dbName.trim());
-
-    String parameterDeclaration = makeParameterDeclarationStringObj(params);
-    query.declareParameters(parameterDeclaration);
-    query.setOrdering("partitionName ascending");
-
-    mparts = (List<MPartition>) query.executeWithMap(params);
-
-    LOG.debug("Done executing query for listMPartitionsByFilterNoTxn");
-    pm.retrieveAll(mparts);
-    return mparts;
-  }
-
   @Override
   public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
       throws MetaException {
@@ -1996,7 +2239,7 @@ public class ObjectStore implements RawS
       LOG.debug("Executing listTableNamesByFilter");
       dbName = dbName.toLowerCase().trim();
       Map<String, Object> params = new HashMap<String, Object>();
-      String queryFilterString = makeTableQueryFilterString(filter, params);
+      String queryFilterString = makeQueryFilterString(dbName, null, filter, params);
       Query query = pm.newQuery(MTable.class);
       query.declareImports("import java.lang.String");
       query.setResult("tableName");
@@ -2005,7 +2248,6 @@ public class ObjectStore implements RawS
         query.setRange(0, maxTables);
       }
       LOG.debug("filter specified is " + filter + "," + " JDOQL filter is " + queryFilterString);
-      params.put("dbName", dbName);
       for (Entry<String, Object> entry : params.entrySet()) {
         LOG.debug("key: " + entry.getKey() + " value: " + entry.getValue() +
             " class: " + entry.getValue().getClass().getName());
@@ -2050,7 +2292,7 @@ public class ObjectStore implements RawS
         return partNames;
       }
       Map<String, Object> params = new HashMap<String, Object>();
-      String queryFilterString = makeQueryFilterString(mtable, filter, params);
+      String queryFilterString = makeQueryFilterString(dbName, mtable, filter, params);
       Query query = pm.newQuery(
           "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
           + "where " + queryFilterString);
@@ -2064,9 +2306,6 @@ public class ObjectStore implements RawS
           " JDOQL filter is " + queryFilterString);
       LOG.debug("Parms is " + params);
 
-      params.put("t1", tableName.trim());
-      params.put("t2", dbName.trim());
-
       String parameterDeclaration = makeParameterDeclarationStringObj(params);
       query.declareParameters(parameterDeclaration);
       query.setOrdering("partitionName ascending");
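
To summarize the control flow getPartitionsByExprInternal introduces above: try direct-SQL expression pushdown; if the SQL filter cannot be derived, prune partition names via the expression proxy and fetch by name; if direct SQL is disabled or fails, run the same two-step ladder over JDO. A hedged, self-contained sketch of that ladder (the Store interface and method names are hypothetical stand-ins for the MetaStoreDirectSql/ObjectStore calls):

    import java.util.ArrayList;
    import java.util.List;

    public class PushdownFallbackSketch {
      interface Store {
        List<String> bySqlPushdown(byte[] expr);   // may return null: cannot push down
        List<String> byPrunedNames(byte[] expr);   // always works, but slower
      }

      static List<String> getPartitions(Store store, byte[] expr, boolean allowSql) {
        List<String> parts = null;
        if (allowSql) {
          parts = store.bySqlPushdown(expr);       // 1. try SQL filter pushdown
        }
        if (parts == null) {
          parts = store.byPrunedNames(expr);       // 2. prune names, fetch by name
        }
        return parts;
      }

      public static void main(String[] args) {
        Store store = new Store() {
          public List<String> bySqlPushdown(byte[] e) { return null; } // pushdown failed
          public List<String> byPrunedNames(byte[] e) {
            List<String> r = new ArrayList<String>();
            r.add("ds=2013-09-25");
            return r;
          }
        };
        System.out.println(getPartitions(store, new byte[0], true));
      }
    }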

Modified: hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Wed Sep 25 15:04:49 2013
@@ -18,8 +18,13 @@
 
 package org.apache.hadoop.hive.metastore;
 
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
@@ -48,9 +53,18 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.thrift.TException;
 
 public interface RawStore extends Configurable {
 
+  /***
+   * Annotation to skip retries
+   */
+  @Target(value = ElementType.METHOD)
+  @Retention(value = RetentionPolicy.RUNTIME)
+  public @interface CanNotRetry {
+  }
+
   public abstract void shutdown();
 
   /**
@@ -68,11 +82,13 @@ public interface RawStore extends Config
    *
    * @return true or false
    */
+  @CanNotRetry
   public abstract boolean commitTransaction();
 
   /**
    * Rolls back the current transaction if it is active
    */
+  @CanNotRetry
   public abstract void rollbackTransaction();
 
   public abstract void createDatabase(Database db)
@@ -186,6 +202,10 @@ public interface RawStore extends Config
       String dbName, String tblName, String filter, short maxParts)
       throws MetaException, NoSuchObjectException;
 
+  public abstract boolean getPartitionsByExpr(String dbName, String tblName,
+      byte[] expr, String defaultPartitionName, short maxParts, Set<Partition> result)
+      throws TException;
+
   public abstract List<Partition> getPartitionsByNames(
       String dbName, String tblName, List<String> partNames)
       throws MetaException, NoSuchObjectException;
@@ -441,5 +461,4 @@ public interface RawStore extends Config
  public abstract String getMetaStoreSchemaVersion() throws  MetaException;
 
  public abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException;
-
 }
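
The new @CanNotRetry marker only works because it is declared with RetentionPolicy.RUNTIME, which lets the retrying wrapper see it reflectively at call time. A minimal stand-alone illustration (Store here is a made-up interface, not RawStore itself):

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;
    import java.lang.reflect.Method;

    public class CanNotRetrySketch {
      @Target(ElementType.METHOD)
      @Retention(RetentionPolicy.RUNTIME)
      public @interface CanNotRetry {}

      interface Store {
        @CanNotRetry boolean commitTransaction();
        void createDatabase(String name);
      }

      public static void main(String[] args) {
        for (Method m : Store.class.getMethods()) {
          // Runtime retention makes the annotation visible here.
          boolean retriable = m.getAnnotation(CanNotRetry.class) == null;
          System.out.println(m.getName() + " retriable=" + retriable);
        }
      }
    }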

Modified: hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingRawStore.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingRawStore.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingRawStore.java (original)
+++ hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingRawStore.java Wed Sep 25 15:04:49 2013
@@ -29,12 +29,10 @@ import org.apache.commons.lang.ClassUtil
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;
 import org.apache.hadoop.util.ReflectionUtils;
 
 @InterfaceAudience.Private
@@ -46,7 +44,7 @@ public class RetryingRawStore implements
   private final RawStore base;
   private int retryInterval = 0;
   private int retryLimit = 0;
-  private MetaStoreInit.MetaStoreInitData metaStoreInitData =
+  private final MetaStoreInit.MetaStoreInitData metaStoreInitData =
     new MetaStoreInit.MetaStoreInitData();
   private final int id;
   private final HiveConf hiveConf;
@@ -132,12 +130,13 @@ public class RetryingRawStore implements
           // Due to reflection, the jdo exception is wrapped in
           // invocationTargetException
           caughtException = (javax.jdo.JDOException) e.getCause();
-        }
-        else
+        } else {
           throw e.getCause();
+        }
       }
 
-      if (retryCount >= retryLimit) {
+      if (retryCount >= retryLimit ||
+          (method.getAnnotation(RawStore.CanNotRetry.class) != null)) {
         throw  caughtException;
       }
 

Modified: hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java (original)
+++ hive/branches/vectorization/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java Wed Sep 25 15:04:49 2013
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hive.metastore.parser;
 
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
@@ -26,11 +25,12 @@ import java.util.Stack;
 import org.antlr.runtime.ANTLRStringStream;
 import org.antlr.runtime.CharStream;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.serde.serdeConstants;
 
 import com.google.common.collect.Sets;
 
@@ -39,6 +39,8 @@ import com.google.common.collect.Sets;
  * at intermediate level and the leaf level nodes are of type LeafNode.
  */
 public class ExpressionTree {
+  /** The empty tree that can be returned for an empty filter. */
+  public static final ExpressionTree EMPTY_TREE = new ExpressionTree();
 
   /** The logical operations supported. */
   public enum LogicalOperator {
@@ -104,9 +106,81 @@ public class ExpressionTree {
 
   }
 
-  public static interface TreeVisitor {
-    void visit(TreeNode node) throws MetaException;
-    void visit(LeafNode node) throws MetaException;
+  /**
+   * Depth first traversal of ExpressionTree.
+   * Subclasses should override the subset of methods relevant to them.
+   */
+  public static class TreeVisitor {
+    private void visit(TreeNode node) throws MetaException {
+      if (shouldStop()) return;
+      assert node != null && node.getLhs() != null && node.getRhs() != null;
+      beginTreeNode(node);
+      node.lhs.accept(this);
+      midTreeNode(node);
+      node.rhs.accept(this);
+      endTreeNode(node);
+    }
+
+    protected void beginTreeNode(TreeNode node) throws MetaException {}
+    protected void midTreeNode(TreeNode node) throws MetaException {}
+    protected void endTreeNode(TreeNode node) throws MetaException {}
+    protected void visit(LeafNode node) throws MetaException {}
+    protected boolean shouldStop() {
+      return false;
+    }
+  }
+
+  /**
+   * Helper class that wraps the StringBuilder used to build the filter over the tree,
+   * as well as error propagation in two modes - expect errors, i.e. the filter may well
+   * be unbuildable and that's not a failure condition; or don't expect errors, i.e. the
+   * filter must be buildable.
+   */
+  public static class FilterBuilder {
+    private final StringBuilder result = new StringBuilder();
+    private String errorMessage = null;
+    private boolean expectNoErrors = false;
+
+    public FilterBuilder(boolean expectNoErrors) {
+      this.expectNoErrors = expectNoErrors;
+    }
+
+    public String getFilter() throws MetaException {
+      assert errorMessage == null;
+      if (errorMessage != null) {
+        throw new MetaException("Trying to get result after error: " + errorMessage);
+      }
+      return result.toString();
+    }
+
+    @Override
+    public String toString() {
+      try {
+        return getFilter();
+      } catch (MetaException ex) {
+        throw new RuntimeException(ex);
+      }
+    }
+
+    public String getErrorMessage() {
+      return errorMessage;
+    }
+
+    public boolean hasError() {
+      return errorMessage != null;
+    }
+
+    public FilterBuilder append(String filterPart) {
+      this.result.append(filterPart);
+      return this;
+    }
+
+    public void setError(String errorMessage) throws MetaException {
+      this.errorMessage = errorMessage;
+      if (expectNoErrors) {
+        throw new MetaException(errorMessage);
+      }
+    }
   }
 
   /**
@@ -139,7 +213,7 @@ public class ExpressionTree {
     }
 
     /** Double dispatch for TreeVisitor. */
-    public void accept(TreeVisitor visitor) throws MetaException {
+    protected void accept(TreeVisitor visitor) throws MetaException {
       visitor.visit(this);
     }
 
@@ -153,16 +227,16 @@ public class ExpressionTree {
      *        tables that match the filter.
      * @param params
      *        A map of parameter key to values for the filter statement.
+     * @param filterBuilder The filter builder that is used to build filter.
      * @return a JDO filter statement
      * @throws MetaException
      */
-    public String generateJDOFilter(Table table, Map<String, Object> params)
-    throws MetaException {
-      StringBuilder filterBuffer = new StringBuilder();
-
-      if ( lhs != null) {
+    public void generateJDOFilter(Table table, Map<String, Object> params,
+        FilterBuilder filterBuffer) throws MetaException {
+      if (filterBuffer.hasError()) return;
+      if (lhs != null) {
         filterBuffer.append (" (");
-        filterBuffer.append(lhs.generateJDOFilter(table, params));
+        lhs.generateJDOFilter(table, params, filterBuffer);
 
         if (rhs != null) {
           if( andOr == LogicalOperator.AND ) {
@@ -171,12 +245,10 @@ public class ExpressionTree {
             filterBuffer.append(" || ");
           }
 
-          filterBuffer.append(rhs.generateJDOFilter(table, params));
+          rhs.generateJDOFilter(table, params, filterBuffer);
         }
         filterBuffer.append (") ");
       }
-
-      return filterBuffer.toString();
     }
   }
 
@@ -192,18 +264,17 @@ public class ExpressionTree {
     private static final String PARAM_PREFIX = "hive_filter_param_";
 
     @Override
-    public void accept(TreeVisitor visitor) throws MetaException {
+    protected void accept(TreeVisitor visitor) throws MetaException {
       visitor.visit(this);
     }
 
     @Override
-    public String generateJDOFilter(Table table,
-        Map<String, Object> params)
-        throws MetaException {
+    public void generateJDOFilter(Table table, Map<String, Object> params,
+        FilterBuilder filterBuilder) throws MetaException {
       if (table != null) {
-        return generateJDOFilterOverPartitions(table, params);
+        generateJDOFilterOverPartitions(table, params, filterBuilder);
       } else {
-        return generateJDOFilterOverTables(params);
+        generateJDOFilterOverTables(params, filterBuilder);
       }
     }
 
@@ -212,20 +283,22 @@ public class ExpressionTree {
     private static final Set<Operator> TABLE_FILTER_OPS = Sets.newHashSet(
         Operator.EQUALS, Operator.NOTEQUALS, Operator.NOTEQUALS2);
 
-    private String generateJDOFilterOverTables(Map<String, Object> params)
-        throws MetaException {
+    private void generateJDOFilterOverTables(Map<String, Object> params,
+        FilterBuilder filterBuilder) throws MetaException {
       if (keyName.equals(hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER)) {
         keyName = "this.owner";
       } else if (keyName.equals(hive_metastoreConstants.HIVE_FILTER_FIELD_LAST_ACCESS)) {
         //lastAccessTime expects an integer, so we cannot use the "like operator"
         if (operator == Operator.LIKE) {
-          throw new MetaException("Like is not supported for HIVE_FILTER_FIELD_LAST_ACCESS");
+          filterBuilder.setError("Like is not supported for HIVE_FILTER_FIELD_LAST_ACCESS");
+          return;
         }
         keyName = "this.lastAccessTime";
       } else if (keyName.startsWith(hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS)) {
         if (!TABLE_FILTER_OPS.contains(operator)) {
-          throw new MetaException("Only " + TABLE_FILTER_OPS + " are supported " +
+          filterBuilder.setError("Only " + TABLE_FILTER_OPS + " are supported " +
             "operators for HIVE_FILTER_FIELD_PARAMS");
+          return;
         }
         String paramKeyName = keyName.substring(hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS.length());
         keyName = "this.parameters.get(\"" + paramKeyName + "\")";
@@ -233,10 +306,11 @@ public class ExpressionTree {
         // in case we get a long.
         value = value.toString();
       } else {
-        throw new MetaException("Invalid key name in filter.  " +
+        filterBuilder.setError("Invalid key name in filter.  " +
           "Use constants from org.apache.hadoop.hive.metastore.api");
+        return;
       }
-      return generateJDOFilterGeneral(params);
+      generateJDOFilterGeneral(params, filterBuilder);
     }
 
     /**
@@ -247,70 +321,73 @@ public class ExpressionTree {
      * Currently supported types for value are String and Long.
      * The LIKE operator for Longs is unsupported.
      */
-    private String generateJDOFilterGeneral(Map<String, Object> params)
-        throws MetaException {
+    private void generateJDOFilterGeneral(Map<String, Object> params,
+        FilterBuilder filterBuilder) throws MetaException {
       String paramName = PARAM_PREFIX + params.size();
       params.put(paramName, value);
-      String filter;
 
       if (isReverseOrder) {
         if (operator == Operator.LIKE) {
-          throw new MetaException(
-              "Value should be on the RHS for LIKE operator : " +
+          filterBuilder.setError("Value should be on the RHS for LIKE operator : " +
               "Key <" + keyName + ">");
         } else {
-          filter = paramName + " " + operator.getJdoOp() + " " + keyName;
+          filterBuilder.append(paramName + " " + operator.getJdoOp() + " " + keyName);
         }
       } else {
         if (operator == Operator.LIKE) {
-          filter = " " + keyName + "." + operator.getJdoOp() + "(" + paramName + ") ";
+          filterBuilder.append(" " + keyName + "." + operator.getJdoOp() + "(" + paramName + ") ");
         } else {
-          filter = " " + keyName + " " + operator.getJdoOp() + " " + paramName;
+          filterBuilder.append(" " + keyName + " " + operator.getJdoOp() + " " + paramName);
         }
       }
-      return filter;
     }
 
-    private String generateJDOFilterOverPartitions(Table table, Map<String, Object> params)
-    throws MetaException {
+    private void generateJDOFilterOverPartitions(Table table, Map<String, Object> params,
+        FilterBuilder filterBuilder) throws MetaException {
       int partitionColumnCount = table.getPartitionKeys().size();
-      int partitionColumnIndex = getPartColIndexForFilter(table);
+      int partitionColumnIndex = getPartColIndexForFilter(table, filterBuilder);
+      if (filterBuilder.hasError()) return;
+
+      String valueAsString = getFilterPushdownParam(table, partitionColumnIndex, filterBuilder);
+      if (filterBuilder.hasError()) return;
 
-      String valueAsString = getFilterPushdownParam(table, partitionColumnIndex);
       String paramName = PARAM_PREFIX + params.size();
       params.put(paramName, valueAsString);
 
       boolean isOpEquals = operator == Operator.EQUALS;
       if (isOpEquals || operator == Operator.NOTEQUALS || operator == Operator.NOTEQUALS2) {
-        return makeFilterForEquals(keyName, valueAsString, paramName, params,
-            partitionColumnIndex, partitionColumnCount, isOpEquals);
+        makeFilterForEquals(keyName, valueAsString, paramName, params,
+            partitionColumnIndex, partitionColumnCount, isOpEquals, filterBuilder);
+        return;
       }
 
       String keyEqual = FileUtils.escapePathName(keyName) + "=";
-      int keyEqualLength = keyEqual.length();
-      String valString;
-      // partitionname ==>  (key=value/)*(key=value)
-      if (partitionColumnIndex == (partitionColumnCount - 1)) {
-        valString = "partitionName.substring(partitionName.indexOf(\"" + keyEqual + "\")+" + keyEqualLength + ")";
-      }
-      else {
-        valString = "partitionName.substring(partitionName.indexOf(\"" + keyEqual + "\")+" + keyEqualLength + ").substring(0, partitionName.substring(partitionName.indexOf(\"" + keyEqual + "\")+" + keyEqualLength + ").indexOf(\"/\"))";
+      String valString = "partitionName.substring(";
+      String indexOfKeyStr = "";
+      if (partitionColumnIndex != 0) {
+        keyEqual = "/" + keyEqual;
+        indexOfKeyStr = "partitionName.indexOf(\"" + keyEqual + "\") + ";
+        valString += indexOfKeyStr;
+      }
+      valString += keyEqual.length();
+      if (partitionColumnIndex != (partitionColumnCount - 1)) {
+        valString += ", partitionName.indexOf(\"/\", " + indexOfKeyStr + keyEqual.length() + ")";
       }
+      valString += ")";
 
       if (operator == Operator.LIKE) {
         if (isReverseOrder) {
-          //For LIKE, the value should be on the RHS
-          throw new MetaException(
+          // For LIKE, the value should be on the RHS.
+          filterBuilder.setError(
               "Value should be on the RHS for LIKE operator : Key <" + keyName + ">");
         }
-        //generate this.values.get(i).matches("abc%")
-        return " " + valString + "." + operator.getJdoOp() + "(" + paramName + ") ";
+        // TODO: in all likelihood, this won't actually work. Keep it for backward compat.
+        filterBuilder.append(" " + valString + "." + operator.getJdoOp() + "(" + paramName + ") ");
+      } else {
+        filterBuilder.append(isReverseOrder
+            ? paramName + " " + operator.getJdoOp() + " " + valString
+            : " " + valString + " " + operator.getJdoOp() + " " + paramName);
       }
-
-      // TODO: support for other ops for numbers to be handled in HIVE-4888.
-      return isReverseOrder
-          ? paramName + " " + operator.getJdoOp() + " " + valString
-          : " " + valString + " " + operator.getJdoOp() + " " + paramName;
     }
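
To make the string splicing above concrete: for a table partitioned by (ds, hr, min) and a non-equality predicate on hr (column index 1 of 3), keyEqual becomes "/hr=" and the generated JDOQL value expression is

    partitionName.substring(partitionName.indexOf("/hr=") + 4,
        partitionName.indexOf("/", partitionName.indexOf("/hr=") + 4))

which, evaluated against a partition named ds=2010-10-01/hr=12/min=30, yields "12". For the last partition column the second substring argument is omitted (the value runs to the end of the name), and for the first column the indexOf() prefix is dropped because the key starts the name.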
 
     /**
@@ -339,9 +416,11 @@ public class ExpressionTree {
      * Get partition column index in the table partition column list that
      * corresponds to the key that is being filtered on by this tree node.
      * @param table The table.
+     * @param filterBuilder filter builder used to report error, if any.
      * @return The index.
      */
-    public int getPartColIndexForFilter(Table table) throws MetaException {
+    public int getPartColIndexForFilter(
+        Table table, FilterBuilder filterBuilder) throws MetaException {
       int partitionColumnIndex;
       assert (table.getPartitionKeys().size() > 0);
       for (partitionColumnIndex = 0; partitionColumnIndex < table.getPartitionKeys().size();
@@ -351,9 +430,10 @@ public class ExpressionTree {
           break;
         }
       }
-      if( partitionColumnIndex == table.getPartitionKeys().size() ) {
-        throw new MetaException("Specified key <" + keyName +
+      if (partitionColumnIndex == table.getPartitionKeys().size()) {
+        filterBuilder.setError("Specified key <" + keyName +
             "> is not a partitioning key for the table");
+        return -1;
       }
 
       return partitionColumnIndex;
@@ -365,28 +445,38 @@ public class ExpressionTree {
      * In future this may become different for SQL and JDOQL filter pushdown.
      * @param table The table.
      * @param partColIndex The index of the column to check.
+     * @param filterBuilder filter builder used to report error, if any.
      * @return The parameter string.
      */
-    public String getFilterPushdownParam(Table table, int partColIndex) throws MetaException {
+    public String getFilterPushdownParam(
+        Table table, int partColIndex, FilterBuilder filterBuilder) throws MetaException {
       boolean isIntegralSupported = doesOperatorSupportIntegral(operator);
       String colType = table.getPartitionKeys().get(partColIndex).getType();
       // Can only support filters on partition keys of string type, or of integral types when the operator allows it
       if (!colType.equals(serdeConstants.STRING_TYPE_NAME)
           && (!isIntegralSupported || !isIntegralType(colType))) {
-        throw new MetaException("Filtering is supported only on partition keys of type " +
+        filterBuilder.setError("Filtering is supported only on partition keys of type " +
             "string" + (isIntegralSupported ? ", or integral types" : ""));
+        return null;
       }
 
       boolean isStringValue = value instanceof String;
       if (!isStringValue && (!isIntegralSupported || !(value instanceof Long))) {
-        throw new MetaException("Filtering is supported only on partition keys of type " +
+        filterBuilder.setError("Filtering is supported only on partition keys of type " +
             "string" + (isIntegralSupported ? ", or integral types" : ""));
+        return null;
       }
 
       return isStringValue ? (String) value : Long.toString((Long) value);
     }
   }
 
+  public void accept(TreeVisitor treeVisitor) throws MetaException {
+    if (this.root != null) {
+      this.root.accept(treeVisitor);
+    }
+  }
+
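
The new accept entry point starts a visitor walk at the root when the tree is non-empty. TreeVisitor's interface is not part of this diff; the sketch below shows only the double-dispatch shape that root.accept(treeVisitor) implies, with hypothetical method names:

    // Hypothetical shape; the real TreeVisitor lives in ExpressionTree
    // and is not shown in this patch.
    interface TreeVisitorSketch {
      void visit(TreeNode node) throws MetaException; // inner (logical) nodes
      void visit(LeafNode node) throws MetaException; // leaf comparisons
    }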
   /**
    * For equals and not-equals, we can make the JDO query much faster by filtering
    * based on the partition name. For a condition like ds="2010-10-01", we can see
@@ -400,24 +490,24 @@ public class ExpressionTree {
    * Case where the partition key column is at the end of the name. (no
   * trailing '/')
    *
-   * @param keyName name of the partition col e.g. ds
-   * @param value
-   * @param paramName name of the parameter to use for JDOQL
-   * @param params a map from the parameter name to their values
+   * @param keyName name of the partition column e.g. ds.
+   * @param value The value to compare to.
+   * @param paramName name of the parameter to use for JDOQL.
+   * @param params a map from the parameter name to their values.
+   * @param keyPos The index of the filtered partition column in the table's list of partition keys.
+   * @param keyCount Partition column count for the table.
    * @param isEq whether the operator is equals, or not-equals.
-   * @return
-   * @throws MetaException
+   * @param fltr Filter builder used to append the filter, or report errors.
    */
-  private static String makeFilterForEquals(String keyName, String value, String paramName,
-      Map<String, Object> params, int keyPos, int keyCount, boolean isEq)
-      throws MetaException {
+  private static void makeFilterForEquals(String keyName, String value, String paramName,
+      Map<String, Object> params, int keyPos, int keyCount, boolean isEq, FilterBuilder fltr)
+          throws MetaException {
     Map<String, String> partKeyToVal = new HashMap<String, String>();
     partKeyToVal.put(keyName, value);
     // If a partition has multiple partition keys, we make the assumption that
     // makePartName with one key will return a substring of the name made
    // with all the keys.
     String escapedNameFragment = Warehouse.makePartName(partKeyToVal, false);
-    StringBuilder fltr = new StringBuilder();
     if (keyCount == 1) {
      // Case where there are no other partition columns
       params.put(paramName, escapedNameFragment);
@@ -427,13 +517,13 @@ public class ExpressionTree {
       // be a leading '/' but no trailing '/'
       params.put(paramName, "/" + escapedNameFragment);
       fltr.append(isEq ? "" : "!").append("partitionName.endsWith(")
-        .append(paramName).append(')');
+        .append(paramName).append(")");
     } else if (keyPos == 0) {
-      // Case where the parttion column is at the beginning of the name. There will
+      // Case where the partition column is at the beginning of the name. There will
       // be a trailing '/' but no leading '/'
       params.put(paramName, escapedNameFragment + "/");
       fltr.append(isEq ? "" : "!").append("partitionName.startsWith(")
-        .append(paramName).append(')');
+        .append(paramName).append(")");
     } else {
       // Case where the partition column is in the middle of the name. There will
      // be a leading '/' and a trailing '/'
@@ -441,7 +531,6 @@ public class ExpressionTree {
       fltr.append("partitionName.indexOf(").append(paramName).append(")")
         .append(isEq ? ">= 0" : "< 0");
     }
-    return fltr.toString();
   }
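
Summarizing the four positional cases of makeFilterForEquals for a filter ds = '2010-10-01' (isEq case; not-equals negates each test; the single-column equality form and the middle-case parameter value fall between hunks here and follow the comments above):

    key position        JDOQL test emitted                   parameter value
    only column         partitionName == param               ds=2010-10-01
    last of several     partitionName.endsWith(param)        /ds=2010-10-01
    first of several    partitionName.startsWith(param)      ds=2010-10-01/
    middle              partitionName.indexOf(param) >= 0    /ds=2010-10-01/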
 
   /**
@@ -489,19 +578,19 @@ public class ExpressionTree {
    * @param params the input map which is updated with the
   *     parameterized values. Keys are the parameter names and values
    *     are the parameter values
-   * @return the string representation of the expression tree
-   * @throws MetaException
+   * @param filterBuilder the filter builder to append to.
    */
-  public String generateJDOFilter(Table table,
-        Map<String, Object> params) throws MetaException {
-    if( root == null ) {
-      return "";
+  public void generateJDOFilterFragment(Table table, Map<String, Object> params,
+      FilterBuilder filterBuilder) throws MetaException {
+    if (root == null) {
+      return;
     }
 
-    return root.generateJDOFilter(table, params);
+    filterBuilder.append(" && ( ");
+    root.generateJDOFilter(table, params, filterBuilder);
+    filterBuilder.append(" )");
   }
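
Note the rename from generateJDOFilter to generateJDOFilterFragment: the method now wraps the tree in " && ( ... )", so its output is a conjunct appended to a caller-supplied base filter rather than a standalone expression. A sketch of the composition, with a hypothetical caller (the base predicate shown is illustrative):

    // Sketch: only generateJDOFilterFragment comes from this patch.
    static void addTreeFilter(ExpressionTree tree, Table table,
        Map<String, Object> params, FilterBuilder fb) throws MetaException {
      fb.append("table.tableName == t1 && table.database.name == t2"); // caller's base predicate
      tree.generateJDOFilterFragment(table, params, fb); // appends " && ( ... )", or nothing for an empty tree
    }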
 
-
   /** Case insensitive ANTLR string stream */
   public static class ANTLRNoCaseStringStream extends ANTLRStringStream {
     public ANTLRNoCaseStringStream (String input) {

Modified: hive/branches/vectorization/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java (original)
+++ hive/branches/vectorization/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java Wed Sep 25 15:04:49 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.metastore
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
@@ -50,6 +51,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.thrift.TException;
 
 /**
  * A wrapper around {@link org.apache.hadoop.hive.metastore.ObjectStore}
@@ -303,6 +305,13 @@ public class DummyRawStoreControlledComm
   }
 
   @Override
+  public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
+      String defaultPartitionName, short maxParts, Set<Partition> result) throws TException {
+    return objectStore.getPartitionsByExpr(
+        dbName, tblName, expr, defaultPartitionName, maxParts, result);
+  }
+
+  @Override
   public Table markPartitionForEvent(String dbName, String tblName,
       Map<String, String> partVals, PartitionEventType evtType)
       throws MetaException, UnknownTableException, InvalidPartitionException,
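
getPartitionsByExpr is the new RawStore entry point exercised by these test doubles: it takes a partition-pruning expression serialized to byte[] plus a default partition name, fills the result set, and returns a boolean whose exact contract is not visible in this diff (presumably whether the result may contain partitions the caller must still filter; the JDO dummy below returns false). A hedged calling sketch; store, exprBytes, and the table names are assumptions:

    // Sketch: negative maxParts is assumed to mean "no limit".
    Set<Partition> result = new HashSet<Partition>();
    boolean mayNeedClientFilter = store.getPartitionsByExpr(
        "default", "page_view", exprBytes, "__HIVE_DEFAULT_PARTITION__", (short) -1, result);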

Modified: hive/branches/vectorization/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java (original)
+++ hive/branches/vectorization/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java Wed Sep 25 15:04:49 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore
 
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import junit.framework.Assert;
 
@@ -51,6 +52,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.thrift.TException;
 
 /**
  *
@@ -322,6 +324,12 @@ public class DummyRawStoreForJdoConnecti
   }
 
   @Override
+  public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
+      String defaultPartitionName, short maxParts, Set<Partition> result) throws TException {
+    return false;
+  }
+
+  @Override
   public Table markPartitionForEvent(String dbName, String tblName, Map<String, String> partVals,
       PartitionEventType evtType) throws MetaException, UnknownTableException,
       InvalidPartitionException, UnknownPartitionException {

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Wed Sep 25 15:04:49 2013
@@ -158,14 +158,13 @@ import org.apache.hadoop.hive.ql.session
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
 import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.stringtemplate.v4.ST;
 
@@ -3423,11 +3422,13 @@ public class DDLTask extends Task<DDLWor
    */
   private void validateSerDe(String serdeName) throws HiveException {
     try {
-      Deserializer d = SerDeUtils.lookupDeserializer(serdeName);
+      Deserializer d = ReflectionUtils.newInstance(conf.getClassByName(serdeName).
+        asSubclass(Deserializer.class), conf);
       if (d != null) {
         LOG.debug("Found class for " + serdeName);
       }
-    } catch (SerDeException e) {
+    } catch (Exception e) {
       throw new HiveException("Cannot validate serde: " + serdeName, e);
     }
   }
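
validateSerDe now instantiates the serde through ReflectionUtils rather than SerDeUtils.lookupDeserializer, which both proves the class is loadable from the task's Configuration and, for Configurable serdes, injects the conf the same way the runtime path does (that motivation is inferred, not stated in the commit). The same idiom in isolation, with an illustrative serde name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.serde2.Deserializer;
    import org.apache.hadoop.util.ReflectionUtils;

    public class SerDeLoadSketch {
      // Loads and instantiates a Deserializer by class name, wiring in conf.
      public static Deserializer load(Configuration conf, String serdeName) throws Exception {
        return ReflectionUtils.newInstance(
            conf.getClassByName(serdeName).asSubclass(Deserializer.class), conf);
      }
    }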

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Wed Sep 25 15:04:49 2013
@@ -400,7 +400,7 @@ public class FetchOperator implements Se
       this.inputSplits = inputSplits;
 
       splitNum = 0;
-      serde = partDesc.getDeserializerClass().newInstance();
+      serde = partDesc.getDeserializer();
       serde.initialize(job, partDesc.getOverlayedProperties());
 
       if (currTbl != null) {
@@ -636,7 +636,7 @@ public class FetchOperator implements Se
       // Get the OI corresponding to all the partitions
       for (PartitionDesc listPart : listParts) {
         partition = listPart;
-        Deserializer partSerde = listPart.getDeserializerClass().newInstance();
+        Deserializer partSerde = listPart.getDeserializer();
         partSerde.initialize(job, listPart.getOverlayedProperties());
 
         partitionedTableOI = ObjectInspectorConverters.getConvertedOI(

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java Wed Sep 25 15:04:49 2013
@@ -176,6 +176,7 @@ public final class FunctionRegistry {
    */
   static Map<String, FunctionInfo> mFunctions = Collections.synchronizedMap(new LinkedHashMap<String, FunctionInfo>());
 
+  static Set<Class<?>> nativeUdfs = Collections.synchronizedSet(new HashSet<Class<?>>());
   /*
    * PTF variables
    * */
@@ -486,6 +487,7 @@ public final class FunctionRegistry {
       FunctionInfo fI = new FunctionInfo(isNative, displayName,
           new GenericUDFBridge(displayName, isOperator, UDFClass.getName()));
       mFunctions.put(functionName.toLowerCase(), fI);
+      registerNativeStatus(fI);
     } else {
       throw new RuntimeException("Registering UDF Class " + UDFClass
           + " which does not extend " + UDF.class);
@@ -508,6 +510,7 @@ public final class FunctionRegistry {
       FunctionInfo fI = new FunctionInfo(isNative, functionName,
           (GenericUDF) ReflectionUtils.newInstance(genericUDFClass, null));
       mFunctions.put(functionName.toLowerCase(), fI);
+      registerNativeStatus(fI);
     } else {
       throw new RuntimeException("Registering GenericUDF Class "
           + genericUDFClass + " which does not extend " + GenericUDF.class);
@@ -530,6 +533,7 @@ public final class FunctionRegistry {
       FunctionInfo fI = new FunctionInfo(isNative, functionName,
           (GenericUDTF) ReflectionUtils.newInstance(genericUDTFClass, null));
       mFunctions.put(functionName.toLowerCase(), fI);
+      registerNativeStatus(fI);
     } else {
       throw new RuntimeException("Registering GenericUDTF Class "
           + genericUDTFClass + " which does not extend " + GenericUDTF.class);
@@ -955,8 +959,9 @@ public final class FunctionRegistry {
 
   public static void registerGenericUDAF(boolean isNative, String functionName,
       GenericUDAFResolver genericUDAFResolver) {
-    mFunctions.put(functionName.toLowerCase(), new FunctionInfo(isNative,
-        functionName.toLowerCase(), genericUDAFResolver));
+    FunctionInfo fi = new FunctionInfo(isNative, functionName.toLowerCase(), genericUDAFResolver);
+    mFunctions.put(functionName.toLowerCase(), fi);
+    registerNativeStatus(fi);
   }
 
   public static void registerTemporaryUDAF(String functionName,
@@ -970,9 +975,11 @@ public final class FunctionRegistry {
 
   public static void registerUDAF(boolean isNative, String functionName,
       Class<? extends UDAF> udafClass) {
-    mFunctions.put(functionName.toLowerCase(), new FunctionInfo(isNative,
+    FunctionInfo fi = new FunctionInfo(isNative,
         functionName.toLowerCase(), new GenericUDAFBridge(
-        (UDAF) ReflectionUtils.newInstance(udafClass, null))));
+        (UDAF) ReflectionUtils.newInstance(udafClass, null)));
+    mFunctions.put(functionName.toLowerCase(), fi);
+    registerNativeStatus(fi);
   }
 
   public static void unregisterTemporaryUDF(String functionName) throws HiveException {
@@ -1560,6 +1567,7 @@ public final class FunctionRegistry {
     FunctionInfo fI = new FunctionInfo(false, macroName,
         new GenericUDFMacro(macroName, body, colNames, colTypes));
     mFunctions.put(macroName.toLowerCase(), fI);
+    registerNativeStatus(fI);
   }
 
   /**
@@ -1717,6 +1725,7 @@ public final class FunctionRegistry {
   {
     FunctionInfo tInfo = new FunctionInfo(name, tFnCls);
     mFunctions.put(name.toLowerCase(), tInfo);
+    registerNativeStatus(tInfo);
   }
 
   /**
@@ -1738,4 +1747,21 @@ public final class FunctionRegistry {
     }
     return false;
   }
+
+  /**
+   * @param fnExpr Function expression.
+   * @return True iff the fnExpr represents a Hive built-in function.
+   */
+  public static boolean isNativeFuncExpr(ExprNodeGenericFuncDesc fnExpr) {
+    Class<?> udfClass = getUDFClassFromExprDesc(fnExpr);
+    if (udfClass == null) {
+      udfClass = getGenericUDFClassFromExprDesc(fnExpr);
+    }
+    return nativeUdfs.contains(udfClass);
+  }
+
+  private static void registerNativeStatus(FunctionInfo fi) {
+    if (!fi.isNative()) return;
+    nativeUdfs.add(fi.getFunctionClass());
+  }
 }
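
The nativeUdfs set and isNativeFuncExpr give callers a cheap test for whether a function expression refers to a Hive built-in, which matters when deciding what can be evaluated away from the client. A hedged usage sketch; the guard method is hypothetical:

    // Hypothetical guard; only isNativeFuncExpr comes from this patch.
    static boolean canEvaluateRemotely(ExprNodeGenericFuncDesc funcDesc) {
      // Temporary/user functions never reach nativeUdfs, so they are
      // rejected: a remote evaluator could not resolve their classes.
      return FunctionRegistry.isNativeFuncExpr(funcDesc);
    }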

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java Wed Sep 25 15:04:49 2013
@@ -30,6 +30,7 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.JoinDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.SerDe;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
@@ -316,7 +317,7 @@ public class JoinUtil {
       // remove the last ','
       colNames.setLength(colNames.length() - 1);
       colTypes.setLength(colTypes.length() - 1);
-      TableDesc tblDesc = new TableDesc(LazyBinarySerDe.class,
+      TableDesc tblDesc = new TableDesc(
           SequenceFileInputFormat.class, HiveSequenceFileOutputFormat.class,
           Utilities.makeProperties(
           org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, ""
@@ -324,7 +325,8 @@ public class JoinUtil {
           org.apache.hadoop.hive.serde.serdeConstants.LIST_COLUMNS, colNames
           .toString(),
           org.apache.hadoop.hive.serde.serdeConstants.LIST_COLUMN_TYPES,
-          colTypes.toString()));
+          colTypes.toString(),
+          serdeConstants.SERIALIZATION_LIB, LazyBinarySerDe.class.getName()));
       spillTableDesc[tag] = tblDesc;
     }
     return spillTableDesc;
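
The spill TableDesc now names its serde via the SERIALIZATION_LIB property instead of a class argument to the TableDesc constructor, keeping the serde choice in the descriptor's properties where the deserializer can be resolved lazily (this reading of the motivation is inferred). The property in isolation:

    // Sketch: the property that now carries the serde choice; in the patch it
    // is merged with the other properties via Utilities.makeProperties.
    Properties props = new Properties();
    props.setProperty(serdeConstants.SERIALIZATION_LIB,
        org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe.class.getName());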

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java Wed Sep 25 15:04:49 2013
@@ -186,12 +186,7 @@ public class MapOperator extends Operato
     opCtx.tableName = String.valueOf(partProps.getProperty("name"));
     opCtx.partName = String.valueOf(partSpec);
 
-    Class serdeclass = pd.getDeserializerClass();
-    if (serdeclass == null) {
-      String className = checkSerdeClassName(pd.getSerdeClassName(), opCtx.tableName);
-      serdeclass = hconf.getClassByName(className);
-    }
-
+    Class serdeclass = hconf.getClassByName(pd.getSerdeClassName());
     opCtx.deserializer = (Deserializer) serdeclass.newInstance();
     opCtx.deserializer.initialize(hconf, partProps);
 
@@ -285,14 +280,7 @@ public class MapOperator extends Operato
         Properties tblProps = tableDesc.getProperties();
         // If the partition does not exist, use table properties
         Properties partProps = isPartitioned(pd) ? pd.getOverlayedProperties() : tblProps;
-
-        Class sdclass = pd.getDeserializerClass();
-        if (sdclass == null) {
-          String className = checkSerdeClassName(pd.getSerdeClassName(),
-              pd.getProperties().getProperty("name"));
-          sdclass = hconf.getClassByName(className);
-        }
-
+        Class sdclass = hconf.getClassByName(pd.getSerdeClassName());
         Deserializer partDeserializer = (Deserializer) sdclass.newInstance();
         partDeserializer.initialize(hconf, partProps);
         StructObjectInspector partRawRowObjectInspector = (StructObjectInspector) partDeserializer
@@ -301,13 +289,8 @@ public class MapOperator extends Operato
         StructObjectInspector tblRawRowObjectInspector = tableDescOI.get(tableDesc);
         if ((tblRawRowObjectInspector == null) ||
             (identityConverterTableDesc.contains(tableDesc))) {
-          sdclass = tableDesc.getDeserializerClass();
-          if (sdclass == null) {
-            String className = checkSerdeClassName(tableDesc.getSerdeClassName(),
-                tableDesc.getProperties().getProperty("name"));
-            sdclass = hconf.getClassByName(className);
-          }
-          Deserializer tblDeserializer = (Deserializer) sdclass.newInstance();
+          sdclass = hconf.getClassByName(tableDesc.getSerdeClassName());
+          Deserializer tblDeserializer = (Deserializer) sdclass.newInstance();
           tblDeserializer.initialize(hconf, tblProps);
           tblRawRowObjectInspector =
               (StructObjectInspector) ObjectInspectorConverters.getConvertedOI(
@@ -336,14 +319,6 @@ public class MapOperator extends Operato
     return pd.getPartSpec() != null && !pd.getPartSpec().isEmpty();
   }
 
-  private String checkSerdeClassName(String className, String tableName) throws HiveException {
-    if (className == null || className.isEmpty()) {
-      throw new HiveException(
-          "SerDe class or the SerDe class name is not set for table: " + tableName);
-    }
-    return className;
-  }
-
   public void setChildren(Configuration hconf) throws HiveException {
 
     Path fpath = new Path(HiveConf.getVar(hconf,

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java Wed Sep 25 15:04:49 2013
@@ -613,6 +613,8 @@ public abstract class Operator<T extends
         op.close(abort);
       }
 
+      // Drop the reference to the output collector once all children have
+      // been closed; a closed operator should no longer write to it.
+      out = null;
+
       LOG.info(id + " Close done");
     } catch (HiveException e) {
       e.printStackTrace();

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java?rev=1526201&r1=1526200&r2=1526201&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java Wed Sep 25 15:04:49 2013
@@ -49,6 +49,7 @@ package org.apache.hadoop.hive.ql.exec;
  * partial);
  * 
  */
+@Deprecated
 public class UDAF {
 
   /**


