hive-commits mailing list archives

From: gunt...@apache.org
Subject: svn commit: r1530173 [1/4] - in /hive/branches/tez: ./ beeline/src/java/org/apache/hive/beeline/ cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/conf/ conf/ contrib/src/java/org/apache/hadoop/hive/contrib/util/typedbytes...
Date: Tue, 08 Oct 2013 07:43:14 GMT
Author: gunther
Date: Tue Oct  8 07:43:11 2013
New Revision: 1530173

URL: http://svn.apache.org/r1530173
Log:
Merge latest trunk into branch. (Gunther Hagleitner)

Added:
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java
      - copied unchanged from r1530172, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
      - copied unchanged from r1530172, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java
      - copied unchanged from r1530172, hive/trunk/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java
      - copied unchanged from r1530172, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/processors/
      - copied from r1530172, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/processors/
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java
      - copied unchanged from r1530172, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
      - copied unchanged from r1530172, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/security/TestStorageBasedClientSideAuthorizationProvider.java
      - copied unchanged from r1530172, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/security/TestStorageBasedClientSideAuthorizationProvider.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java
      - copied unchanged from r1530172, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java
    hive/branches/tez/ql/src/test/queries/clientpositive/filter_numeric.q
      - copied unchanged from r1530172, hive/trunk/ql/src/test/queries/clientpositive/filter_numeric.q
    hive/branches/tez/ql/src/test/queries/clientpositive/ptf_matchpath.q
      - copied unchanged from r1530172, hive/trunk/ql/src/test/queries/clientpositive/ptf_matchpath.q
    hive/branches/tez/ql/src/test/queries/clientpositive/vectorization_pushdown.q
      - copied unchanged from r1530172, hive/trunk/ql/src/test/queries/clientpositive/vectorization_pushdown.q
    hive/branches/tez/ql/src/test/results/clientpositive/filter_numeric.q.out
      - copied unchanged from r1530172, hive/trunk/ql/src/test/results/clientpositive/filter_numeric.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/ptf_matchpath.q.out
      - copied unchanged from r1530172, hive/trunk/ql/src/test/results/clientpositive/ptf_matchpath.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/vectorization_pushdown.q.out
      - copied unchanged from r1530172, hive/trunk/ql/src/test/results/clientpositive/vectorization_pushdown.q.out
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/BaseCharTypeInfo.java
      - copied unchanged from r1530172, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/BaseCharTypeInfo.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/VarcharTypeInfo.java
      - copied unchanged from r1530172, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/VarcharTypeInfo.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/VarcharUtils.java
      - copied unchanged from r1530172, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/VarcharUtils.java
Removed:
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/NPath.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/security/TestDefaultHiveMetastoreAuthorizationProvider.java
    hive/branches/tez/ql/src/test/queries/clientpositive/ptf_npath.q
    hive/branches/tez/ql/src/test/results/clientpositive/ptf_npath.q.out
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/BaseTypeParams.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/ParameterizedPrimitiveTypeUtils.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/PrimitiveTypeSpec.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/VarcharTypeParams.java
Modified:
    hive/branches/tez/   (props changed)
    hive/branches/tez/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
    hive/branches/tez/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
    hive/branches/tez/build.xml
    hive/branches/tez/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
    hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/tez/conf/hive-default.xml.template
    hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/util/typedbytes/TypedBytesRecordReader.java
    hive/branches/tez/eclipse-templates/.classpath
    hive/branches/tez/hcatalog/src/test/e2e/hcatalog/drivers/TestDriverPig.pm
    hive/branches/tez/hcatalog/src/test/e2e/hcatalog/tests/pig.conf
    hive/branches/tez/hcatalog/src/test/e2e/templeton/tests/doas.conf
    hive/branches/tez/hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java
    hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
    hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
    hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
    hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcat.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLower.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFReflect2.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToVarchar.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUpper.java
    hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java
    hive/branches/tez/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFMacro.java
    hive/branches/tez/ql/src/test/queries/clientpositive/ptf_register_tblfn.q
    hive/branches/tez/ql/src/test/results/clientnegative/invalid_varchar_length_1.q.out
    hive/branches/tez/ql/src/test/results/clientnegative/invalid_varchar_length_2.q.out
    hive/branches/tez/ql/src/test/results/clientnegative/invalid_varchar_length_3.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/ptf_register_tblfn.q.out
    hive/branches/tez/ql/src/test/results/clientpositive/show_functions.q.out
    hive/branches/tez/ql/src/test/results/compiler/plan/input9.q.xml
    hive/branches/tez/serde/if/serde.thrift
    hive/branches/tez/serde/src/gen/thrift/gen-cpp/serde_constants.cpp
    hive/branches/tez/serde/src/gen/thrift/gen-cpp/serde_constants.h
    hive/branches/tez/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/serdeConstants.java
    hive/branches/tez/serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
    hive/branches/tez/serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
    hive/branches/tez/serde/src/gen/thrift/gen-rb/serde_constants.rb
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/RegexSerDe.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroObjectInspectorGenerator.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDe.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyFactory.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveVarchar.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/AbstractPrimitiveLazyObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyBinaryObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyBooleanObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyByteObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyDateObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyDoubleObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyFloatObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyHiveDecimalObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyHiveVarcharObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyIntObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyLongObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyPrimitiveObjectInspectorFactory.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyShortObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyStringObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyTimestampObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyVoidObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryHiveVarchar.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/PrimitiveObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/AbstractPrimitiveJavaObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/AbstractPrimitiveObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/AbstractPrimitiveWritableObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaBinaryObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaBooleanObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaByteObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaDateObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaDoubleObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaFloatObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveDecimalObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveVarcharObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaIntObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaLongObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaShortObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaStringObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaVoidObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorFactory.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableBinaryObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableBooleanObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableByteObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantHiveVarcharObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableDateObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableDoubleObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableFloatObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveDecimalObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveVarcharObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableIntObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableLongObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableShortObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableStringObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableTimestampObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableVoidObjectInspector.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/PrimitiveTypeInfo.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java
    hive/branches/tez/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
    hive/branches/tez/serde/src/test/org/apache/hadoop/hive/serde2/lazybinary/TestLazyBinarySerDe.java
    hive/branches/tez/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java
    hive/branches/tez/service/src/java/org/apache/hive/service/cli/TypeDescriptor.java
    hive/branches/tez/service/src/java/org/apache/hive/service/cli/TypeQualifiers.java
    hive/branches/tez/service/src/java/org/apache/hive/service/cli/operation/AddResourceOperation.java
    hive/branches/tez/service/src/java/org/apache/hive/service/cli/operation/DeleteResourceOperation.java
    hive/branches/tez/service/src/java/org/apache/hive/service/cli/operation/DfsOperation.java
    hive/branches/tez/service/src/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
    hive/branches/tez/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
    hive/branches/tez/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
    hive/branches/tez/service/src/java/org/apache/hive/service/cli/operation/SetOperation.java
    hive/branches/tez/shims/ivy.xml

Propchange: hive/branches/tez/
------------------------------------------------------------------------------
  Merged /hive/trunk:r1529346-1530172

Modified: hive/branches/tez/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java (original)
+++ hive/branches/tez/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java Tue Oct  8 07:43:11 2013
@@ -73,6 +73,12 @@ public class HiveSchemaHelper {
      * @return
      */
     public String cleanseCommand(String dbCommand);
+
+    /***
+     * Does the DB required table/column names quoted
+     * @return
+     */
+    public boolean needsQuotedIdentifier();
   }
 
 
@@ -115,6 +121,11 @@ public class HiveSchemaHelper {
       }
       return dbCommand;
     }
+
+    @Override
+    public boolean needsQuotedIdentifier() {
+      return false;
+    }
   }
 
 
@@ -215,6 +226,11 @@ public class HiveSchemaHelper {
     public boolean isNestedScript(String dbCommand) {
       return dbCommand.startsWith(POSTGRES_NESTING_TOKEN);
     }
+
+    @Override
+    public boolean needsQuotedIdentifier() {
+      return true;
+    }
   }
 
   //Oracle specific parser

Modified: hive/branches/tez/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java (original)
+++ hive/branches/tez/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java Tue Oct  8 07:43:11 2013
@@ -116,7 +116,12 @@ public class HiveSchemaTool {
   // read schema version from metastore
   private String getMetaStoreSchemaVersion(Connection metastoreConn)
         throws HiveMetaException {
-    String versionQuery = "select t.SCHEMA_VERSION from VERSION t";
+    String versionQuery;
+    if (HiveSchemaHelper.getDbCommandParser(dbType).needsQuotedIdentifier()) {
+      versionQuery = "select t.\"SCHEMA_VERSION\" from \"VERSION\" t";
+    } else {
+      versionQuery = "select t.SCHEMA_VERSION from VERSION t";
+    }
     try {
       Statement stmt = metastoreConn.createStatement();
       ResultSet res = stmt.executeQuery(versionQuery);

Modified: hive/branches/tez/build.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/build.xml?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/build.xml (original)
+++ hive/branches/tez/build.xml Tue Oct  8 07:43:11 2013
@@ -1456,8 +1456,8 @@
         output.file="${mvn.jar.dir}/hive-metastore-${version}.jar.asc"
         gpg.passphrase="${gpg.passphrase}"/>
     <sign-artifact
-        input.file="${mvn.jar.dir}/hive-metastore-${version}.pom"
-        output.file="${mvn.jar.dir}/hive-metastore-${version}.pom.asc"
+        input.file="${mvn.pom.dir}/hive-metastore-${version}.pom"
+        output.file="${mvn.pom.dir}/hive-metastore-${version}.pom.asc"
         gpg.passphrase="${gpg.passphrase}"/>
 
     <!-- hive-serde -->

Modified: hive/branches/tez/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java (original)
+++ hive/branches/tez/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java Tue Oct  8 07:43:11 2013
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hive.cli;
 
+import static org.apache.hadoop.util.StringUtils.stringifyException;
+
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -30,6 +32,7 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.sql.SQLException;
 
 import jline.ArgumentCompletor;
 import jline.ArgumentCompletor.AbstractArgumentDelimiter;
@@ -122,7 +125,7 @@ public class CliDriver {
           this.processFile(cmd_1);
         } catch (IOException e) {
           console.printError("Failed processing file "+ cmd_1 +" "+ e.getLocalizedMessage(),
-            org.apache.hadoop.util.StringUtils.stringifyException(e));
+            stringifyException(e));
           ret = 1;
         }
       }
@@ -146,7 +149,7 @@ public class CliDriver {
         }
       } catch (Exception e) {
         console.printError("Exception raised from Shell command " + e.getLocalizedMessage(),
-            org.apache.hadoop.util.StringUtils.stringifyException(e));
+            stringifyException(e));
         ret = 1;
       }
 
@@ -212,8 +215,14 @@ public class CliDriver {
         }
       }
     } else { // local mode
-      CommandProcessor proc = CommandProcessorFactory.get(tokens[0], (HiveConf) conf);
-      ret = processLocalCmd(cmd, proc, ss);
+      try {
+        CommandProcessor proc = CommandProcessorFactory.get(tokens[0], (HiveConf) conf);
+        ret = processLocalCmd(cmd, proc, ss);
+      } catch (SQLException e) {
+        console.printError("Failed processing command " + tokens[0] + " " + e.getLocalizedMessage(),
+          org.apache.hadoop.util.StringUtils.stringifyException(e));
+        ret = 1;
+      }
     }
 
     return ret;

Modified: hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Tue Oct  8 07:43:11 2013
@@ -774,7 +774,9 @@ public class HiveConf extends Configurat
     HIVE_SERVER2_TABLE_TYPE_MAPPING("hive.server2.table.type.mapping", "CLASSIC"),
     HIVE_SERVER2_SESSION_HOOK("hive.server2.session.hook", ""),
 
-    HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list", null),
+    HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist", "set,reset,dfs,add,delete"),
+
+    HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list", ""),
 
     // If this is set all move tasks at the end of a multi-insert query will only begin once all
     // outputs are ready
@@ -1169,9 +1171,10 @@ public class HiveConf extends Configurat
     }
 
     // setup list of conf vars that are not allowed to change runtime
-    String restrictListStr = this.get(ConfVars.HIVE_CONF_RESTRICTED_LIST.toString());
-    if (restrictListStr != null) {
-      for (String entry : restrictListStr.split(",")) {
+    String restrictListStr = this.get(ConfVars.HIVE_CONF_RESTRICTED_LIST.toString(), "").trim();
+    for (String entry : restrictListStr.split(",")) {
+      entry = entry.trim();
+      if (!entry.isEmpty()) {
         restrictList.add(entry);
       }
     }

Modified: hive/branches/tez/conf/hive-default.xml.template
URL: http://svn.apache.org/viewvc/hive/branches/tez/conf/hive-default.xml.template?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/conf/hive-default.xml.template (original)
+++ hive/branches/tez/conf/hive-default.xml.template Tue Oct  8 07:43:11 2013
@@ -1514,6 +1514,18 @@
 </property>
 
 <property>
+  <name>hive.security.command.whitelist</name>
+  <value>set,reset,dfs,add,delete</value>
+  <description>Comma seperated list of non-SQL Hive commands users are authorized to execute</description>
+</property>
+
+<property>
+  <name>hive.conf.restricted.list</name>
+  <value></value>
+  <description>Comma seperated list of configuration options which are immutable at runtime</description>
+</property>
+
+<property>
   <name>hive.metastore.authorization.storage.checks</name>
   <value>false</value>
   <description>Should the metastore do authorization checks against the underlying storage

Modified: hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/util/typedbytes/TypedBytesRecordReader.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/util/typedbytes/TypedBytesRecordReader.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/util/typedbytes/TypedBytesRecordReader.java (original)
+++ hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/util/typedbytes/TypedBytesRecordReader.java Tue Oct  8 07:43:11 2013
@@ -39,9 +39,8 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveTypeEntry;
-import org.apache.hadoop.hive.serde2.typeinfo.ParameterizedPrimitiveTypeUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.FloatWritable;
@@ -59,16 +58,16 @@ public class TypedBytesRecordReader impl
   private DataInputStream din;
   private TypedBytesWritableInput tbIn;
 
-  private NonSyncDataOutputBuffer barrStr = new NonSyncDataOutputBuffer();
+  private final NonSyncDataOutputBuffer barrStr = new NonSyncDataOutputBuffer();
   private TypedBytesWritableOutput tbOut;
 
-  private ArrayList<Writable> row = new ArrayList<Writable>(0);
-  private ArrayList<String> rowTypeName = new ArrayList<String>(0);
+  private final ArrayList<Writable> row = new ArrayList<Writable>(0);
+  private final ArrayList<String> rowTypeName = new ArrayList<String>(0);
   private List<String> columnTypes;
 
-  private ArrayList<ObjectInspector> srcOIns = new ArrayList<ObjectInspector>();
-  private ArrayList<ObjectInspector> dstOIns = new ArrayList<ObjectInspector>();
-  private ArrayList<Converter> converters = new ArrayList<Converter>();
+  private final ArrayList<ObjectInspector> srcOIns = new ArrayList<ObjectInspector>();
+  private final ArrayList<ObjectInspector> dstOIns = new ArrayList<ObjectInspector>();
+  private final ArrayList<Converter> converters = new ArrayList<Converter>();
 
   private static Map<Type, String> typedBytesToTypeName = new HashMap<Type, String>();
   static {
@@ -89,10 +88,9 @@ public class TypedBytesRecordReader impl
     String columnTypeProperty = tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES);
     columnTypes = Arrays.asList(columnTypeProperty.split(","));
     for (String columnType : columnTypes) {
-      PrimitiveTypeEntry dstTypeEntry = PrimitiveObjectInspectorUtils
-          .getTypeEntryFromTypeName(columnType);
+      PrimitiveTypeInfo dstTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(columnType);
       dstOIns.add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
-          dstTypeEntry));
+          dstTypeInfo));
     }
   }
 
@@ -152,11 +150,10 @@ public class TypedBytesRecordReader impl
         row.add(wrt);
         rowTypeName.add(type.name());
         String typeName = typedBytesToTypeName.get(type);
-        PrimitiveTypeEntry srcTypeEntry = PrimitiveObjectInspectorUtils
-            .getTypeEntryFromTypeName(typeName);
+        PrimitiveTypeInfo srcTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(typeName);
         srcOIns
             .add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
-                srcTypeEntry));
+                srcTypeInfo));
         converters.add(ObjectInspectorConverters.getConverter(srcOIns.get(pos),
             dstOIns.get(pos)));
       } else {

Modified: hive/branches/tez/eclipse-templates/.classpath
URL: http://svn.apache.org/viewvc/hive/branches/tez/eclipse-templates/.classpath?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/eclipse-templates/.classpath (original)
+++ hive/branches/tez/eclipse-templates/.classpath Tue Oct  8 07:43:11 2013
@@ -20,7 +20,7 @@
   <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
   <classpathentry kind="lib" path="build/ivy/lib/hadoop0.20.shim/ant-1.6.5.jar"/>
   <classpathentry kind="lib" path="build/ivy/lib/hadoop0.20.shim/commons-cli-1.2.jar"/>
-  <classpathentry kind="lib" path="build/ivy/lib/hadoop0.20.shim/commons-codec-1.3.jar"/>
+  <classpathentry kind="lib" path="build/ivy/lib/hadoop0.20.shim/commons-codec-1.4.jar"/>
   <classpathentry kind="lib" path="build/ivy/lib/hadoop0.20.shim/commons-el-1.0.jar"/>
   <classpathentry kind="lib" path="build/ivy/lib/hadoop0.20.shim/commons-httpclient-3.0.1.jar"/>
   <classpathentry kind="lib" path="build/ivy/lib/hadoop0.20.shim/commons-logging-1.1.1.jar"/>

Modified: hive/branches/tez/hcatalog/src/test/e2e/hcatalog/drivers/TestDriverPig.pm
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/src/test/e2e/hcatalog/drivers/TestDriverPig.pm?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/src/test/e2e/hcatalog/drivers/TestDriverPig.pm (original)
+++ hive/branches/tez/hcatalog/src/test/e2e/hcatalog/drivers/TestDriverPig.pm Tue Oct  8 07:43:11 2013
@@ -184,7 +184,8 @@ sub runTest
 	               $testCmd->{'group'} .  "_" .  $testCmd->{'num'} . ".$i.out";
                    $tableName = $results[$i];
 	           $modifiedTestCmd{'num'} = $testCmd->{'num'} . "_" . $i . "_benchmark";
-                   $modifiedTestCmd{'pig'} = "a = load '$tableName' using org.apache.hive.hcatalog.pig.HCatLoader(); store a into ':OUTPATH:';";
+                   $tableLoader = (defined $testCmd->{'result_table_loader'} ? $testCmd->{'result_table_loader'} : "org.apache.hive.hcatalog.pig.HCatLoader()");
+                   $modifiedTestCmd{'pig'} = "a = load '$tableName' using $tableLoader; store a into ':OUTPATH:';";
                    my $r = $self->runPig(\%modifiedTestCmd, $log, 1, 1);
 	           $outputs[$i] = $r->{'output'};
                } else {

Modified: hive/branches/tez/hcatalog/src/test/e2e/hcatalog/tests/pig.conf
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/src/test/e2e/hcatalog/tests/pig.conf?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/src/test/e2e/hcatalog/tests/pig.conf (original)
+++ hive/branches/tez/hcatalog/src/test/e2e/hcatalog/tests/pig.conf Tue Oct  8 07:43:11 2013
@@ -330,6 +330,7 @@ exec
 e = load 'pig_hbase_1' using org.apache.hcatalog.pig.HCatLoader();
 store e into ':OUTPATH:';\,
                                 ,'result_table' => ['pig_hbase_1','?']
+                                ,'result_table_loader' => 'org.apache.hcatalog.pig.HCatLoader()'
 				,'sql'   => [ 'select name, avg(cast(age as decimal(10,5))), avg(gpa) from studenttab10k group by name;', 'select name, avg(cast(age as decimal(10,5))), avg(gpa) from studenttab10k group by name;' ]
                                 ,'floatpostprocess' => 1
                                 ,'delimiter' => '	'
@@ -350,6 +351,7 @@ d = foreach c generate name as key, (cha
 store d into 'pig_hbase_2_1' using org.apache.hcatalog.pig.HCatStorer();
 store d into 'pig_hbase_2_2' using org.apache.hcatalog.pig.HCatStorer();\,
                                 ,'result_table' => ['pig_hbase_2_1','pig_hbase_2_2']
+                                ,'result_table_loader' => 'org.apache.hcatalog.pig.HCatLoader()'
 				,'sql'   => [ 'select name, avg(cast(age as decimal(10,5))), avg(gpa) from studenttab10k group by name;', 'select name, avg(cast(age as decimal(10,5))), avg(gpa) from studenttab10k group by name;']
                                 ,'floatpostprocess' => 1
                                 ,'delimiter' => '	'

Modified: hive/branches/tez/hcatalog/src/test/e2e/templeton/tests/doas.conf
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/src/test/e2e/templeton/tests/doas.conf?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/src/test/e2e/templeton/tests/doas.conf (original)
+++ hive/branches/tez/hcatalog/src/test/e2e/templeton/tests/doas.conf Tue Oct  8 07:43:11 2013
@@ -104,12 +104,12 @@ $cfg = 
     
     {
              #descbe the table (as the table owner but using doAs)
-             #this should succeed (it seems reading metadata is allowed even if reading data is not)
+             #this should fail when using StorageBasedAuthorizationProvider
      'num' => 6,
      'method' => 'GET',
      'url' => ':TEMPLETON_URL:/templeton/v1/ddl/database/default/table/:UNAME:_doastab2/partition?user.name=:UNAME:?doAs=:DOAS:',
-     'status_code' => 200,
-     'json_field_substr_match' => {'database' => 'default',  'table' => ':UNAME:_doastab2'},
+     'status_code' => 500,
+     'json_field_substr_match' => {'error' => 'FAILED: AuthorizationException java\.security\.AccessControlException: action READ not permitted on path .* for user :UNAME:\?doAs=:DOAS:'},
     },
   
     {
@@ -118,7 +118,7 @@ $cfg = 
      'method' => 'DELETE',
      'url' => ':TEMPLETON_URL:/templeton/v1/ddl/database/default/table/:UNAME:_doastab2?user.name=:UNAME:&doAs=:DOAS:',
      'status_code' => 500,
-     'json_field_substr_match' => {'error' => 'FAILED: Execution Error, return code 1 from org\.apache\.hadoop\.hive\.ql\.exec\.DDLTask\. MetaException\(message:java\.security\.AccessControlException: action WRITE not permitted on path.* for user :DOAS:\)'},
+     'json_field_substr_match' => {'error' => 'Authorization failed:java\.security\.AccessControlException: action WRITE not permitted on path .* for user :DOAS:'},
     },
     {
              #descbe the table....

Modified: hive/branches/tez/hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java (original)
+++ hive/branches/tez/hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java Tue Oct  8 07:43:11 2013
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.List;
+import java.sql.SQLException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -332,8 +333,12 @@ public class HWISessionItem implements R
       String cmd_trimmed = cmd.trim();
       String[] tokens = cmd_trimmed.split("\\s+");
       String cmd_1 = cmd_trimmed.substring(tokens[0].length()).trim();
-
-      CommandProcessor proc = CommandProcessorFactory.get(tokens[0]);
+      CommandProcessor proc = null;
+      try {
+        proc = CommandProcessorFactory.get(tokens[0]);
+      } catch (SQLException e) {
+        l4j.error(getSessionName() + " error processing " + cmd, e);
+      }
       if (proc != null) {
         if (proc instanceof Driver) {
           Driver qp = (Driver) proc;

Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java Tue Oct  8 07:43:11 2013
@@ -46,6 +46,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.model.MDatabase;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
@@ -53,6 +54,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor;
+import org.apache.hadoop.hive.serde.serdeConstants;
 
 /**
  * This class contains the optimizations for MetaStore that rely on direct SQL access to
@@ -101,7 +103,9 @@ class MetaStoreDirectSql {
       tx = pm.currentTransaction();
       tx.begin();
     }
-    // This should work. If it doesn't, we will self-disable. What a PITA...
+    // Force the underlying db to initialize.
+    pm.newQuery(MDatabase.class, "name == ''").execute();
+    // Self-test query. If it doesn't work, we will self-disable. What a PITA...
     boolean isCompatibleDatastore = false;
     String selfTestQuery = "select \"DB_ID\" from \"DBS\"";
     try {
@@ -165,7 +169,7 @@ class MetaStoreDirectSql {
     }
     String list = repeat(",?", partNames.size()).substring(1);
     return getPartitionsViaSqlFilterInternal(dbName, tblName, null,
-        "and \"PARTITIONS\".\"PART_NAME\" in (" + list + ")",
+        "\"PARTITIONS\".\"PART_NAME\" in (" + list + ")",
         partNames, new ArrayList<String>(), max);
   }
 
@@ -179,7 +183,8 @@ class MetaStoreDirectSql {
   public List<Partition> getPartitionsViaSqlFilter(
       Table table, ExpressionTree tree, Integer max) throws MetaException {
     assert tree != null;
-    List<String> params = new ArrayList<String>(), joins = new ArrayList<String>();
+    List<Object> params = new ArrayList<Object>();
+    List<String> joins = new ArrayList<String>();
     String sqlFilter = PartitionFilterGenerator.generateSqlFilter(table, tree, params, joins);
     if (sqlFilter == null) {
       return null; // Cannot make SQL filter to push down.
@@ -232,7 +237,7 @@ class MetaStoreDirectSql {
    * @return List of partition objects.
    */
   private List<Partition> getPartitionsViaSqlFilterInternal(String dbName, String tblName,
-      Boolean isView, String sqlFilter, List<String> paramsForFilter,
+      Boolean isView, String sqlFilter, List<? extends Object> paramsForFilter,
       List<String> joinsForFilter, Integer max) throws MetaException {
     boolean doTrace = LOG.isDebugEnabled();
     dbName = dbName.toLowerCase();
@@ -255,9 +260,11 @@ class MetaStoreDirectSql {
     String queryText =
         "select \"PARTITIONS\".\"PART_ID\" from \"PARTITIONS\""
       + "  inner join \"TBLS\" on \"PARTITIONS\".\"TBL_ID\" = \"TBLS\".\"TBL_ID\" "
+      + "    and \"TBLS\".\"TBL_NAME\" = ? "
       + "  inner join \"DBS\" on \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\" "
-      + join(joinsForFilter, ' ') + " where \"TBLS\".\"TBL_NAME\" = ? and \"DBS\".\"NAME\" = ? "
-      + (sqlFilter == null ? "" : sqlFilter) + orderForFilter;
+      + "     and \"DBS\".\"NAME\" = ? "
+      + join(joinsForFilter, ' ')
+      + (sqlFilter == null ? "" : (" where " + sqlFilter)) + orderForFilter;
     Object[] params = new Object[paramsForFilter.size() + 2];
     params[0] = tblName;
     params[1] = dbName;
@@ -649,11 +656,11 @@ class MetaStoreDirectSql {
   private static class PartitionFilterGenerator extends TreeVisitor {
     private final Table table;
     private final FilterBuilder filterBuffer;
-    private final List<String> params;
+    private final List<Object> params;
     private final List<String> joins;
 
     private PartitionFilterGenerator(
-        Table table, List<String> params, List<String> joins) {
+        Table table, List<Object> params, List<String> joins) {
       this.table = table;
       this.params = params;
       this.joins = joins;
@@ -668,7 +675,7 @@ class MetaStoreDirectSql {
      * @return the string representation of the expression tree
      */
     public static String generateSqlFilter(Table table,
-        ExpressionTree tree, List<String> params, List<String> joins) throws MetaException {
+        ExpressionTree tree, List<Object> params, List<String> joins) throws MetaException {
       assert table != null;
       if (tree.getRoot() == null) {
         return "";
@@ -685,7 +692,7 @@ class MetaStoreDirectSql {
         if (joins.get(i) != null) continue;
         joins.remove(i--);
       }
-      return "and (" + visitor.filterBuffer.getFilter() + ")";
+      return "(" + visitor.filterBuffer.getFilter() + ")";
     }
 
     @Override
@@ -718,10 +725,28 @@ class MetaStoreDirectSql {
       int partColIndex = node.getPartColIndexForFilter(table, filterBuffer);
       if (filterBuffer.hasError()) return;
 
-      // Add parameters linearly; we are traversing leaf nodes LTR, so they would match correctly.
-      String valueAsString = node.getFilterPushdownParam(table, partColIndex, filterBuffer);
-      if (filterBuffer.hasError()) return;
-      params.add(valueAsString);
+      // We skipped 'like', other ops should all work as long as the types are right.
+      String colType = table.getPartitionKeys().get(partColIndex).getType();
+      boolean isStringCol = colType.equals(serdeConstants.STRING_TYPE_NAME);
+      if (!isStringCol && !serdeConstants.IntegralTypes.contains(colType)) {
+        filterBuffer.setError("Filter pushdown is only supported for string or integral columns");
+        return;
+      }
+
+      boolean isStringVal = node.value instanceof String;
+      if (!isStringVal && !(node.value instanceof Long)) {
+        filterBuffer.setError("Filter pushdown is only supported for string or integral values");
+        return;
+      } else if (isStringCol != isStringVal) {
+        // It's not clear how filtering for e.g. "stringCol > 5" should work (which side is
+        // to be coerced?). Let the expression evaluation sort this one out, not metastore.
+        filterBuffer.setError("Cannot push down filter for "
+            + (isStringCol ? "string" : "integral") + " column and value " + node.value);
+        return;
+      }
+
+      // Force string-based handling in some cases to be compatible with JDO pushdown.
+      boolean forceStringEq = !isStringCol && node.canJdoUseStringsWithIntegral();
 
       if (joins.isEmpty()) {
         // There's a fixed number of partition cols that we might have filters on. To avoid
@@ -738,8 +763,19 @@ class MetaStoreDirectSql {
             + " and \"FILTER" + partColIndex + "\".\"INTEGER_IDX\" = " + partColIndex);
       }
 
+      // Build the filter and add parameters linearly; we are traversing leaf nodes LTR.
       String tableValue = "\"FILTER" + partColIndex + "\".\"PART_KEY_VAL\"";
-      // TODO: need casts here if #doesOperatorSupportIntegral is amended to include lt/gt/etc.
+      if (!isStringCol && !forceStringEq) {
+        // The underlying database field is varchar, we need to compare numbers.
+        tableValue = "cast(" + tableValue + " as decimal(21,0))";
+        // This is a workaround for DERBY-6358; as such, it is pretty horrible.
+        tableValue = "(case when \"TBLS\".\"TBL_NAME\" = ? and \"DBS\".\"NAME\" = ? then "
+          + tableValue + " else null end)";
+        params.add(table.getTableName().toLowerCase());
+        params.add(table.getDbName().toLowerCase());
+      }
+      params.add(forceStringEq ? node.value.toString() : node.value);
+
       filterBuffer.append(node.isReverseOrder
           ? "(? " + node.operator.getSqlOp() + " " + tableValue + ")"
           : "(" + tableValue + " " + node.operator.getSqlOp() + " ?)");

Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Tue Oct  8 07:43:11 2013
@@ -1424,7 +1424,7 @@ public class ObjectStore implements RawS
       int maxParts, boolean allowSql, boolean allowJdo) throws MetaException {
     assert allowSql || allowJdo;
     boolean doTrace = LOG.isDebugEnabled();
-    boolean doUseDirectSql = canUseDirectSql(allowSql);
+    boolean doUseDirectSql = canUseDirectSql(allowSql, allowJdo);
 
     boolean success = false;
     List<Partition> parts = null;
@@ -1754,7 +1754,7 @@ public class ObjectStore implements RawS
     dbName = dbName.toLowerCase();
     tblName = tblName.toLowerCase();
     boolean doTrace = LOG.isDebugEnabled();
-    boolean doUseDirectSql = canUseDirectSql(allowSql);
+    boolean doUseDirectSql = canUseDirectSql(allowSql, allowJdo);
 
     boolean success = false;
     List<Partition> results = null;
@@ -1819,7 +1819,7 @@ public class ObjectStore implements RawS
     //       Filter.g stuff. That way this method and ...ByFilter would just be merged.
     ExpressionTree exprTree = makeExpressionTree(filter);
 
-    boolean doUseDirectSql = allowSql && isDirectSqlEnabled(maxParts);
+    boolean doUseDirectSql = canUseDirectSql(allowSql, allowJdo);
     boolean doTrace = LOG.isDebugEnabled();
     List<Partition> partitions = null;
     boolean hasUnknownPartitions = false;
@@ -1877,12 +1877,6 @@ public class ObjectStore implements RawS
     return hasUnknownPartitions;
   }
 
-  private boolean isDirectSqlEnabled(short maxParts) {
-    // There's no portable SQL limit. It doesn't make a lot of sense w/o offset anyway.
-    return (maxParts < 0)
-        && HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL);
-  }
-
   private class LikeChecker extends ExpressionTree.TreeVisitor {
     private boolean hasLike;
 
@@ -2062,7 +2056,7 @@ public class ObjectStore implements RawS
       throws MetaException, NoSuchObjectException {
     assert allowSql || allowJdo;
     boolean doTrace = LOG.isDebugEnabled();
-    boolean doUseDirectSql = canUseDirectSql(allowSql);
+    boolean doUseDirectSql = canUseDirectSql(allowSql, allowJdo);
 
     dbName = dbName.toLowerCase();
     tblName = tblName.toLowerCase();
@@ -2108,15 +2102,22 @@ public class ObjectStore implements RawS
     }
   }
 
-  private boolean canUseDirectSql(boolean allowSql) {
+  /**
+   * @param allowSql Whether SQL usage is allowed (always true outside test).
+   * @param allowJdo Whether JDO usage is allowed (always true outside test).
+   * @return Whether we can use direct SQL.
+   */
+  private boolean canUseDirectSql(boolean allowSql, boolean allowJdo) throws MetaException {
     // We don't allow direct SQL usage if we are inside a larger transaction (e.g. droptable).
     // That is because some databases (e.g. Postgres) abort the entire transaction when
     // any query fails, so the fallback from failed SQL to JDO is not possible.
     // TODO: Drop table can be very slow on large tables, we might want to address this.
-    return allowSql
-      && HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL)
-      && directSql.isCompatibleDatastore()
-      && !isActiveTransaction();
+    boolean isEnabled = !isActiveTransaction()
+        && HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL);
+    if (!allowJdo && isEnabled && !directSql.isCompatibleDatastore()) {
+      throw new MetaException("SQL is not operational"); // test path; SQL is enabled and broken.
+    }
+    return allowSql && isEnabled && directSql.isCompatibleDatastore();
   }
 
   /**

Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java Tue Oct  8 07:43:11 2013
@@ -348,7 +348,7 @@ public class ExpressionTree {
       int partitionColumnIndex = getPartColIndexForFilter(table, filterBuilder);
       if (filterBuilder.hasError()) return;
 
-      String valueAsString = getFilterPushdownParam(table, partitionColumnIndex, filterBuilder);
+      String valueAsString = getJdoFilterPushdownParam(table, partitionColumnIndex, filterBuilder);
       if (filterBuilder.hasError()) return;
 
       String paramName = PARAM_PREFIX + params.size();
@@ -394,25 +394,13 @@ public class ExpressionTree {
      * @param operator operator
      * @return true iff filter pushdown for this operator can be done for integral types.
      */
-    private static boolean doesOperatorSupportIntegral(Operator operator) {
-      // TODO: for SQL-based filtering, this could be amended if we added casts.
+    public boolean canJdoUseStringsWithIntegral() {
       return (operator == Operator.EQUALS)
           || (operator == Operator.NOTEQUALS)
           || (operator == Operator.NOTEQUALS2);
     }
 
     /**
-     * @param type type
-     * @return true iff type is an integral type.
-     */
-    private static boolean isIntegralType(String type) {
-      return type.equals(serdeConstants.TINYINT_TYPE_NAME)
-          || type.equals(serdeConstants.SMALLINT_TYPE_NAME)
-          || type.equals(serdeConstants.INT_TYPE_NAME)
-          || type.equals(serdeConstants.BIGINT_TYPE_NAME);
-    }
-
-    /**
      * Get partition column index in the table partition column list that
      * corresponds to the key that is being filtered on by this tree node.
      * @param table The table.
@@ -440,21 +428,20 @@ public class ExpressionTree {
     }
 
     /**
-     * Validates and gets the query parameter for filter pushdown based on the column
+     * Validates and gets the query parameter for JDO filter pushdown based on the column
      * and the constant stored in this node.
-     * In future this may become different for SQL and JDOQL filter pushdown.
      * @param table The table.
      * @param partColIndex The index of the column to check.
      * @param filterBuilder filter builder used to report error, if any.
      * @return The parameter string.
      */
-    public String getFilterPushdownParam(
+    private String getJdoFilterPushdownParam(
         Table table, int partColIndex, FilterBuilder filterBuilder) throws MetaException {
-      boolean isIntegralSupported = doesOperatorSupportIntegral(operator);
+      boolean isIntegralSupported = canJdoUseStringsWithIntegral();
       String colType = table.getPartitionKeys().get(partColIndex).getType();
       // Can only support partitions whose types are string, or maybe integers
       if (!colType.equals(serdeConstants.STRING_TYPE_NAME)
-          && (!isIntegralSupported || !isIntegralType(colType))) {
+          && (!isIntegralSupported || !serdeConstants.IntegralTypes.contains(colType))) {
         filterBuilder.setError("Filtering is supported only on partition keys of type " +
             "string" + (isIntegralSupported ? ", or integral types" : ""));
         return null;

Modified: hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (original)
+++ hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java Tue Oct  8 07:43:11 2013
@@ -2019,6 +2019,17 @@ public abstract class TestHiveMetaStore 
     checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6);
     checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1);
 
+    // Test gt/lt/lte/gte for numbers.
+    checkFilter(client, dbName, tblName, "p3 < 0", 1);
+    checkFilter(client, dbName, tblName, "p3 >= -33", 6);
+    checkFilter(client, dbName, tblName, "p3 > -33", 5);
+    checkFilter(client, dbName, tblName, "p3 > 31 and p3 < 32", 0);
+    checkFilter(client, dbName, tblName, "p3 > 31 or p3 < 31", 3);
+    checkFilter(client, dbName, tblName, "p3 > 30 or p3 < 30", 6);
+    checkFilter(client, dbName, tblName, "p3 >= 31 or p3 < -32", 6);
+    checkFilter(client, dbName, tblName, "p3 >= 32", 2);
+    checkFilter(client, dbName, tblName, "p3 > 32", 0);
+
     //Test for setting the maximum partition count
     List<Partition> partitions = client.listPartitionsByFilter(dbName,
         tblName, "p1 >= \"p12\"", (short) 2);
@@ -2037,17 +2048,6 @@ public abstract class TestHiveMetaStore 
     assertTrue("Filter on int partition key", me.getMessage().contains(
           "Filtering is supported only on partition keys of type string"));
 
-    try {
-      client.listPartitionsByFilter(dbName,
-          tblName, "p3 >= 31", (short) -1);
-    } catch(MetaException e) {
-      me = e;
-    }
-    assertNotNull(me);
-    assertTrue("Filter on int partition key", me.getMessage().contains(
-          "Filtering is supported only on partition keys of type string"));
-
-
     me = null;
     try {
       client.listPartitionsByFilter(dbName,

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java Tue Oct  8 07:43:11 2013
@@ -39,7 +39,6 @@ import javax.xml.parsers.DocumentBuilder
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -131,7 +130,7 @@ import org.apache.hadoop.hive.ql.udf.UDF
 import org.apache.hadoop.hive.ql.udf.generic.*;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFLeadLag.GenericUDFLag;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFLeadLag.GenericUDFLead;
-import org.apache.hadoop.hive.ql.udf.ptf.NPath.NPathResolver;
+import org.apache.hadoop.hive.ql.udf.ptf.MatchPath.MatchPathResolver;
 import org.apache.hadoop.hive.ql.udf.ptf.Noop.NoopResolver;
 import org.apache.hadoop.hive.ql.udf.ptf.NoopWithMap.NoopWithMapResolver;
 import org.apache.hadoop.hive.ql.udf.ptf.TableFunctionResolver;
@@ -156,7 +155,6 @@ import org.apache.hadoop.hive.serde2.typ
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeParams;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.w3c.dom.Document;
@@ -457,7 +455,7 @@ public final class FunctionRegistry {
     registerTableFunction(NOOP_TABLE_FUNCTION, NoopResolver.class);
     registerTableFunction(NOOP_MAP_TABLE_FUNCTION, NoopWithMapResolver.class);
     registerTableFunction(WINDOWING_TABLE_FUNCTION,  WindowingTableFunctionResolver.class);
-    registerTableFunction("npath", NPathResolver.class);
+    registerTableFunction("matchpath", MatchPathResolver.class);
   }
 
   public static void registerTemporaryUDF(String functionName,
@@ -663,12 +661,7 @@ public final class FunctionRegistry {
         int maxLength = getCommonLength(
             TypeInfoUtils.getCharacterLengthForType(a),
             TypeInfoUtils.getCharacterLengthForType(b));
-        VarcharTypeParams varcharParams = new VarcharTypeParams();
-        varcharParams.setLength(maxLength);
-        // Generate type name so that we can retrieve the TypeInfo for that type.
-        String typeName = PrimitiveObjectInspectorUtils
-            .getTypeEntryFromTypeSpecs(typeCategory, varcharParams).toString();
-        return TypeInfoFactory.getPrimitiveTypeInfo(typeName);
+        return TypeInfoFactory.getVarcharTypeInfo(maxLength);
 
       default:
         // Type doesn't require any qualifiers.
@@ -1329,9 +1322,9 @@ public final class FunctionRegistry {
       // The original may have settable info that needs to be added to the new copy.
       if (genericUDF instanceof SettableUDF) {
         try {
-          Object settableData = ((SettableUDF)genericUDF).getParams();
-          if (settableData != null) {
-            ((SettableUDF)clonedUDF).setParams(settableData);
+          TypeInfo typeInfo = ((SettableUDF)genericUDF).getTypeInfo();
+          if (typeInfo != null) {
+            ((SettableUDF)clonedUDF).setTypeInfo(typeInfo);
           }
         } catch (UDFArgumentException err) {
           // In theory this should not happen - if the original copy of the UDF had this
@@ -1761,7 +1754,9 @@ public final class FunctionRegistry {
   }
 
   private static void registerNativeStatus(FunctionInfo fi) {
-    if (!fi.isNative()) return;
+    if (!fi.isNative()) {
+      return;
+    }
     nativeUdfs.add(fi.getFunctionClass());
   }
 }

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java Tue Oct  8 07:43:11 2013
@@ -27,6 +27,7 @@ import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -90,38 +91,12 @@ public class OrcInputFormat  implements 
 
     OrcRecordReader(Reader file, Configuration conf,
                     long offset, long length) throws IOException {
-      String serializedPushdown = conf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
-      String columnNamesString =
-          conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR);
-      String[] columnNames = null;
-      SearchArgument sarg = null;
       List<OrcProto.Type> types = file.getTypes();
-      if (types.size() == 0) {
-        numColumns = 0;
-      } else {
-        numColumns = types.get(0).getSubtypesCount();
-      }
-      columnNames = new String[types.size()];
-      LOG.info("included column ids = " +
-          conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "null"));
-      LOG.info("included columns names = " +
-          conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, "null"));
-      boolean[] includeColumn = findIncludedColumns(types, conf);
-      if (serializedPushdown != null && columnNamesString != null) {
-        sarg = SearchArgument.FACTORY.create
-            (Utilities.deserializeExpression(serializedPushdown, conf));
-        LOG.info("ORC pushdown predicate: " + sarg);
-        String[] neededColumnNames = columnNamesString.split(",");
-        int i = 0;
-        for(int columnId: types.get(0).getSubtypesList()) {
-          if (includeColumn == null || includeColumn[columnId]) {
-            columnNames[columnId] = neededColumnNames[i++];
-          }
-        }
-      } else {
-        LOG.info("No ORC pushdown predicate");
-      }
-      this.reader = file.rows(offset, length, includeColumn, sarg, columnNames);
+      numColumns = (types.size() == 0) ? 0 : types.get(0).getSubtypesCount();
+      boolean[] includedColumns = findIncludedColumns(types, conf);
+      String[] columnNames = getIncludedColumnNames(types, includedColumns, conf);
+      SearchArgument sarg = createSarg(types, conf);
+      this.reader = file.rows(offset, length, includedColumns, sarg, columnNames);
       this.offset = offset;
       this.length = length;
     }
@@ -187,15 +162,48 @@ public class OrcInputFormat  implements 
     }
   }
 
+  public static SearchArgument createSarg(List<OrcProto.Type> types, Configuration conf) {
+    String serializedPushdown = conf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
+    if (serializedPushdown == null
+        || conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR) == null) {
+      LOG.info("No ORC pushdown predicate");
+      return null;
+    }
+    SearchArgument sarg = SearchArgument.FACTORY.create
+        (Utilities.deserializeExpression(serializedPushdown, conf));
+    LOG.info("ORC pushdown predicate: " + sarg);
+    return sarg;
+  }
+
+  public static String[] getIncludedColumnNames(
+      List<OrcProto.Type> types, boolean[] includedColumns, Configuration conf) {
+    String columnNamesString = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR);
+    LOG.info("included columns names = " + columnNamesString);
+    if (columnNamesString == null || conf.get(TableScanDesc.FILTER_EXPR_CONF_STR) == null) {
+      return null;
+    }
+    String[] neededColumnNames = columnNamesString.split(",");
+    int i = 0;
+    String[] columnNames = new String[types.size()];
+    for(int columnId: types.get(0).getSubtypesList()) {
+      if (includedColumns == null || includedColumns[columnId]) {
+        columnNames[columnId] = neededColumnNames[i++];
+      }
+    }
+    return columnNames;
+  }
+
   /**
    * Take the configuration and figure out which columns we need to include.
    * @param types the types of the file
    * @param conf the configuration
    * @return true for each column that should be included
    */
-  static boolean[] findIncludedColumns(List<OrcProto.Type> types,
-                                               Configuration conf) {
-    if (ColumnProjectionUtils.isReadAllColumns(conf)) {
+  public static boolean[] findIncludedColumns(List<OrcProto.Type> types, Configuration conf) {
+    String includedStr = conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR);
+    LOG.info("included column ids = " + includedStr);
+    if (ColumnProjectionUtils.isReadAllColumns(conf) ||
+      includedStr == null || includedStr.trim().length() == 0) {
       return null;
     } else {
       int numColumns = types.size();

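The refactor above pulls the SearchArgument construction and column-pruning logic out of the OrcRecordReader constructor into public static helpers, so the vectorized reader further down can share them instead of keeping its own copy. A sketch of how the three helpers compose (this mirrors the rewritten constructor and assumes the ORC Reader, Configuration, offset, and length come from the caller; it is not code from the commit):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcProto;
import org.apache.hadoop.hive.ql.io.orc.Reader;
import org.apache.hadoop.hive.ql.io.orc.RecordReader;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;

// Sketch only: composes the extracted helpers the same way the refactored
// reader constructors do.
class OrcPushdownSketch {
  static RecordReader openRows(Reader file, Configuration conf,
      long offset, long length) throws IOException {
    List<OrcProto.Type> types = file.getTypes();
    boolean[] included = OrcInputFormat.findIncludedColumns(types, conf);
    String[] names = OrcInputFormat.getIncludedColumnNames(types, included, conf);
    SearchArgument sarg = OrcInputFormat.createSarg(types, conf);
    return file.rows(offset, length, included, sarg, names);
  }
}
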
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java Tue Oct  8 07:43:11 2013
@@ -32,18 +32,14 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.SettableMapObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.SettableStructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
-import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.ParameterizedPrimitiveTypeUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeParams;
 import org.apache.hadoop.io.Writable;
 
 final class OrcStruct implements Writable {
@@ -487,12 +483,6 @@ final class OrcStruct implements Writabl
           case STRING:
             return PrimitiveObjectInspectorFactory.writableStringObjectInspector;
           case VARCHAR:
-            // For varchar we need to retrieve the string length from the TypeInfo.
-            VarcharTypeParams varcharParams = (VarcharTypeParams)
-                ParameterizedPrimitiveTypeUtils.getTypeParamsFromTypeInfo(info);
-            if (varcharParams == null) {
-              throw new IllegalArgumentException("varchar type used without type params");
-            }
             return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
                 (PrimitiveTypeInfo) info);
           case TIMESTAMP:
@@ -546,11 +536,8 @@ final class OrcStruct implements Writabl
           throw new UnsupportedOperationException(
               "Illegal use of varchar type without length in ORC type definition.");
         }
-        VarcharTypeParams varcharParams = new VarcharTypeParams();
-        varcharParams.setLength(type.getMaximumLength());
         return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
-            PrimitiveObjectInspectorUtils.getTypeEntryFromTypeSpecs(
-                PrimitiveCategory.VARCHAR, varcharParams));
+            TypeInfoFactory.getVarcharTypeInfo(type.getMaximumLength()));
       case TIMESTAMP:
         return PrimitiveObjectInspectorFactory.javaTimestampObjectInspector;
       case DATE:

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java Tue Oct  8 07:43:11 2013
@@ -31,8 +31,8 @@ import org.apache.hadoop.hive.ql.exec.ve
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
 import org.apache.hadoop.hive.ql.io.InputFormatChecker;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.FileInputFormat;
 import org.apache.hadoop.mapred.FileSplit;
@@ -58,12 +58,14 @@ public class VectorizedOrcInputFormat ex
 
     VectorizedOrcRecordReader(Reader file, Configuration conf,
         FileSplit fileSplit) throws IOException {
+      List<OrcProto.Type> types = file.getTypes();
+      boolean[] includedColumns = OrcInputFormat.findIncludedColumns(types, conf);
+      String[] columnNames = OrcInputFormat.getIncludedColumnNames(types, includedColumns, conf);
+      SearchArgument sarg = OrcInputFormat.createSarg(types, conf);
 
       this.offset = fileSplit.getStart();
       this.length = fileSplit.getLength();
-      this.reader = file.rows(offset, length,
-          findIncludedColumns(file.getTypes(), conf));
-
+      this.reader = file.rows(offset, length, includedColumns, sarg, columnNames);
       try {
         rbCtx = new VectorizedRowBatchCtx();
         rbCtx.init(conf, fileSplit);
@@ -134,63 +136,6 @@ public class VectorizedOrcInputFormat ex
     setMinSplitSize(16 * 1024);
   }
 
-  /**
-   * Recurse down into a type subtree turning on all of the sub-columns.
-   *
-   * @param types
-   *          the types of the file
-   * @param result
-   *          the global view of columns that should be included
-   * @param typeId
-   *          the root of tree to enable
-   */
-  private static void includeColumnRecursive(List<OrcProto.Type> types,
-      boolean[] result,
-      int typeId) {
-    result[typeId] = true;
-    OrcProto.Type type = types.get(typeId);
-    int children = type.getSubtypesCount();
-    for (int i = 0; i < children; ++i) {
-      includeColumnRecursive(types, result, type.getSubtypes(i));
-    }
-  }
-
-  /**
-   * Take the configuration and figure out which columns we need to include.
-   *
-   * @param types
-   *          the types of the file
-   * @param conf
-   *          the configuration
-   * @return true for each column that should be included
-   */
-  private static boolean[] findIncludedColumns(List<OrcProto.Type> types,
-      Configuration conf) {
-    String includedStr =
-        conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR);
-    if (includedStr == null || includedStr.trim().length() == 0) {
-      return null;
-    } else {
-      int numColumns = types.size();
-      boolean[] result = new boolean[numColumns];
-      result[0] = true;
-      OrcProto.Type root = types.get(0);
-      List<Integer> included = ColumnProjectionUtils.getReadColumnIDs(conf);
-      for (int i = 0; i < root.getSubtypesCount(); ++i) {
-        if (included.contains(i)) {
-          includeColumnRecursive(types, result, root.getSubtypes(i));
-        }
-      }
-      // if we are filtering at least one column, return the boolean array
-      for (boolean include : result) {
-        if (!include) {
-          return result;
-        }
-      }
-      return null;
-    }
-  }
-
   @Override
   public RecordReader<NullWritable, VectorizedRowBatch>
       getRecordReader(InputSplit inputSplit, JobConf conf,

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java Tue Oct  8 07:43:11 2013
@@ -59,8 +59,7 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.ParameterizedPrimitiveTypeUtils;
-import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeParams;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
@@ -1609,14 +1608,9 @@ class WriterImpl implements Writer, Memo
           case VARCHAR:
             // The varchar length needs to be written to file and should be available
             // from the object inspector
-            VarcharTypeParams varcharParams = (VarcharTypeParams)
-                ParameterizedPrimitiveTypeUtils.getTypeParamsFromPrimitiveObjectInspector(
-                    (PrimitiveObjectInspector) treeWriter.inspector);
-            if (varcharParams == null) {
-              throw new IllegalArgumentException("No varchar length specified in ORC type");
-            }
+            VarcharTypeInfo typeInfo = (VarcharTypeInfo) ((PrimitiveObjectInspector) treeWriter.inspector).getTypeInfo();
             type.setKind(Type.Kind.VARCHAR);
-            type.setMaximumLength(varcharParams.getLength());
+            type.setMaximumLength(typeInfo.getLength());
             break;
           case BINARY:
             type.setKind(OrcProto.Type.Kind.BINARY);

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Tue Oct  8 07:43:11 2013
@@ -129,7 +129,7 @@ import org.apache.hadoop.hive.serde.serd
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeParams;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
 
@@ -172,9 +172,8 @@ public class DDLSemanticAnalyzer extends
     case HiveParser.TOK_VARCHAR:
       PrimitiveCategory primitiveCategory = PrimitiveCategory.VARCHAR;
       typeName = TokenToTypeName.get(token);
-      VarcharTypeParams varcharParams = ParseUtils.getVarcharParams(typeName, node);
-      typeName = PrimitiveObjectInspectorUtils.getTypeEntryFromTypeSpecs(
-          primitiveCategory, varcharParams).toString();
+      VarcharTypeInfo varcharTypeInfo = ParseUtils.getVarcharTypeInfo(typeName, node);
+      typeName = varcharTypeInfo.getQualifiedName();
       break;
     default:
       typeName = TokenToTypeName.get(token);

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java Tue Oct  8 07:43:11 2013
@@ -20,17 +20,15 @@ package org.apache.hadoop.hive.ql.parse;
 
 import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.typeinfo.BaseTypeParams;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeParams;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 
 
 /**
@@ -110,49 +108,24 @@ public final class ParseUtils {
    */
   static ExprNodeDesc createConversionCast(ExprNodeDesc column, PrimitiveTypeInfo tableFieldTypeInfo)
       throws SemanticException {
-    ExprNodeDesc ret;
-
     // Get base type, since type string may be parameterized
     String baseType = TypeInfoUtils.getBaseName(tableFieldTypeInfo.getTypeName());
-    BaseTypeParams typeParams = null;
-    // If TypeInfo is parameterized, provide the params to the UDF factory method.
-    typeParams = tableFieldTypeInfo.getTypeParams();
-    if (typeParams != null) {
-      switch (tableFieldTypeInfo.getPrimitiveCategory()) {
-        case VARCHAR:
-          // Nothing to do here - the parameter will be passed to the UDF factory method below
-          break;
-        default:
-          throw new SemanticException("Type cast for " + tableFieldTypeInfo.getPrimitiveCategory() +
-              " does not take type parameters");
-      }
-    }
 
     // If the type cast UDF is for a parameterized type, then it should implement
     // the SettableUDF interface so that we can pass in the params.
     // Not sure if this is the cleanest solution, but there does need to be a way
     // to provide the type params to the type cast.
-    ret = TypeCheckProcFactory.DefaultExprProcessor
-        .getFuncExprNodeDescWithUdfData(baseType, typeParams, column);
-
-    return ret;
+    return TypeCheckProcFactory.DefaultExprProcessor.getFuncExprNodeDescWithUdfData(baseType,
+        tableFieldTypeInfo, column);
   }
 
-  public static VarcharTypeParams getVarcharParams(String typeName, ASTNode node)
+  public static VarcharTypeInfo getVarcharTypeInfo(String typeName, ASTNode node)
       throws SemanticException {
     if (node.getChildCount() != 1) {
       throw new SemanticException("Bad params for type " + typeName);
     }
 
-    try {
-      VarcharTypeParams typeParams = new VarcharTypeParams();
-      String lengthStr = node.getChild(0).getText();
-      Integer length = Integer.valueOf(lengthStr);
-      typeParams.setLength(length.intValue());
-      typeParams.validateParams();
-      return typeParams;
-    } catch (SerDeException err) {
-      throw new SemanticException(err);
-    }
+    String lengthStr = node.getChild(0).getText();
+    return TypeInfoFactory.getVarcharTypeInfo(Integer.valueOf(lengthStr));
   }
 }

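Here, as in FunctionRegistry, OrcStruct, WriterImpl, and DDLSemanticAnalyzer above, the VarcharTypeParams plumbing is replaced by a qualified VarcharTypeInfo obtained from TypeInfoFactory. A standalone illustration of the new one-call construction (not taken from the patch):

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;

// Standalone illustration of building a varchar TypeInfo with its length.
public class VarcharTypeInfoSketch {
  public static void main(String[] args) {
    VarcharTypeInfo vti = TypeInfoFactory.getVarcharTypeInfo(20);
    System.out.println(vti.getQualifiedName()); // expected: varchar(20)
    System.out.println(vti.getLength());        // expected: 20
  }
}
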
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java Tue Oct  8 07:43:11 2013
@@ -62,13 +62,13 @@ import org.apache.hadoop.hive.ql.udf.gen
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
-import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeParams;
 import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 
 /**
  * The Factory for creating typecheck processors. The typecheck processors are
@@ -652,7 +652,7 @@ public final class TypeCheckProcFactory 
      *
      * @throws UDFArgumentException
      */
-    static ExprNodeDesc getFuncExprNodeDescWithUdfData(String udfName, Object udfData,
+    static ExprNodeDesc getFuncExprNodeDescWithUdfData(String udfName, TypeInfo typeInfo,
         ExprNodeDesc... children) throws UDFArgumentException {
 
       FunctionInfo fi = FunctionRegistry.getFunctionInfo(udfName);
@@ -667,9 +667,9 @@ public final class TypeCheckProcFactory 
       }
 
       // Add udfData to UDF if necessary
-      if (udfData != null) {
+      if (typeInfo != null) {
         if (genericUDF instanceof SettableUDF) {
-          ((SettableUDF)genericUDF).setParams(udfData);
+          ((SettableUDF)genericUDF).setTypeInfo(typeInfo);
         }
       }
 
@@ -793,10 +793,10 @@ public final class TypeCheckProcFactory 
           switch (funcNameNode.getType()) {
             case HiveParser.TOK_VARCHAR:
               // Add type params
-              VarcharTypeParams varcharTypeParams = new VarcharTypeParams();
-              varcharTypeParams.length = Integer.valueOf((funcNameNode.getChild(0).getText()));
+              VarcharTypeInfo varcharTypeInfo = TypeInfoFactory.getVarcharTypeInfo(
+                  Integer.valueOf((funcNameNode.getChild(0).getText())));
               if (genericUDF != null) {
-                ((SettableUDF)genericUDF).setParams(varcharTypeParams);
+                ((SettableUDF)genericUDF).setTypeInfo(varcharTypeInfo);
               }
               break;
             default:

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java Tue Oct  8 07:43:11 2013
@@ -135,7 +135,7 @@ public class ConditionalResolverCommonJo
     return resTsks;
   }
 
-  class AliasFileSizePair implements Comparable<AliasFileSizePair> {
+  static class AliasFileSizePair implements Comparable<AliasFileSizePair> {
     String alias;
     long size;
     AliasFileSizePair(String alias, long size) {
@@ -148,7 +148,7 @@ public class ConditionalResolverCommonJo
       if (o == null) {
         return 1;
       }
-      return (int)(size - o.size);
+      return (size < o.size) ? -1 : ((size > o.size) ? 1 : 0);
     }
   }
 

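The compareTo change above replaces a subtract-and-cast with an explicit three-way comparison: casting a long difference to int can flip the sign once the sizes differ by more than the int range, which mis-orders large files. A standalone demonstration (not part of the commit):

// Standalone demonstration of the sign flip the old compareTo could produce.
public class CompareOverflowDemo {
  public static void main(String[] args) {
    long a = 0L;
    long b = 3000000000L;                  // ~3 GB, a plausible data size
    int broken = (int) (a - b);            // low 32 bits of -3000000000
    System.out.println(broken);            // 1294967296: positive, so a sorts after b
    int fixed = (a < b) ? -1 : ((a > b) ? 1 : 0);
    System.out.println(fixed);             // -1: a correctly sorts before b
  }
}
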
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java Tue Oct  8 07:43:11 2013
@@ -66,8 +66,7 @@ public class ExprNodeConstantDesc extend
         .getPrimitiveJavaObjectInspector(pc).getPrimitiveWritableObject(
           getValue());
     return PrimitiveObjectInspectorFactory
-        .getPrimitiveWritableConstantObjectInspector(
-            (PrimitiveTypeInfo) getTypeInfo(), writableValue);
+        .getPrimitiveWritableConstantObjectInspector((PrimitiveTypeInfo) getTypeInfo(), writableValue);
   }
 
 

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java?rev=1530173&r1=1530172&r2=1530173&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java Tue Oct  8 07:43:11 2013
@@ -796,15 +796,18 @@ public final class PlanUtils {
                     "using configureTableJobProperties",e);
                 storageHandler.configureTableJobProperties(tableDesc, jobProperties);
             }
+            if (tableDesc.getOutputFileFormatClass().getName()
+                     == HivePassThroughOutputFormat.HIVE_PASSTHROUGH_OF_CLASSNAME) {
+             // get the real output format when we register this for the table
+             jobProperties.put(
+                 HivePassThroughOutputFormat.HIVE_PASSTHROUGH_STORAGEHANDLER_OF_JOBCONFKEY,
+                 HiveFileFormatUtils.getRealOutputFormatClassName());
+           }
         }
         // Job properties are only relevant for non-native tables, so
         // for native tables, leave it null to avoid cluttering up
         // plans.
         if (!jobProperties.isEmpty()) {
-          if (tableDesc.getOutputFileFormatClass().getName() == HivePassThroughOutputFormat.HIVE_PASSTHROUGH_OF_CLASSNAME) {
-            // get the real output format when we register this for the table
-            jobProperties.put(HivePassThroughOutputFormat.HIVE_PASSTHROUGH_STORAGEHANDLER_OF_JOBCONFKEY,HiveFileFormatUtils.getRealOutputFormatClassName());
-          }
           tableDesc.setJobProperties(jobProperties);
         }
       }


