hive-commits mailing list archives

From: hashut...@apache.org
Subject: svn commit: r1668750 [1/8] - in /hive/branches/cbo: ./ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/common/type/ common/src/java/org/apache/hadoop/hive/conf/ common/src/java/org/apache/hive/common/util/ common/s...
Date: Mon, 23 Mar 2015 22:02:16 GMT
Author: hashutosh
Date: Mon Mar 23 22:02:13 2015
New Revision: 1668750

URL: http://svn.apache.org/r1668750
Log:
Merged latest trunk into branch (Ashutosh Chauhan)

Added:
    hive/branches/cbo/common/src/java/org/apache/hadoop/hive/common/DiskRange.java
      - copied unchanged from r1668746, hive/trunk/common/src/java/org/apache/hadoop/hive/common/DiskRange.java
    hive/branches/cbo/common/src/java/org/apache/hadoop/hive/common/DiskRangeList.java
      - copied unchanged from r1668746, hive/trunk/common/src/java/org/apache/hadoop/hive/common/DiskRangeList.java
    hive/branches/cbo/common/src/java/org/apache/hadoop/hive/common/type/HiveIntervalDayTime.java
      - copied unchanged from r1668746, hive/trunk/common/src/java/org/apache/hadoop/hive/common/type/HiveIntervalDayTime.java
    hive/branches/cbo/common/src/java/org/apache/hadoop/hive/common/type/HiveIntervalYearMonth.java
      - copied unchanged from r1668746, hive/trunk/common/src/java/org/apache/hadoop/hive/common/type/HiveIntervalYearMonth.java
    hive/branches/cbo/common/src/java/org/apache/hive/common/util/DateTimeMath.java
      - copied unchanged from r1668746, hive/trunk/common/src/java/org/apache/hive/common/util/DateTimeMath.java
    hive/branches/cbo/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalDayTime.java
      - copied unchanged from r1668746, hive/trunk/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalDayTime.java
    hive/branches/cbo/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalYearMonth.java
      - copied unchanged from r1668746, hive/trunk/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalYearMonth.java
    hive/branches/cbo/common/src/test/org/apache/hive/common/util/TestDateTimeMath.java
      - copied unchanged from r1668746, hive/trunk/common/src/test/org/apache/hive/common/util/TestDateTimeMath.java
    hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.12.0-to-0.13.0.mssql.sql
      - copied unchanged from r1668746, hive/trunk/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.12.0-to-0.13.0.mssql.sql
    hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.13.0-to-0.14.0.mssql.sql
      - copied unchanged from r1668746, hive/trunk/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.13.0-to-0.14.0.mssql.sql
    hive/branches/cbo/metastore/scripts/upgrade/mysql/021-HIVE-7018.mysql.sql
      - copied unchanged from r1668746, hive/trunk/metastore/scripts/upgrade/mysql/021-HIVE-7018.mysql.sql
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
      - copied unchanged from r1668746, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/KeyValueContainer.java
      - copied unchanged from r1668746, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/KeyValueContainer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/ObjectContainer.java
      - copied unchanged from r1668746, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/ObjectContainer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetadataReader.java
      - copied unchanged from r1668746, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetadataReader.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
      - copied unchanged from r1668746, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseDTI.java
      - copied unchanged from r1668746, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseDTI.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPDTIMinus.java
      - copied unchanged from r1668746, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPDTIMinus.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPDTIPlus.java
      - copied unchanged from r1668746, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPDTIPlus.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
      - copied unchanged from r1668746, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
      - copied unchanged from r1668746, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
    hive/branches/cbo/ql/src/test/queries/clientnegative/interval_1.q
      - copied unchanged from r1668746, hive/trunk/ql/src/test/queries/clientnegative/interval_1.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/interval_2.q
      - copied unchanged from r1668746, hive/trunk/ql/src/test/queries/clientnegative/interval_2.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/interval_3.q
      - copied unchanged from r1668746, hive/trunk/ql/src/test/queries/clientnegative/interval_3.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/encryption_unencrypted_nonhdfs_external_tables.q
      - copied unchanged from r1668746, hive/trunk/ql/src/test/queries/clientpositive/encryption_unencrypted_nonhdfs_external_tables.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/hybridhashjoin.q
      - copied unchanged from r1668746, hive/trunk/ql/src/test/queries/clientpositive/hybridhashjoin.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/interval_1.q
      - copied unchanged from r1668746, hive/trunk/ql/src/test/queries/clientpositive/interval_1.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/interval_2.q
      - copied unchanged from r1668746, hive/trunk/ql/src/test/queries/clientpositive/interval_2.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/interval_3.q
      - copied unchanged from r1668746, hive/trunk/ql/src/test/queries/clientpositive/interval_3.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/interval_arithmetic.q
      - copied unchanged from r1668746, hive/trunk/ql/src/test/queries/clientpositive/interval_arithmetic.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/interval_comparison.q
      - copied unchanged from r1668746, hive/trunk/ql/src/test/queries/clientpositive/interval_comparison.q
    hive/branches/cbo/ql/src/test/results/clientnegative/interval_1.q.out
      - copied unchanged from r1668746, hive/trunk/ql/src/test/results/clientnegative/interval_1.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/interval_2.q.out
      - copied unchanged from r1668746, hive/trunk/ql/src/test/results/clientnegative/interval_2.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/interval_3.q.out
      - copied unchanged from r1668746, hive/trunk/ql/src/test/results/clientnegative/interval_3.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/encrypted/encryption_unencrypted_nonhdfs_external_tables.q.out
      - copied unchanged from r1668746, hive/trunk/ql/src/test/results/clientpositive/encrypted/encryption_unencrypted_nonhdfs_external_tables.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/interval_1.q.out
      - copied unchanged from r1668746, hive/trunk/ql/src/test/results/clientpositive/interval_1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/interval_2.q.out
      - copied unchanged from r1668746, hive/trunk/ql/src/test/results/clientpositive/interval_2.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/interval_3.q.out
      - copied unchanged from r1668746, hive/trunk/ql/src/test/results/clientpositive/interval_3.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/interval_arithmetic.q.out
      - copied unchanged from r1668746, hive/trunk/ql/src/test/results/clientpositive/interval_arithmetic.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/interval_comparison.q.out
      - copied unchanged from r1668746, hive/trunk/ql/src/test/results/clientpositive/interval_comparison.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/hybridhashjoin.q.out
      - copied unchanged from r1668746, hive/trunk/ql/src/test/results/clientpositive/tez/hybridhashjoin.q.out
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/io/HiveIntervalDayTimeWritable.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/io/HiveIntervalDayTimeWritable.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/io/HiveIntervalYearMonthWritable.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/io/HiveIntervalYearMonthWritable.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveIntervalDayTime.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveIntervalDayTime.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveIntervalYearMonth.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyHiveIntervalYearMonth.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyHiveIntervalDayTimeObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyHiveIntervalDayTimeObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyHiveIntervalYearMonthObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyHiveIntervalYearMonthObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryHiveIntervalDayTime.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryHiveIntervalDayTime.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryHiveIntervalYearMonth.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryHiveIntervalYearMonth.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/HiveIntervalDayTimeObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/HiveIntervalDayTimeObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/HiveIntervalYearMonthObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/HiveIntervalYearMonthObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveIntervalDayTimeObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveIntervalDayTimeObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveIntervalYearMonthObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveIntervalYearMonthObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/SettableHiveIntervalDayTimeObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/SettableHiveIntervalDayTimeObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/SettableHiveIntervalYearMonthObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/SettableHiveIntervalYearMonthObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantHiveIntervalDayTimeObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantHiveIntervalDayTimeObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantHiveIntervalYearMonthObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantHiveIntervalYearMonthObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveIntervalDayTimeObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveIntervalDayTimeObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveIntervalYearMonthObjectInspector.java
      - copied unchanged from r1668746, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveIntervalYearMonthObjectInspector.java
    hive/branches/cbo/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveIntervalDayTimeWritable.java
      - copied unchanged from r1668746, hive/trunk/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveIntervalDayTimeWritable.java
    hive/branches/cbo/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveIntervalYearMonthWritable.java
      - copied unchanged from r1668746, hive/trunk/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveIntervalYearMonthWritable.java
    hive/branches/cbo/service/src/test/org/apache/hive/service/auth/TestLdapAuthenticationProviderImpl.java
      - copied unchanged from r1668746, hive/trunk/service/src/test/org/apache/hive/service/auth/TestLdapAuthenticationProviderImpl.java
Removed:
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezCacheAccess.java
    hive/branches/cbo/ql/src/test/queries/clientnegative/invalid_arithmetic_type.q
    hive/branches/cbo/ql/src/test/results/clientnegative/invalid_arithmetic_type.q.out
Modified:
    hive/branches/cbo/   (props changed)
    hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/cbo/common/src/java/org/apache/hive/common/util/DateUtils.java
    hive/branches/cbo/itests/src/test/resources/testconfiguration.properties
    hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.12.0-to-0.13.0.mssql.sql
    hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.13.0-to-0.14.0.mssql.sql
    hive/branches/cbo/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
    hive/branches/cbo/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql
    hive/branches/cbo/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql
    hive/branches/cbo/metastore/scripts/upgrade/postgres/pre-0-upgrade-0.13.0-to-0.14.0.postgres.sql
    hive/branches/cbo/pom.xml
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewForwardOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ListSinkOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapredContext.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ObjectCache.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMergeFileRecordHandler.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ObjectCache.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorLimitOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSelectOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/VectorizedRCFileRecordReader.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileMapper.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/CompressionCodec.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/InStream.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/PositionProvider.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerReaderV2.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SplitSparkWorkResolver.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicateTransitivePropagate.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseArithmetic.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseUnary.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPMinus.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNegative.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPPlus.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestBytesBytesMultiHashMap.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorLimitOperator.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSelectOperator.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizedRowBatchCtx.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeCaptureOutputOperator.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeVectorDataSourceOperator.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestFileDump.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInStream.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestIntegerCompressionReader.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRecordReaderImpl.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/testutil/OperatorTestUtils.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMinus.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPlus.java
    hive/branches/cbo/ql/src/test/queries/clientnegative/alter_rename_partition_failure.q
    hive/branches/cbo/ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/fs_default_name2.q.out
    hive/branches/cbo/serde/if/serde.thrift
    hive/branches/cbo/serde/src/gen/thrift/gen-cpp/serde_constants.cpp
    hive/branches/cbo/serde/src/gen/thrift/gen-cpp/serde_constants.h
    hive/branches/cbo/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/serdeConstants.java
    hive/branches/cbo/serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
    hive/branches/cbo/serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
    hive/branches/cbo/serde/src/gen/thrift/gen-rb/serde_constants.rb
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/ByteStream.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyFactory.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySimpleSerDe.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyPrimitiveObjectInspectorFactory.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryFactory.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/PrimitiveObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorFactory.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java
    hive/branches/cbo/service/src/java/org/apache/hive/service/auth/LdapAuthenticationProviderImpl.java
    hive/branches/cbo/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java

Propchange: hive/branches/cbo/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Mar 23 22:02:13 2015
@@ -3,4 +3,4 @@
 /hive/branches/spark:1608589-1660298
 /hive/branches/tez:1494760-1622766
 /hive/branches/vectorization:1466908-1527856
-/hive/trunk:1605012-1667397
+/hive/trunk:1605012-1668746

Modified: hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Mon Mar 23 22:02:13 2015
@@ -711,6 +711,11 @@ public class HiveConf extends Configurat
     HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
         "Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,\n" +
         "because memory-optimized hashtable cannot be serialized."),
+    HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", false, "Whether to use hybrid" +
+        "grace hash join as the join method for mapjoin."),
+    HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " +
+        "hybrid grace hash join, how often (how many rows apart) we check if memory is full. " +
+        "This number should be power of 2."),
     HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 10 * 1024 * 1024,
         "Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" +
         "store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" +
@@ -2035,7 +2040,9 @@ public class HiveConf extends Configurat
     SPARK_RPC_CHANNEL_LOG_LEVEL("hive.spark.client.channel.log.level", null,
       "Channel logging level for remote Spark driver.  One of {DEBUG, ERROR, INFO, TRACE, WARN}."),
     SPARK_RPC_SASL_MECHANISM("hive.spark.client.rpc.sasl.mechanisms", "DIGEST-MD5",
-      "Name of the SASL mechanism to use for authentication.");
+      "Name of the SASL mechanism to use for authentication."),
+    NWAYJOINREORDER("hive.reorder.nway.joins", true,
+      "Runs reordering of tables within single n-way join (i.e.: picks streamtable)");
 
     public final String varname;
     private final String defaultExpr;

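For reference, a minimal sketch of reading the three settings introduced above through the HiveConf API. The ConfVars names and defaults are taken from the hunk; the wrapper class and main method are illustrative only and not part of this commit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

    public class HybridGraceConfSketch {
      public static void main(String[] args) {
        Configuration conf = new HiveConf();
        // Defaults as declared above: hybrid grace hash join off, memory check
        // every 1024 rows, n-way join reordering on.
        boolean useHybrid = HiveConf.getBoolVar(conf, ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN);
        int memCheckFreq  = HiveConf.getIntVar(conf, ConfVars.HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ);
        boolean reorder   = HiveConf.getBoolVar(conf, ConfVars.NWAYJOINREORDER);
        System.out.println(useHybrid + ", " + memCheckFreq + ", " + reorder);
      }
    }

In practice these are more likely to be toggled per session, e.g. SET hive.mapjoin.hybridgrace.hashtable=true;, rather than read through the Java API.
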
Modified: hive/branches/cbo/common/src/java/org/apache/hive/common/util/DateUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/common/src/java/org/apache/hive/common/util/DateUtils.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/common/src/java/org/apache/hive/common/util/DateUtils.java (original)
+++ hive/branches/cbo/common/src/java/org/apache/hive/common/util/DateUtils.java Mon Mar 23 22:02:13 2015
@@ -18,6 +18,7 @@
 
 package org.apache.hive.common.util;
 
+import java.math.BigDecimal;
 import java.text.SimpleDateFormat;
 
 /**
@@ -36,4 +37,21 @@ public class DateUtils {
   public static SimpleDateFormat getDateFormat() {
     return dateFormatLocal.get();
   }
+
+  public static final int NANOS_PER_SEC = 1000000000;
+  public static final BigDecimal MAX_INT_BD = new BigDecimal(Integer.MAX_VALUE);
+  public static final BigDecimal NANOS_PER_SEC_BD = new BigDecimal(NANOS_PER_SEC);
+
+  public static int parseNumericValueWithRange(String fieldName,
+      String strVal, int minValue, int maxValue) throws IllegalArgumentException {
+    int result = 0;
+    if (strVal != null) {
+      result = Integer.parseInt(strVal);
+      if (result < minValue || result > maxValue) {
+        throw new IllegalArgumentException(String.format("%s value %d outside range [%d, %d]",
+            fieldName, result, minValue, maxValue));
+      }
+    }
+    return result;
+  }
 }
\ No newline at end of file

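The new parseNumericValueWithRange helper above validates a parsed integer against an inclusive range. A small, self-contained usage sketch follows; the field name and values are made up for illustration.

    import org.apache.hive.common.util.DateUtils;

    public class ParseRangeSketch {
      public static void main(String[] args) {
        // In range: returns the parsed value.
        int minutes = DateUtils.parseNumericValueWithRange("minute", "45", 0, 59);
        System.out.println(minutes); // 45

        // Out of range: throws IllegalArgumentException
        // ("minute value 75 outside range [0, 59]").
        try {
          DateUtils.parseNumericValueWithRange("minute", "75", 0, 59);
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage());
        }

        // A null input string skips parsing and returns 0.
        System.out.println(DateUtils.parseNumericValueWithRange("minute", null, 0, 59));
      }
    }
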
Modified: hive/branches/cbo/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/src/test/resources/testconfiguration.properties?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/cbo/itests/src/test/resources/testconfiguration.properties Mon Mar 23 22:02:13 2015
@@ -287,6 +287,7 @@ minitez.query.files=bucket_map_join_tez1
   bucket_map_join_tez2.q,\
   dynamic_partition_pruning.q,\
   dynamic_partition_pruning_2.q,\
+  hybridhashjoin.q,\
   mapjoin_decimal.q,\
   lvj_mapjoin.q, \
   mrr.q,\
@@ -314,7 +315,8 @@ encrypted.query.files=encryption_join_un
   encryption_join_with_different_encryption_keys.q,\
   encryption_select_read_only_encrypted_tbl.q,\
   encryption_select_read_only_unencrypted_tbl.q,\
-  encryption_load_data_to_encrypted_tables.q
+  encryption_load_data_to_encrypted_tables.q, \
+  encryption_unencrypted_nonhdfs_external_tables.q
 
 beeline.positive.exclude=add_part_exist.q,\
   alter1.q,\

Modified: hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.12.0-to-0.13.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.12.0-to-0.13.0.mssql.sql?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.12.0-to-0.13.0.mssql.sql (original)
+++ hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.12.0-to-0.13.0.mssql.sql Mon Mar 23 22:02:13 2015
@@ -19,3 +19,10 @@ CREATE TABLE PART_COL_STATS
     PARTITION_NAME varchar(767) NOT NULL,
     "TABLE_NAME" varchar(128) NOT NULL
 );
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+

Modified: hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.13.0-to-0.14.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.13.0-to-0.14.0.mssql.sql?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.13.0-to-0.14.0.mssql.sql (original)
+++ hive/branches/cbo/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.13.0-to-0.14.0.mssql.sql Mon Mar 23 22:02:13 2015
@@ -21,3 +21,9 @@ CREATE TABLE PART_COL_STATS
     PARTITION_NAME varchar(767) NOT NULL,
     "TABLE_NAME" varchar(128) NOT NULL
 );
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);

Modified: hive/branches/cbo/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql (original)
+++ hive/branches/cbo/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql Mon Mar 23 22:02:13 2015
@@ -211,15 +211,12 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS`
   `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `SD_ID` bigint(20) DEFAULT NULL,
   `TBL_ID` bigint(20) DEFAULT NULL,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
   PRIMARY KEY (`PART_ID`),
   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
   KEY `PARTITIONS_N49` (`TBL_ID`),
   KEY `PARTITIONS_N50` (`SD_ID`),
-  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 /*!40101 SET character_set_client = @saved_cs_client */;
 
@@ -590,15 +587,12 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
   `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `VIEW_EXPANDED_TEXT` mediumtext,
   `VIEW_ORIGINAL_TEXT` mediumtext,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
   PRIMARY KEY (`TBL_ID`),
   UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
   KEY `TBLS_N50` (`SD_ID`),
   KEY `TBLS_N49` (`DB_ID`),
-  KEY `TBLS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
-  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 /*!40101 SET character_set_client = @saved_cs_client */;
 

Modified: hive/branches/cbo/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql (original)
+++ hive/branches/cbo/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql Mon Mar 23 22:02:13 2015
@@ -1,5 +1,5 @@
 SELECT 'Upgrading MetaStore schema from 1.1.0 to 1.2.0' AS ' ';
-
+SOURCE 021-HIVE-7018.mysql.sql;
 UPDATE VERSION SET SCHEMA_VERSION='1.2.0', VERSION_COMMENT='Hive release version 1.2.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 1.1.0 to 1.2.0' AS ' ';
 

Modified: hive/branches/cbo/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql (original)
+++ hive/branches/cbo/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql Mon Mar 23 22:02:13 2015
@@ -21,3 +21,9 @@ MAX_COL_LEN NUMBER,
 NUM_TRUES NUMBER,
 NUM_FALSES NUMBER,
 LAST_ANALYZED NUMBER NOT NULL);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);

Modified: hive/branches/cbo/metastore/scripts/upgrade/postgres/pre-0-upgrade-0.13.0-to-0.14.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/scripts/upgrade/postgres/pre-0-upgrade-0.13.0-to-0.14.0.postgres.sql?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/metastore/scripts/upgrade/postgres/pre-0-upgrade-0.13.0-to-0.14.0.postgres.sql (original)
+++ hive/branches/cbo/metastore/scripts/upgrade/postgres/pre-0-upgrade-0.13.0-to-0.14.0.postgres.sql Mon Mar 23 22:02:13 2015
@@ -21,3 +21,9 @@ CREATE TABLE "PART_COL_STATS" (
  "NUM_FALSES" bigint,
  "LAST_ANALYZED" bigint NOT NULL
 );
+
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;

Modified: hive/branches/cbo/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/cbo/pom.xml?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/pom.xml (original)
+++ hive/branches/cbo/pom.xml Mon Mar 23 22:02:13 2015
@@ -100,7 +100,7 @@
     <antlr.version>3.4</antlr.version>
     <avro.version>1.7.5</avro.version>
     <bonecp.version>0.8.0.RELEASE</bonecp.version>
-    <calcite.version>1.1.0-incubating-SNAPSHOT</calcite.version>
+    <calcite.version>1.1.0-incubating</calcite.version>
     <datanucleus-api-jdo.version>3.2.6</datanucleus-api-jdo.version>
     <datanucleus-core.version>3.2.10</datanucleus-core.version>
     <datanucleus-rdbms.version>3.2.9</datanucleus-rdbms.version>

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Mon Mar 23 22:02:13 2015
@@ -440,15 +440,13 @@ public class Driver implements CommandPr
       sem.validate();
       perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE);
 
-      plan = new QueryPlan(command, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId,
-        SessionState.get().getCommandType());
+      // Command should be redacted before passing it to the QueryPlan in order
+      // to avoid returning sensitive data
+      String queryStr = HookUtils.redactLogString(conf, command);
+
+      plan = new QueryPlan(queryStr, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId,
+          SessionState.get().getCommandType());
 
-      String queryStr = plan.getQueryStr();
-      List<Redactor> queryRedactors = getHooks(ConfVars.QUERYREDACTORHOOKS, Redactor.class);
-      for (Redactor redactor : queryRedactors) {
-        redactor.setConf(conf);
-        queryStr = redactor.redactQuery(queryStr);
-      }
       conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, queryStr);
 
       conf.set("mapreduce.workflow.id", "hive_" + queryId);

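The Driver change above applies redaction once, before the command string reaches the QueryPlan. A hypothetical sketch of the call it now delegates to; the sample query is made up, and with no redactor hooks configured the string is expected to come back unchanged.

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.hooks.HookUtils;

    public class RedactionSketch {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        String command = "SELECT name FROM accounts WHERE ssn = '123-45-6789'";
        // Runs the command through any configured Redactor hooks before it is
        // stored or logged; with none configured it should return the input as-is.
        String queryStr = HookUtils.redactLogString(conf, command);
        System.out.println(queryStr);
      }
    }
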
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java Mon Mar 23 22:02:13 2015
@@ -17,6 +17,13 @@
  */
 package org.apache.hadoop.hive.ql.exec;
 
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.Future;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -28,11 +35,6 @@ import org.apache.hadoop.hive.ql.plan.Dy
 import org.apache.hadoop.hive.ql.plan.FileMergeDesc;
 import org.apache.hadoop.mapred.JobConf;
 
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.HashSet;
-import java.util.Set;
-
 /**
  * Fast file merge operator for ORC and RCfile. This is an abstract class which
  * does not process any rows. Refer {@link org.apache.hadoop.hive.ql.exec.OrcFileMergeOperator}
@@ -63,8 +65,8 @@ public abstract class AbstractFileMergeO
   protected transient DynamicPartitionCtx dpCtx;
 
   @Override
-  public void initializeOp(Configuration hconf) throws HiveException {
-    super.initializeOp(hconf);
+  public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     this.jc = new JobConf(hconf);
     incompatFileSet = new HashSet<Path>();
     autoDelete = false;
@@ -92,6 +94,7 @@ public abstract class AbstractFileMergeO
       throw new HiveException("Failed to initialize AbstractFileMergeOperator",
           e);
     }
+    return result;
   }
 
   // sets up temp and task temp path

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java Mon Mar 23 22:02:13 2015
@@ -20,7 +20,9 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
@@ -57,7 +59,7 @@ public abstract class AbstractMapJoinOpe
 
   @Override
   @SuppressWarnings("unchecked")
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
     if (conf.getGenJoinKeys()) {
       int tagLen = conf.getTagLength();
       joinKeys = new List[tagLen];
@@ -66,7 +68,7 @@ public abstract class AbstractMapJoinOpe
           inputObjInspectors,NOTSKIPBIGTABLE, tagLen);
     }
 
-    super.initializeOp(hconf);
+    Collection<Future<?>> result = super.initializeOp(hconf);
 
     numMapRowsRead = 0;
 
@@ -81,7 +83,7 @@ public abstract class AbstractMapJoinOpe
         !hasFilter(posBigTable), reporter);
     storage[posBigTable] = bigPosRC;
 
-    initializeChildren(hconf);
+    return result;
   }
 
   @Override

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java Mon Mar 23 22:02:13 2015
@@ -20,7 +20,9 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.Collection;
 import java.util.Collections;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -50,11 +52,14 @@ public class AppMasterEventOperator exte
   protected transient long MAX_SIZE;
 
   @Override
-  public void initializeOp(Configuration hconf) throws HiveException {
+  public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
+
     MAX_SIZE = HiveConf.getLongVar(hconf, ConfVars.TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE);
     serializer =
         (Serializer) ReflectionUtils.newInstance(conf.getTable().getDeserializerClass(), null);
     initDataBuffer(false);
+    return result;
   }
 
   protected void initDataBuffer(boolean skipPruning) throws HiveException {
@@ -71,7 +76,7 @@ public class AppMasterEventOperator exte
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     if (hasReachedMaxSize) {
       return;
     }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java Mon Mar 23 22:02:13 2015
@@ -20,11 +20,13 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.ql.plan.CollectDesc;
+import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
@@ -41,16 +43,17 @@ public class CollectOperator extends Ope
   transient int maxSize;
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
-    super.initializeOp(hconf);
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     rowList = new ArrayList<Object>();
     maxSize = conf.getBufferSize().intValue();
+    return result;
   }
 
   boolean firstRow = true;
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     ObjectInspector rowInspector = inputObjInspectors[tag];
     if (firstRow) {
       firstRow = false;

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java Mon Mar 23 22:02:13 2015
@@ -368,7 +368,7 @@ public class ColumnStatsTask extends Tas
         return persistPartitionStats();
       }
     } catch (Exception e) {
-        LOG.info(e);
+      LOG.error("Failed to run column stats task", e);
     }
     return 1;
   }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java Mon Mar 23 22:02:13 2015
@@ -21,9 +21,11 @@ package org.apache.hadoop.hive.ql.exec;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -185,7 +187,8 @@ public abstract class CommonJoinOperator
 
   @Override
   @SuppressWarnings("unchecked")
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     this.handleSkewJoin = conf.getHandleSkewJoin();
     this.hconf = hconf;
 
@@ -319,6 +322,7 @@ public abstract class CommonJoinOperator
     if (isLogInfoEnabled) {
       LOG.info("JOIN " + outputObjInspector.getTypeName() + " totalsz = " + totalSz);
     }
+    return result;
   }
 
   transient boolean newGroupStarted = false;

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java Mon Mar 23 22:02:13 2015
@@ -20,9 +20,11 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -85,10 +87,10 @@ public class CommonMergeJoinOperator ext
 
   @SuppressWarnings("unchecked")
   @Override
-  public void initializeOp(Configuration hconf) throws HiveException {
-    super.initializeOp(hconf);
+  public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     firstFetchHappened = false;
-    initializeChildren(hconf);
+
     int maxAlias = 0;
     for (byte pos = 0; pos < order.length; pos++) {
       if (pos > maxAlias) {
@@ -132,6 +134,7 @@ public class CommonMergeJoinOperator ext
     }
 
     sources = ((TezContext) MapredContext.get()).getRecordSources();
+    return result;
   }
 
   @Override
@@ -155,7 +158,7 @@ public class CommonMergeJoinOperator ext
    * push but the rest is pulled until we run out of records.
    */
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     posBigTable = (byte) conf.getBigTablePosition();
 
     byte alias = (byte) tag;

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Mon Mar 23 22:02:13 2015
@@ -56,6 +56,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -219,7 +220,6 @@ public class DDLTask extends Task<DDLWor
   private static final long serialVersionUID = 1L;
   private static final Log LOG = LogFactory.getLog("hive.ql.exec.DDLTask");
 
-  transient HiveConf conf;
   private static final int separator = Utilities.tabCode;
   private static final int terminator = Utilities.newLineCode;
 
@@ -243,7 +243,6 @@ public class DDLTask extends Task<DDLWor
   @Override
   public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) {
     super.initialize(conf, queryPlan, ctx);
-    this.conf = conf;
 
     // Pick the formatter to use to display the results.  Either the
     // normal human readable output or a json object.
@@ -601,7 +600,7 @@ public class DDLTask extends Task<DDLWor
     aliasToWork.put(mergeFilesDesc.getInputDir().toString(), mergeOp);
     mergeWork.setAliasToWork(aliasToWork);
     DriverContext driverCxt = new DriverContext();
-    Task task = null;
+    Task task;
     if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
       TezWork tezWork = new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
       mergeWork.setName("File Merge");
@@ -939,10 +938,17 @@ public class DDLTask extends Task<DDLWor
 
     Table tbl = db.getTable(renamePartitionDesc.getTableName());
 
-    Partition oldPart = db.getPartition(tbl, renamePartitionDesc.getOldPartSpec(), false);
-    Partition part = db.getPartition(tbl, renamePartitionDesc.getOldPartSpec(), false);
+    LinkedHashMap<String, String> oldPartSpec = renamePartitionDesc.getOldPartSpec();
+    Partition oldPart = db.getPartition(tbl, oldPartSpec, false);
+    if (oldPart == null) {
+      String partName = FileUtils.makePartName(new ArrayList<String>(oldPartSpec.keySet()),
+          new ArrayList<String>(oldPartSpec.values()));
+      throw new HiveException("Rename partition: source partition [" + partName
+          + "] does not exist.");
+    }
+    Partition part = db.getPartition(tbl, oldPartSpec, false);
     part.setValues(renamePartitionDesc.getNewPartSpec());
-    db.renamePartition(tbl, renamePartitionDesc.getOldPartSpec(), part);
+    db.renamePartition(tbl, oldPartSpec, part);
     Partition newPart = db
         .getPartition(tbl, renamePartitionDesc.getNewPartSpec(), false);
     work.getInputs().add(new ReadEntity(oldPart));

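[Editor's note] The DDLTask hunk above makes renamePartition fail fast when the source partition is missing and builds the error message with FileUtils.makePartName over the old partition spec. A small standalone sketch of that helper call follows; the partition columns and values are invented for illustration.

import java.util.ArrayList;
import java.util.LinkedHashMap;

import org.apache.hadoop.hive.common.FileUtils;

public class MakePartNameExample {
  public static void main(String[] args) {
    // Hypothetical old partition spec for a table partitioned by (ds, hr).
    LinkedHashMap<String, String> oldPartSpec = new LinkedHashMap<String, String>();
    oldPartSpec.put("ds", "2015-03-23");
    oldPartSpec.put("hr", "12");

    // Same call shape as the new DDLTask error path: keys and values in spec order.
    String partName = FileUtils.makePartName(
        new ArrayList<String>(oldPartSpec.keySet()),
        new ArrayList<String>(oldPartSpec.values()));

    // Expected to print an escaped "ds=2015-03-23/hr=12"-style name.
    System.out.println(partName);
  }
}
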
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java Mon Mar 23 22:02:13 2015
@@ -21,9 +21,11 @@ package org.apache.hadoop.hive.ql.exec;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -108,7 +110,8 @@ public class DemuxOperator extends Opera
   private int[][] newChildOperatorsTag;
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     // A DemuxOperator should have at least one child
     if (childOperatorsArray.length == 0) {
       throw new HiveException(
@@ -180,7 +183,7 @@ public class DemuxOperator extends Opera
     if (isLogInfoEnabled) {
       LOG.info("newChildOperatorsTag " + Arrays.toString(newChildOperatorsTag));
     }
-    initializeChildren(hconf);
+    return result;
   }
 
   private int[] toArray(List<Integer> list) {
@@ -253,7 +256,7 @@ public class DemuxOperator extends Opera
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     int currentChildIndex = newTagToChildIndex[tag];
 
     // Check if we start to forward rows to a new child.
@@ -277,7 +280,7 @@ public class DemuxOperator extends Opera
     if (child.getDone()) {
       childrenDone++;
     } else {
-      child.processOp(row, oldTag);
+      child.process(row, oldTag);
     }
 
     // if all children are done, this operator is also done

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java Mon Mar 23 22:02:13 2015
@@ -19,6 +19,8 @@
 package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
+import java.util.Collection;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -72,13 +74,14 @@ public class DummyStoreOperator extends
   }
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> ret = super.initializeOp(hconf);
     /*
-     * The conversion to standard object inspector was necessitated by HIVE-5973. The issue 
-     * happens when a select operator preceeds this operator as in the case of a subquery. The 
-     * select operator does not allocate a new object to hold the deserialized row. This affects 
+     * The conversion to standard object inspector was necessitated by HIVE-5973. The issue
+     * happens when a select operator preceeds this operator as in the case of a subquery. The
+     * select operator does not allocate a new object to hold the deserialized row. This affects
      * the operation of the SMB join which puts the object in a priority queue. Since all elements
-     * of the priority queue point to the same object, the join was resulting in incorrect 
+     * of the priority queue point to the same object, the join was resulting in incorrect
      * results.
      *
      * So the fix is to make a copy of the object as done in the processOp phase below. This
@@ -87,11 +90,11 @@ public class DummyStoreOperator extends
      */
     outputObjInspector = ObjectInspectorUtils.getStandardObjectInspector(inputObjInspectors[0]);
     result = new InspectableObject(null, outputObjInspector);
-    initializeChildren(hconf);
+    return ret;
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     // Store the row. See comments above for why we need a new copy of the row.
     result.o = ObjectInspectorUtils.copyToStandardObject(row, inputObjInspectors[0],
         ObjectInspectorCopyOption.WRITABLE);

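[Editor's note] The comment carried through the DummyStoreOperator hunk above explains why the operator copies each row: an upstream select operator reuses one object for every deserialized row, so the SMB join's priority queue would otherwise hold many references to the same object (HIVE-5973). The copy that breaks that sharing is the ObjectInspectorUtils call shown in process(). A minimal standalone sketch of that same call, assuming the row and its inspector come from the operator's inputObjInspectors:

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;

public class RowCopyExample {
  // Detach a row from the upstream operator's reused buffer by copying it
  // into standard writable objects, as DummyStoreOperator.process does above.
  public static Object detachRow(Object row, ObjectInspector rowInspector) {
    return ObjectInspectorUtils.copyToStandardObject(
        row, rowInspector, ObjectInspectorCopyOption.WRITABLE);
  }
}
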
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Mon Mar 23 22:02:13 2015
@@ -29,7 +29,6 @@ import java.util.Map;
 import java.util.Properties;
 
 import org.apache.commons.lang3.StringEscapeUtils;
-import com.google.common.collect.Iterators;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
@@ -74,6 +73,8 @@ import org.apache.hadoop.util.Reflection
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hive.common.util.AnnotationUtils;
 
+import com.google.common.collect.Iterators;
+
 /**
  * FetchTask implementation.
  **/
@@ -93,7 +94,7 @@ public class FetchOperator implements Se
   private final boolean isPartitioned;
   private final boolean isNonNativeTable;
   private StructObjectInspector vcsOI;
-  private List<VirtualColumn> vcCols;
+  private final List<VirtualColumn> vcCols;
   private ExecMapperContext context;
 
   private transient Deserializer tableSerDe;
@@ -178,7 +179,7 @@ public class FetchOperator implements Se
     if (hasVC || work.getSplitSample() != null) {
       context = new ExecMapperContext(job);
       if (operator != null) {
-        operator.setExecContext(context);
+        operator.passExecContext(context);
       }
     }
     setFetchOperatorContext(job, paths);
@@ -203,7 +204,7 @@ public class FetchOperator implements Se
        JobConf conf) throws IOException {
     if (Configurable.class.isAssignableFrom(inputFormatClass) ||
         JobConfigurable.class.isAssignableFrom(inputFormatClass)) {
-      return (InputFormat<WritableComparable, Writable>) ReflectionUtils
+      return ReflectionUtils
           .newInstance(inputFormatClass, conf);
     }
     InputFormat format = inputFormats.get(inputFormatClass.getName());
@@ -406,7 +407,7 @@ public class FetchOperator implements Se
   public boolean pushRow() throws IOException, HiveException {
     if (work.getRowsComputedUsingStats() != null) {
       for (List<Object> row : work.getRowsComputedUsingStats()) {
-        operator.processOp(row, 0);
+        operator.process(row, 0);
       }
       flushRow();
       return true;
@@ -421,7 +422,7 @@ public class FetchOperator implements Se
   }
 
   protected void pushRow(InspectableObject row) throws HiveException {
-    operator.processOp(row.o, 0);
+    operator.process(row.o, 0);
   }
 
   protected void flushRow() throws HiveException {
@@ -656,7 +657,7 @@ public class FetchOperator implements Se
     // what's different is that this is evaluated by unit of row using RecordReader.getPos()
     // and that is evaluated by unit of split using InputSplit.getLength().
     private long shrinkedLength = -1;
-    private InputFormat inputFormat;
+    private final InputFormat inputFormat;
 
     public FetchInputFormatSplit(InputSplit split, InputFormat inputFormat) {
       super(split, inputFormat.getClass().getName());

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java Mon Mar 23 22:02:13 2015
@@ -18,16 +18,20 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_TEMPORARY_TABLE_STORAGE;
+
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -76,8 +80,6 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.util.ReflectionUtils;
 
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_TEMPORARY_TABLE_STORAGE;
-
 /**
  * File Sink operator implementation.
  **/
@@ -319,7 +321,8 @@ public class FileSinkOperator extends Te
   }
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     try {
       this.hconf = hconf;
       filesCreated = false;
@@ -425,14 +428,13 @@ public class FileSinkOperator extends Te
       }
 
       statsMap.put(Counter.RECORDS_OUT + "_" + suffix, row_count);
-
-      initializeChildren(hconf);
     } catch (HiveException e) {
       throw e;
     } catch (Exception e) {
       e.printStackTrace();
       throw new HiveException(e);
     }
+    return result;
   }
 
   /**
@@ -630,7 +632,7 @@ public class FileSinkOperator extends Te
   protected Writable recordValue;
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     /* Create list bucketing sub-directory only if stored-as-directories is on. */
     String lbDirName = null;
     lbDirName = (lbCtx == null) ? null : generateListBucketingDirName(row);

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java Mon Mar 23 22:02:13 2015
@@ -19,6 +19,8 @@
 package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
+import java.util.Collection;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -28,7 +30,6 @@ import org.apache.hadoop.hive.ql.plan.Fi
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.io.LongWritable;
 
 /**
  * Filter operator implementation.
@@ -49,7 +50,8 @@ public class FilterOperator extends Oper
   }
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     try {
       heartbeatInterval = HiveConf.getIntVar(hconf,
           HiveConf.ConfVars.HIVESENDHEARTBEAT);
@@ -63,11 +65,11 @@ public class FilterOperator extends Oper
     } catch (Throwable e) {
       throw new HiveException(e);
     }
-    initializeChildren(hconf);
+    return result;
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     ObjectInspector rowInspector = inputObjInspectors[tag];
     if (conditionInspector == null) {
       conditionInspector = (PrimitiveObjectInspector) conditionEvaluator

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java Mon Mar 23 22:02:13 2015
@@ -19,7 +19,10 @@
 package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
+import java.util.Collection;
+import java.util.concurrent.Future;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.ForwardDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -32,7 +35,7 @@ public class ForwardOperator extends Ope
   private static final long serialVersionUID = 1L;
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     forward(row, inputObjInspectors[tag]);
   }
 
@@ -57,4 +60,9 @@ public class ForwardOperator extends Ope
   static public String getOperatorName() {
     return "FOR";
   }
+
+  @Override
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    return super.initializeOp(hconf);
+  }
 }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java Mon Mar 23 22:02:13 2015
@@ -337,6 +337,8 @@ public final class FunctionRegistry {
 
     system.registerGenericUDF(serdeConstants.DATE_TYPE_NAME, GenericUDFToDate.class);
     system.registerGenericUDF(serdeConstants.TIMESTAMP_TYPE_NAME, GenericUDFTimestamp.class);
+    system.registerGenericUDF(serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME, GenericUDFToIntervalYearMonth.class);
+    system.registerGenericUDF(serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME, GenericUDFToIntervalDayTime.class);
     system.registerGenericUDF(serdeConstants.BINARY_TYPE_NAME, GenericUDFToBinary.class);
     system.registerGenericUDF(serdeConstants.DECIMAL_TYPE_NAME, GenericUDFToDecimal.class);
     system.registerGenericUDF(serdeConstants.VARCHAR_TYPE_NAME, GenericUDFToVarchar.class);

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java Mon Mar 23 22:02:13 2015
@@ -24,12 +24,14 @@ import java.lang.reflect.Field;
 import java.sql.Timestamp;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.Future;
 
 import javolution.util.FastBitSet;
 
@@ -178,7 +180,8 @@ public class GroupByOperator extends Ope
   }
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     numRowsInput = 0;
     numRowsHashTbl = 0;
 
@@ -390,7 +393,7 @@ public class GroupByOperator extends Ope
     memoryMXBean = ManagementFactory.getMemoryMXBean();
     maxMemory = memoryMXBean.getHeapMemoryUsage().getMax();
     memoryThreshold = this.getConf().getMemoryThreshold();
-    initializeChildren(hconf);
+    return result;
   }
 
   /**
@@ -700,7 +703,7 @@ public class GroupByOperator extends Ope
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     firstRow = false;
     ObjectInspector rowInspector = inputObjInspectors[tag];
     // Total number of input rows is needed for hash aggregation only

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java Mon Mar 23 22:02:13 2015
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
+import java.util.Collection;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -31,21 +33,22 @@ public class HashTableDummyOperator exte
   private static final long serialVersionUID = 1L;
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     TableDesc tbl = this.getConf().getTbl();
     try {
       Deserializer serde = tbl.getDeserializerClass().newInstance();
       SerDeUtils.initializeSerDe(serde, hconf, tbl.getProperties(), null);
       this.outputObjInspector = serde.getObjectInspector();
-      initializeChildren(hconf);
     } catch (Exception e) {
       LOG.error("Generating output obj inspector from dummy object error", e);
       e.printStackTrace();
     }
+    return result;
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     throw new HiveException();
   }
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java Mon Mar 23 22:02:13 2015
@@ -19,11 +19,9 @@ package org.apache.hadoop.hive.ql.exec;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
-import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 
 /**
  * HashTableLoader is an interface used by MapJoinOperator used to load the hashtables
@@ -31,8 +29,9 @@ import org.apache.hadoop.hive.ql.plan.Ma
  */
 public interface HashTableLoader {
 
-  void init(ExecMapperContext context, Configuration hconf, MapJoinOperator joinOp);
+  void init(ExecMapperContext context, MapredContext mrContext, Configuration hconf,
+      MapJoinOperator joinOp);
 
-  void load(MapJoinTableContainer[] mapJoinTables,
-      MapJoinTableContainerSerDe[] mapJoinTableSerdes, long memUsage) throws HiveException;
+  void load(MapJoinTableContainer[] mapJoinTables, MapJoinTableContainerSerDe[] mapJoinTableSerdes,
+      long memUsage) throws HiveException;
 }

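[Editor's note] HashTableLoader.init() gains a MapredContext parameter above, so a loader now receives the execution-engine context at initialization time in addition to the mapper context. A skeleton written against the reshaped interface follows; the class is hypothetical and deliberately does nothing, while the real loaders elsewhere in ql are engine-specific and build the actual hashtables.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.exec.HashTableLoader;
import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
import org.apache.hadoop.hive.ql.exec.MapredContext;
import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;
import org.apache.hadoop.hive.ql.metadata.HiveException;

// Hypothetical no-op loader showing the reshaped interface.
public class NoopHashTableLoader implements HashTableLoader {

  @Override
  public void init(ExecMapperContext context, MapredContext mrContext,
      Configuration hconf, MapJoinOperator joinOp) {
    // The added MapredContext argument hands the loader the execution-engine
    // context; a real implementation would keep whatever it needs from here.
  }

  @Override
  public void load(MapJoinTableContainer[] mapJoinTables,
      MapJoinTableContainerSerDe[] mapJoinTableSerdes, long memUsage) throws HiveException {
    // A real implementation would build or deserialize the small-table
    // hashtables into mapJoinTables here.
  }
}
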
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java Mon Mar 23 22:02:13 2015
@@ -22,7 +22,9 @@ import java.io.IOException;
 import java.io.ObjectOutputStream;
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -33,10 +35,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionHandler;
 import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinEagerRowContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectSerDeContext;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinPersistableTableContainer;
-import org.apache.hadoop.hive.ql.exec.persistence.MapJoinEagerRowContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinRowContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;
@@ -114,7 +116,8 @@ public class HashTableSinkOperator exten
 
   @Override
   @SuppressWarnings("unchecked")
-  protected void initializeOp(Configuration hconf) throws HiveException {
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     boolean isSilent = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVESESSIONSILENT);
     console = new LogHelper(LOG, isSilent);
     memoryExhaustionHandler = new MapJoinMemoryExhaustionHandler(console, conf.getHashtableMemoryUsage());
@@ -189,6 +192,7 @@ public class HashTableSinkOperator exten
     } catch (SerDeException e) {
       throw new HiveException(e);
     }
+    return result;
   }
 
   public MapJoinTableContainer[] getMapJoinTables() {
@@ -219,7 +223,7 @@ public class HashTableSinkOperator exten
    * This operator only process small tables Read the key/value pairs Load them into hashtable
    */
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     byte alias = (byte)tag;
     // compute keys and values as StandardObjects. Use non-optimized key (MR).
     Object[] currentKey = new Object[joinKeys[alias].size()];

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java Mon Mar 23 22:02:13 2015
@@ -20,7 +20,9 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.IOException;
 import java.io.Serializable;
+import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
@@ -38,8 +40,7 @@ import org.apache.hadoop.io.LongWritable
 /**
  * Join operator implementation.
  */
-public class JoinOperator extends CommonJoinOperator<JoinDesc> implements
-    Serializable {
+public class JoinOperator extends CommonJoinOperator<JoinDesc> implements Serializable {
   private static final long serialVersionUID = 1L;
 
   private transient SkewJoinHandler skewJoinKeyContext = null;
@@ -55,19 +56,19 @@ public class JoinOperator extends Common
   private final transient LongWritable skewjoin_followup_jobs = new LongWritable(0);
 
   @Override
-  protected void initializeOp(Configuration hconf) throws HiveException {
-    super.initializeOp(hconf);
-    initializeChildren(hconf);
+  protected Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+    Collection<Future<?>> result = super.initializeOp(hconf);
     if (handleSkewJoin) {
       skewJoinKeyContext = new SkewJoinHandler(this);
       skewJoinKeyContext.initiliaze(hconf);
       skewJoinKeyContext.setSkewJoinJobCounter(skewjoin_followup_jobs);
     }
     statsMap.put(SkewkeyTableCounter.SKEWJOINFOLLOWUPJOBS.toString(), skewjoin_followup_jobs);
+    return result;
   }
 
   @Override
-  public void processOp(Object row, int tag) throws HiveException {
+  public void process(Object row, int tag) throws HiveException {
     try {
       reportProgress();
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java?rev=1668750&r1=1668749&r2=1668750&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java Mon Mar 23 22:02:13 2015
@@ -48,6 +48,15 @@ import org.apache.hadoop.util.Reflection
 
 public class JoinUtil {
 
+  /**
+   * Represents the join result between two tables
+   */
+  public static enum JoinResult {
+    MATCH,    // A match is found
+    NOMATCH,  // No match is found, and the current row will be dropped
+    SPILL     // The current row has been spilled to disk, as the join is postponed
+  }
+
   public static List<ObjectInspector>[] getObjectInspectorsFromEvaluators(
       List<ExprNodeEvaluator>[] exprEntries,
       ObjectInspector[] inputObjInspector,

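[Editor's note] The new JoinUtil.JoinResult enum above gives map-join probing three outcomes: MATCH, NOMATCH, and SPILL (the row is parked on disk because the join is postponed). A hedged sketch of how calling code might branch on it; the handler methods are stand-ins, not Hive APIs.

import org.apache.hadoop.hive.ql.exec.JoinUtil;

public class JoinResultExample {
  // Hypothetical dispatcher over the three join outcomes.
  static void handle(JoinUtil.JoinResult result, Object row) {
    switch (result) {
      case MATCH:
        emitJoinedRow(row);      // a match was found in the hashtable
        break;
      case NOMATCH:
        break;                   // no match: the probe row is simply dropped
      case SPILL:
        spillForLaterJoin(row);  // the row was spilled; the join is postponed
        break;
    }
  }

  private static void emitJoinedRow(Object row) { /* stand-in */ }
  private static void spillForLaterJoin(Object row) { /* stand-in */ }
}
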

