hive-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From br...@apache.org
Subject svn commit: r1615452 [1/4] - in /hive/branches/spark: ./ bin/ common/ common/src/java/org/apache/hadoop/hive/conf/ conf/ contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/ contrib/src/test/queries/clientnegative/ contrib/src/test/queries/...
Date Sun, 03 Aug 2014 20:48:39 GMT
Author: brock
Date: Sun Aug  3 20:48:35 2014
New Revision: 1615452

URL: http://svn.apache.org/r1615452
Log:
Merge trunk into spark branch

Added:
    hive/branches/spark/contrib/src/test/queries/clientpositive/url_hook.q
      - copied unchanged from r1615451, hive/trunk/contrib/src/test/queries/clientpositive/url_hook.q
    hive/branches/spark/contrib/src/test/results/clientpositive/url_hook.q.out
      - copied unchanged from r1615451, hive/trunk/contrib/src/test/results/clientpositive/url_hook.q.out
    hive/branches/spark/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java
      - copied unchanged from r1615451, hive/trunk/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java
    hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/
      - copied from r1615451, hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/
    hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/
      - copied from r1615451, hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/merge/
      - copied from r1615451, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/merge/
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileKeyWrapper.java
      - copied unchanged from r1615451, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileKeyWrapper.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java
      - copied unchanged from r1615451, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileStripeMergeInputFormat.java
      - copied unchanged from r1615451, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileStripeMergeInputFormat.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileStripeMergeRecordReader.java
      - copied unchanged from r1615451, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileStripeMergeRecordReader.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileValueWrapper.java
      - copied unchanged from r1615451, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileValueWrapper.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java
      - copied unchanged from r1615451, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java
    hive/branches/spark/ql/src/test/queries/clientnegative/orc_merge1.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientnegative/orc_merge1.q
    hive/branches/spark/ql/src/test/queries/clientnegative/orc_merge2.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientnegative/orc_merge2.q
    hive/branches/spark/ql/src/test/queries/clientnegative/orc_merge3.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientnegative/orc_merge3.q
    hive/branches/spark/ql/src/test/queries/clientnegative/orc_merge4.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientnegative/orc_merge4.q
    hive/branches/spark/ql/src/test/queries/clientnegative/orc_merge5.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientnegative/orc_merge5.q
    hive/branches/spark/ql/src/test/queries/clientpositive/alter_merge_2_orc.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientpositive/alter_merge_2_orc.q
    hive/branches/spark/ql/src/test/queries/clientpositive/alter_merge_orc.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientpositive/alter_merge_orc.q
    hive/branches/spark/ql/src/test/queries/clientpositive/alter_merge_stats_orc.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientpositive/alter_merge_stats_orc.q
    hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge1.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientpositive/orc_merge1.q
    hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge2.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientpositive/orc_merge2.q
    hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge3.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientpositive/orc_merge3.q
    hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge4.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientpositive/orc_merge4.q
    hive/branches/spark/ql/src/test/queries/clientpositive/vector_string_concat.q
      - copied unchanged from r1615451, hive/trunk/ql/src/test/queries/clientpositive/vector_string_concat.q
    hive/branches/spark/ql/src/test/results/clientnegative/orc_merge1.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientnegative/orc_merge1.q.out
    hive/branches/spark/ql/src/test/results/clientnegative/orc_merge2.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientnegative/orc_merge2.q.out
    hive/branches/spark/ql/src/test/results/clientnegative/orc_merge3.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientnegative/orc_merge3.q.out
    hive/branches/spark/ql/src/test/results/clientnegative/orc_merge4.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientnegative/orc_merge4.q.out
    hive/branches/spark/ql/src/test/results/clientnegative/orc_merge5.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientnegative/orc_merge5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_2_orc.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/alter_merge_2_orc.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_orc.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/alter_merge_orc.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/orc_merge1.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/orc_merge1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/orc_merge2.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/orc_merge2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/orc_merge3.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/orc_merge3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/orc_merge4.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/orc_merge4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/alter_merge_2_orc.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/tez/alter_merge_2_orc.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/alter_merge_orc.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/tez/alter_merge_orc.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/orc_merge1.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/orc_merge2.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/orc_merge3.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/orc_merge4.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/vector_string_concat.q.out
      - copied unchanged from r1615451, hive/trunk/ql/src/test/results/clientpositive/vector_string_concat.q.out
Removed:
    hive/branches/spark/conf/hive-default.xml.template
    hive/branches/spark/contrib/src/test/queries/clientnegative/url_hook.q
    hive/branches/spark/contrib/src/test/results/clientnegative/url_hook.q.out
    hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/fileformats/
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/MergeWork.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeOutputFormat.java
Modified:
    hive/branches/spark/   (props changed)
    hive/branches/spark/.gitignore
    hive/branches/spark/bin/hive.cmd
    hive/branches/spark/common/pom.xml
    hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/spark/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/TestURLHook.java
    hive/branches/spark/data/conf/hive-site.xml
    hive/branches/spark/hcatalog/core/pom.xml
    hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
    hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java
    hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java
    hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalNonPartitioned.java
    hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalPartitioned.java
    hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableDynamicPartitioned.java
    hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableNonPartitioned.java
    hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutablePartitioned.java
    hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java
    hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java
    hive/branches/spark/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java
    hive/branches/spark/hcatalog/streaming/src/test/sit
    hive/branches/spark/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
    hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
    hive/branches/spark/itests/qtest/testconfiguration.properties
    hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
    hive/branches/spark/metastore/pom.xml
    hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
    hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
    hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java
    hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
    hive/branches/spark/pom.xml
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Context.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringConcatColScalar.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringConcatScalarCol.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldLong.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeInputFormat.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeJoinProc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTablePartMergeFilesDesc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationProvider.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveRoleGrant.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_cannot_create_all_role.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_cannot_create_default_role.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_cannot_create_none_role.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_caseinsensitivity.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_drop_db_cascade.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_drop_db_empty.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_drop_role_no_admin.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_priv_current_role_neg.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_role_cycles1.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_role_cycles2.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_role_grant.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_role_grant2.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_role_grant_nosuchrole.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_role_grant_otherrole.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_role_grant_otheruser.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_rolehierarchy_privs.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_set_role_neg2.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_show_grant_otherrole.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_show_grant_otheruser_all.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_show_grant_otheruser_alltabs.q
    hive/branches/spark/ql/src/test/queries/clientnegative/authorization_show_grant_otheruser_wtab.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_9.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_admin_almighty1.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_admin_almighty2.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_create_func1.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_create_macro1.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_insert.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_owner_actions_db.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_role_grant1.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_role_grant2.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_show_grant.q
    hive/branches/spark/ql/src/test/queries/clientpositive/authorization_view_sqlstd.q
    hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/authorization_9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/join_nullsafe.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/metadataonly1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/orc_createas1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/rcfile_createas1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/rcfile_merge1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/rcfile_merge2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/rcfile_merge3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union_remove_10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union_remove_11.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union_remove_12.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union_remove_13.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union_remove_14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union_remove_16.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union_remove_9.q.out
    hive/branches/spark/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveDecimalObjectInspector.java
    hive/branches/spark/service/src/java/org/apache/hive/service/cli/CLIService.java
    hive/branches/spark/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
    hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java

Propchange: hive/branches/spark/
------------------------------------------------------------------------------
  Merged /hive/trunk:r1613740-1615451

Modified: hive/branches/spark/.gitignore
URL: http://svn.apache.org/viewvc/hive/branches/spark/.gitignore?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/.gitignore (original)
+++ hive/branches/spark/.gitignore Sun Aug  3 20:48:35 2014
@@ -13,6 +13,7 @@ common/src/gen
 *.iml
 *.ipr
 *.iws
+*.swp
 derby.log
 datanucleus.log
 .arc
@@ -25,3 +26,4 @@ hcatalog/core/target
 hcatalog/webhcat/java-client/target
 hcatalog/storage-handlers/hbase/target
 hcatalog/webhcat/svr/target
+conf/hive-default.xml.template

Modified: hive/branches/spark/bin/hive.cmd
URL: http://svn.apache.org/viewvc/hive/branches/spark/bin/hive.cmd?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/bin/hive.cmd (original)
+++ hive/branches/spark/bin/hive.cmd Sun Aug  3 20:48:35 2014
@@ -236,6 +236,21 @@ if defined HIVE_CLASSPATH (
   set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HIVE_CLASSPATH%
 )
 
+@rem set hbase components
+if defined HBASE_HOME (
+  if not defined HBASE_CONF_DIR (
+    if exist %HBASE_HOME%\conf (
+      set HBASE_CONF_DIR=%HBASE_HOME%\conf
+    )
+  )
+  if defined HBASE_CONF_DIR (
+    call :AddToHadoopClassPath %HBASE_CONF_DIR%	
+  ) 
+  if exist %HBASE_HOME%\lib (
+    call :AddToHadoopClassPath %HBASE_HOME%\lib\*
+  ) 
+)
+
 if defined AUX_PARAM (
         set HIVE_OPTS=%HIVE_OPTS% -hiveconf hive.aux.jars.path="%AUX_PARAM%"
 	set AUX_JARS_CMD_LINE="-libjars %AUX_PARAM%"
@@ -359,3 +374,12 @@ if not defined AUX_PARAM (
 	)
 )
 goto :EOF
+
+:AddToHadoopClassPath
+if defined HADOOP_CLASSPATH (
+  set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%1
+) else (
+    set HADOOP_CLASSPATH=%1
+  )  
+)
+goto :EOF

Modified: hive/branches/spark/common/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/common/pom.xml?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/common/pom.xml (original)
+++ hive/branches/spark/common/pom.xml Sun Aug  3 20:48:35 2014
@@ -108,17 +108,46 @@
         </dependency>
       </dependencies>
     </profile>
+
+    <profile>
+      <id>dist</id>
+      <build>
+        <resources>
+          <resource>
+            <directory>../conf/</directory>
+            <includes>
+              <include>hive-default.xml.template</include>
+            </includes>
+          </resource>
+        </resources>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>generate-template</id>
+                <phase>package</phase>
+                <configuration>
+                  <target>
+                    <property name="compile.classpath" refid="maven.runtime.classpath"/>
+                    <taskdef name="templategen" classname="org.apache.hadoop.hive.ant.GenHiveTemplate"
+                             classpath="${compile.classpath}"/>
+                    <templategen templateFile="${basedir}/../conf/hive-default.xml.template"/>
+                  </target>
+                </configuration>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
   </profiles>
 
   <build>
-    <resources>
-      <resource>
-        <directory>../conf/</directory>
-        <includes>
-          <include>hive-default.xml.template</include>
-        </includes>
-      </resource>
-    </resources>
     <sourceDirectory>${basedir}/src/java</sourceDirectory>
     <testSourceDirectory>${basedir}/src/test</testSourceDirectory>
     <scriptSourceDirectory>${basedir}/src/scripts</scriptSourceDirectory>
@@ -149,21 +178,6 @@
               <goal>run</goal>
             </goals>
           </execution>
-          <execution>
-            <id>generate-template</id>
-            <phase>package</phase>
-            <configuration>
-              <target>
-                <property name="compile.classpath" refid="maven.runtime.classpath"/>
-                <taskdef name="templategen" classname="org.apache.hadoop.hive.ant.GenHiveTemplate"
-                         classpath="${compile.classpath}"/>
-                <templategen templateFile="${basedir}/../conf/hive-default.xml.template"/>
-              </target>
-            </configuration>
-            <goals>
-              <goal>run</goal>
-            </goals>
-          </execution>
         </executions>
       </plugin>
       <plugin>

Modified: hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Sun Aug  3 20:48:35 2014
@@ -108,7 +108,6 @@ public class HiveConf extends Configurat
       HiveConf.ConfVars.METASTOREPWD,
       HiveConf.ConfVars.METASTORECONNECTURLHOOK,
       HiveConf.ConfVars.METASTORECONNECTURLKEY,
-      HiveConf.ConfVars.METASTOREFORCERELOADCONF,
       HiveConf.ConfVars.METASTORESERVERMINTHREADS,
       HiveConf.ConfVars.METASTORESERVERMAXTHREADS,
       HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE,
@@ -352,11 +351,6 @@ public class HiveConf extends Configurat
         "jdbc:derby:;databaseName=metastore_db;create=true",
         "JDBC connect string for a JDBC metastore"),
 
-    METASTOREFORCERELOADCONF("hive.metastore.force.reload.conf", false, 
-        "Whether to force reloading of the metastore configuration (including\n" +
-        "the connection URL, before the next metastore query that accesses the\n" +
-        "datastore. Once reloaded, this value is reset to false. Used for\n" +
-        "testing only."),
     HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 1,
         "The number of times to retry a HMSHandler call if there were a connection error"),
     HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", 1000,
@@ -739,7 +733,7 @@ public class HiveConf extends Configurat
     // HWI
     HIVEHWILISTENHOST("hive.hwi.listen.host", "0.0.0.0", "This is the host address the Hive Web Interface will listen on"),
     HIVEHWILISTENPORT("hive.hwi.listen.port", "9999", "This is the port the Hive Web Interface will listen on"),
-    HIVEHWIWARFILE("hive.hwi.war.file", "${system:HWI_WAR_FILE}",
+    HIVEHWIWARFILE("hive.hwi.war.file", "${env:HWI_WAR_FILE}",
         "This sets the path to the HWI war file, relative to ${HIVE_HOME}. "),
 
     HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"),
@@ -783,6 +777,14 @@ public class HiveConf extends Configurat
     HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""),
     HIVEMERGEINPUTFORMATBLOCKLEVEL("hive.merge.input.format.block.level",
         "org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat", ""),
+    HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true,
+        "When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a\n" +
+        " table with ORC file format, enabling this config will do stripe level fast merge\n" +
+        " for small ORC files. Note that enabling this config will not honor padding tolerance\n" +
+        " config (hive.exec.orc.block.padding.tolerance)."),
+    HIVEMERGEINPUTFORMATSTRIPELEVEL("hive.merge.input.format.stripe.level",
+        "org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat", 
+	"Input file format to use for ORC stripe level merging (for internal use only)"),
     HIVEMERGECURRENTJOBHASDYNAMICPARTITIONS(
         "hive.merge.current.job.has.dynamic.partitions", false, ""),
 
@@ -1388,10 +1390,9 @@ public class HiveConf extends Configurat
         "which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
 
     // Hive global init file location
-    HIVE_GLOBAL_INIT_FILE_LOCATION("hive.global.init.file.location", System.getenv("HIVE_CONF_DIR"),
+    HIVE_GLOBAL_INIT_FILE_LOCATION("hive.server2.global.init.file.location", "${env:HIVE_CONF_DIR}",
         "The location of HS2 global init file (.hiverc).\n" +
-        "If the property is not set, then HS2 will search for the file in $HIVE_CONF_DIR/.\n" +
-        "If the property is set, the value must be a valid path where the init file is located."),
+        "If the property is reset, the value must be a valid path where the init file is located."),
 
     // prefix used to auto generated column aliases (this should be started with '_')
     HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL("hive.autogen.columnalias.prefix.label", "_c",
@@ -1549,7 +1550,7 @@ public class HiveConf extends Configurat
         "Comma separated list of non-SQL Hive commands users are authorized to execute"),
 
     HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list",
-        "hive.security.authenticator.manager,hive.security.authorization.manager",
+        "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
         "Comma separated list of configuration options which are immutable at runtime"),
 
     // If this is set all move tasks at the end of a multi-insert query will only begin once all
@@ -1670,7 +1671,9 @@ public class HiveConf extends Configurat
         "  none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" +
         "  column: implies column names can contain any character."
     ),
-    USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "",
+
+    // role names are case-insensitive
+    USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "", false,
         "Comma separated list of users who are in admin role for bootstrapping.\n" +
         "More users can be added in ADMIN role later."),
 
@@ -1716,25 +1719,31 @@ public class HiveConf extends Configurat
     private final String description;
 
     private final boolean excluded;
+    private final boolean caseSensitive;
 
     ConfVars(String varname, Object defaultVal, String description) {
-      this(varname, defaultVal, null, description, false);
+      this(varname, defaultVal, null, description, true, false);
     }
 
     ConfVars(String varname, Object defaultVal, String description, boolean excluded) {
-      this(varname, defaultVal, null, description, excluded);
+      this(varname, defaultVal, null, description, true, excluded);
+    }
+
+    ConfVars(String varname, String defaultVal, boolean caseSensitive, String description) {
+      this(varname, defaultVal, null, description, caseSensitive, false);
     }
 
     ConfVars(String varname, Object defaultVal, Validator validator, String description) {
-      this(varname, defaultVal, validator, description, false);
+      this(varname, defaultVal, validator, description, true, false);
     }
 
-    ConfVars(String varname, Object defaultVal, Validator validator, String description, boolean excluded) {
+    ConfVars(String varname, Object defaultVal, Validator validator, String description, boolean caseSensitive, boolean excluded) {
       this.varname = varname;
       this.validator = validator;
       this.description = description;
       this.defaultExpr = defaultVal == null ? null : String.valueOf(defaultVal);
       this.excluded = excluded;
+      this.caseSensitive = caseSensitive;
       if (defaultVal == null || defaultVal instanceof String) {
         this.valClass = String.class;
         this.valType = VarType.STRING;
@@ -1801,6 +1810,10 @@ public class HiveConf extends Configurat
       return excluded;
     }
 
+    public boolean isCaseSensitive() {
+      return caseSensitive;
+    }
+
     @Override
     public String toString() {
       return varname;

Modified: hive/branches/spark/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/TestURLHook.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/TestURLHook.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/TestURLHook.java (original)
+++ hive/branches/spark/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/TestURLHook.java Sun Aug  3 20:48:35 2014
@@ -28,7 +28,8 @@ import org.apache.hadoop.hive.metastore.
  */
 public class TestURLHook implements JDOConnectionURLHook {
 
-  static String originalUrl = null;
+  private String originalUrl;
+
   @Override
   public String getJdoConnectionUrl(Configuration conf) throws Exception {
     if (originalUrl == null) {

Modified: hive/branches/spark/data/conf/hive-site.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/data/conf/hive-site.xml?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/data/conf/hive-site.xml (original)
+++ hive/branches/spark/data/conf/hive-site.xml Sun Aug  3 20:48:35 2014
@@ -240,4 +240,9 @@
   <value>minimal</value>
 </property>
 
+<property>
+  <name>hive.users.in.admin.role</name>
+  <value>hive_admin_user</value>
+</property>
+
 </configuration>

Modified: hive/branches/spark/hcatalog/core/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/pom.xml?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/pom.xml (original)
+++ hive/branches/spark/hcatalog/core/pom.xml Sun Aug  3 20:48:35 2014
@@ -60,6 +60,13 @@
       <artifactId>hive-exec</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-exec</artifactId>
+      <version>${project.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
     <!-- inter-project -->
     <dependency>
       <groupId>com.google.guava</groupId>

Modified: hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java (original)
+++ hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java Sun Aug  3 20:48:35 2014
@@ -19,13 +19,15 @@
 
 package org.apache.hive.hcatalog.mapreduce;
 
+import com.google.common.collect.ImmutableSet;
+
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-
-import junit.framework.Assert;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,10 +42,10 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
-import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
-import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
+import org.apache.hadoop.hive.ql.io.StorageFormats;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
 import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
@@ -53,15 +55,23 @@ import org.apache.hadoop.mapreduce.JobSt
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
+
 import org.apache.hive.hcatalog.common.HCatConstants;
 import org.apache.hive.hcatalog.common.HCatUtil;
 import org.apache.hive.hcatalog.data.DefaultHCatRecord;
 import org.apache.hive.hcatalog.data.HCatRecord;
 import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hive.hcatalog.data.schema.HCatSchema;
+
+import junit.framework.Assert;
+
 import org.junit.After;
+import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -69,42 +79,62 @@ import static org.junit.Assert.assertTru
 
 /**
  * Test for HCatOutputFormat. Writes a partition using HCatOutputFormat and reads
- * it back using HCatInputFormat, checks the column values and counts.
+ * it back using HCatInputFormat, checks the column values and counts. This class
+ * can be subclassed to test different partitioning schemes.
+ *
+ * This is a parameterized test that tests HCatOutputFormat and HCatInputFormat against Hive's
+ * native storage formats enumerated using {@link org.apache.hadoop.hive.ql.io.StorageFormats}.
  */
+@RunWith(Parameterized.class)
 public abstract class HCatMapReduceTest extends HCatBaseTest {
-
   private static final Logger LOG = LoggerFactory.getLogger(HCatMapReduceTest.class);
+
   protected static String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
-  protected static String tableName = "testHCatMapReduceTable";
+  protected static final String TABLE_NAME = "testHCatMapReduceTable";
 
   private static List<HCatRecord> writeRecords = new ArrayList<HCatRecord>();
   private static List<HCatRecord> readRecords = new ArrayList<HCatRecord>();
 
-  protected abstract List<FieldSchema> getPartitionKeys();
-
-  protected abstract List<FieldSchema> getTableColumns();
-
   private static FileSystem fs;
   private String externalTableLocation = null;
+  protected String tableName;
+  protected String serdeClass;
+  protected String inputFormatClass;
+  protected String outputFormatClass;
 
-  protected Boolean isTableExternal() {
-    return false;
+  /**
+   * List of SerDe classes that the HCatalog core tests will not be run against.
+   */
+  public static final Set<String> DISABLED_SERDES = ImmutableSet.of(
+      AvroSerDe.class.getName(),
+      ParquetHiveSerDe.class.getName());
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> generateParameters() {
+    return StorageFormats.asParameters();
   }
 
-  protected boolean isTableImmutable() {
-    return true;
+  /**
+   * Test constructor that sets the storage format class names provided by the test parameter.
+   */
+  public HCatMapReduceTest(String name, String serdeClass, String inputFormatClass,
+      String outputFormatClass) throws Exception {
+    this.serdeClass = serdeClass;
+    this.inputFormatClass = inputFormatClass;
+    this.outputFormatClass = outputFormatClass;
+    this.tableName = TABLE_NAME + "_" + name;
   }
 
-  protected String inputFormat() {
-    return RCFileInputFormat.class.getName();
-  }
+  protected abstract List<FieldSchema> getPartitionKeys();
+
+  protected abstract List<FieldSchema> getTableColumns();
 
-  protected String outputFormat() { 
-    return RCFileOutputFormat.class.getName(); 
+  protected Boolean isTableExternal() {
+    return false;
   }
 
-  protected String serdeClass() { 
-    return ColumnarSerDe.class.getName(); 
+  protected boolean isTableImmutable() {
+    return true;
   }
 
   @BeforeClass
@@ -143,13 +173,16 @@ public abstract class HCatMapReduceTest 
 
   @Before
   public void createTable() throws Exception {
-    String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
+    // Use JUnit's Assume to skip running this fixture against any storage formats whose
+    // SerDe is in the disabled serdes list.
+    Assume.assumeTrue(!DISABLED_SERDES.contains(serdeClass));
 
+    String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
     try {
       client.dropTable(databaseName, tableName);
     } catch (Exception e) {
-    } //can fail with NoSuchObjectException
-
+      // Can fail with NoSuchObjectException.
+    }
 
     Table tbl = new Table();
     tbl.setDbName(databaseName);
@@ -160,10 +193,9 @@ public abstract class HCatMapReduceTest 
       tbl.setTableType(TableType.MANAGED_TABLE.toString());
     }
     StorageDescriptor sd = new StorageDescriptor();
-
     sd.setCols(getTableColumns());
-    tbl.setPartitionKeys(getPartitionKeys());
 
+    tbl.setPartitionKeys(getPartitionKeys());
     tbl.setSd(sd);
 
     sd.setBucketCols(new ArrayList<String>(2));
@@ -171,12 +203,12 @@ public abstract class HCatMapReduceTest 
     sd.getSerdeInfo().setName(tbl.getTableName());
     sd.getSerdeInfo().setParameters(new HashMap<String, String>());
     sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
-    if (isTableExternal()){
+    if (isTableExternal()) {
       sd.getSerdeInfo().getParameters().put("EXTERNAL", "TRUE");
     }
-    sd.getSerdeInfo().setSerializationLib(serdeClass());
-    sd.setInputFormat(inputFormat());
-    sd.setOutputFormat(outputFormat());
+    sd.getSerdeInfo().setSerializationLib(serdeClass);
+    sd.setInputFormat(inputFormatClass);
+    sd.setOutputFormat(outputFormatClass);
 
     Map<String, String> tableParams = new HashMap<String, String>();
     if (isTableExternal()) {
@@ -190,68 +222,59 @@ public abstract class HCatMapReduceTest 
     client.createTable(tbl);
   }
 
-  //Create test input file with specified number of rows
+  /*
+   * Create test input file with specified number of rows
+   */
   private void createInputFile(Path path, int rowCount) throws IOException {
-
     if (fs.exists(path)) {
       fs.delete(path, true);
     }
 
     FSDataOutputStream os = fs.create(path);
-
     for (int i = 0; i < rowCount; i++) {
       os.writeChars(i + "\n");
     }
-
     os.close();
   }
 
-  public static class MapCreate extends
-      Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
-
-    static int writeCount = 0; //test will be in local mode
+  public static class MapCreate extends Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
+    // Test will be in local mode.
+    static int writeCount = 0;
 
     @Override
-    public void map(LongWritable key, Text value, Context context
-    ) throws IOException, InterruptedException {
-      {
-        try {
-          HCatRecord rec = writeRecords.get(writeCount);
-          context.write(null, rec);
-          writeCount++;
-
-        } catch (Exception e) {
-
-          e.printStackTrace(System.err); //print since otherwise exception is lost
-          throw new IOException(e);
-        }
+    public void map(LongWritable key, Text value, Context context)
+        throws IOException, InterruptedException {
+      try {
+        HCatRecord rec = writeRecords.get(writeCount);
+        context.write(null, rec);
+        writeCount++;
+      } catch (Exception e) {
+        // Print since otherwise exception is lost.
+        e.printStackTrace(System.err);
+        throw new IOException(e);
       }
     }
   }
 
-  public static class MapRead extends
-      Mapper<WritableComparable, HCatRecord, BytesWritable, Text> {
-
+  public static class MapRead extends Mapper<WritableComparable, HCatRecord, BytesWritable, Text> {
     static int readCount = 0; //test will be in local mode
 
     @Override
-    public void map(WritableComparable key, HCatRecord value, Context context
-    ) throws IOException, InterruptedException {
-      {
-        try {
-          readRecords.add(value);
-          readCount++;
-        } catch (Exception e) {
-          e.printStackTrace(); //print since otherwise exception is lost
-          throw new IOException(e);
-        }
+    public void map(WritableComparable key, HCatRecord value, Context context)
+        throws IOException, InterruptedException {
+      try {
+        readRecords.add(value);
+        readCount++;
+      } catch (Exception e) {
+        // Print since otherwise exception is lost.
+        e.printStackTrace();
+        throw new IOException(e);
       }
     }
   }
 
-  Job runMRCreate(Map<String, String> partitionValues,
-          List<HCatFieldSchema> partitionColumns, List<HCatRecord> records,
-          int writeCount, boolean assertWrite) throws Exception {
+  Job runMRCreate(Map<String, String> partitionValues, List<HCatFieldSchema> partitionColumns,
+      List<HCatRecord> records, int writeCount, boolean assertWrite) throws Exception {
     return runMRCreate(partitionValues, partitionColumns, records, writeCount, assertWrite,
         true, null);
   }
@@ -267,10 +290,9 @@ public abstract class HCatMapReduceTest 
    * @return
    * @throws Exception
    */
-  Job runMRCreate(Map<String, String> partitionValues,
-          List<HCatFieldSchema> partitionColumns, List<HCatRecord> records,
-          int writeCount, boolean assertWrite, boolean asSingleMapTask,
-          String customDynamicPathPattern) throws Exception {
+  Job runMRCreate(Map<String, String> partitionValues, List<HCatFieldSchema> partitionColumns,
+      List<HCatRecord> records, int writeCount, boolean assertWrite, boolean asSingleMapTask,
+      String customDynamicPathPattern) throws Exception {
 
     writeRecords = records;
     MapCreate.writeCount = 0;
@@ -355,7 +377,6 @@ public abstract class HCatMapReduceTest 
    * @throws Exception
    */
   List<HCatRecord> runMRRead(int readCount, String filter) throws Exception {
-
     MapRead.readCount = 0;
     readRecords.clear();
 
@@ -388,9 +409,7 @@ public abstract class HCatMapReduceTest 
     return readRecords;
   }
 
-
   protected HCatSchema getTableSchema() throws Exception {
-
     Configuration conf = new Configuration();
     Job job = new Job(conf, "hcat mapreduce read schema test");
     job.setJarByClass(this.getClass());

Modified: hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java (original)
+++ hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java Sun Aug  3 20:48:35 2014
@@ -37,8 +37,10 @@ import org.apache.hive.hcatalog.data.Def
 import org.apache.hive.hcatalog.data.HCatRecord;
 import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils;
+
 import org.junit.BeforeClass;
 import org.junit.Test;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -53,9 +55,10 @@ public class TestHCatDynamicPartitioned 
   protected static final int NUM_RECORDS = 20;
   protected static final int NUM_PARTITIONS = 5;
 
-  @BeforeClass
-  public static void generateInputData() throws Exception {
-    tableName = "testHCatDynamicPartitionedTable";
+  public TestHCatDynamicPartitioned(String formatName, String serdeClass, String inputFormatClass,
+      String outputFormatClass) throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+    tableName = "testHCatDynamicPartitionedTable_" + formatName;
     generateWriteRecords(NUM_RECORDS, NUM_PARTITIONS, 0);
     generateDataColumns();
   }

Modified: hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java (original)
+++ hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java Sun Aug  3 20:48:35 2014
@@ -24,18 +24,20 @@ import org.junit.Test;
 
 public class TestHCatExternalDynamicPartitioned extends TestHCatDynamicPartitioned {
 
+  public TestHCatExternalDynamicPartitioned(String formatName, String serdeClass,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+    tableName = "testHCatExternalDynamicPartitionedTable_" + formatName;
+    generateWriteRecords(NUM_RECORDS, NUM_PARTITIONS, 0);
+    generateDataColumns();
+  }
+
   @Override
   protected Boolean isTableExternal() {
     return true;
   }
 
-  @BeforeClass
-  public static void generateInputData() throws Exception {
-    tableName = "testHCatExternalDynamicPartitionedTable";
-    generateWriteRecords(NUM_RECORDS, NUM_PARTITIONS, 0);
-    generateDataColumns();
-  }
-
   /**
    * Run the external dynamic partitioning test but with single map task
    * @throws Exception

Modified: hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalNonPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalNonPartitioned.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalNonPartitioned.java (original)
+++ hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalNonPartitioned.java Sun Aug  3 20:48:35 2014
@@ -20,6 +20,11 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 public class TestHCatExternalNonPartitioned extends TestHCatNonPartitioned {
+  public TestHCatExternalNonPartitioned(String formatName, String serdeName,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeName, inputFormatClass, outputFormatClass);
+  }
 
   @Override
   protected Boolean isTableExternal() {

Modified: hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalPartitioned.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalPartitioned.java (original)
+++ hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalPartitioned.java Sun Aug  3 20:48:35 2014
@@ -20,6 +20,11 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 public class TestHCatExternalPartitioned extends TestHCatPartitioned {
+  public TestHCatExternalPartitioned(String formatName, String serdeClass,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+  }
 
   @Override
   protected Boolean isTableExternal() {

Modified: hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableDynamicPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableDynamicPartitioned.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableDynamicPartitioned.java (original)
+++ hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableDynamicPartitioned.java Sun Aug  3 20:48:35 2014
@@ -20,6 +20,11 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 public class TestHCatMutableDynamicPartitioned extends TestHCatDynamicPartitioned {
+  public TestHCatMutableDynamicPartitioned(String formatName, String serdeClass,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+  }
 
   @Override
   protected boolean isTableImmutable() {

Modified: hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableNonPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableNonPartitioned.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableNonPartitioned.java (original)
+++ hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableNonPartitioned.java Sun Aug  3 20:48:35 2014
@@ -20,7 +20,11 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 public class TestHCatMutableNonPartitioned extends TestHCatNonPartitioned {
-
+  public TestHCatMutableNonPartitioned(String formatName, String serdeClass,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+  }
 
   @Override
   protected boolean isTableImmutable() {

Modified: hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutablePartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutablePartitioned.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutablePartitioned.java (original)
+++ hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutablePartitioned.java Sun Aug  3 20:48:35 2014
@@ -20,6 +20,11 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 public class TestHCatMutablePartitioned extends TestHCatPartitioned {
+  public TestHCatMutablePartitioned(String formatName, String serdeClass,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+  }
 
   @Override
   protected boolean isTableImmutable() {

Modified: hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java (original)
+++ hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java Sun Aug  3 20:48:35 2014
@@ -43,16 +43,14 @@ import static org.junit.Assert.assertFal
 import static org.junit.Assert.assertNull;
 
 public class TestHCatNonPartitioned extends HCatMapReduceTest {
-
   private static List<HCatRecord> writeRecords;
   static List<HCatFieldSchema> partitionColumns;
 
-  @BeforeClass
-  public static void oneTimeSetUp() throws Exception {
-
+  public TestHCatNonPartitioned(String formatName, String serdeClass, String inputFormatClass,
+      String outputFormatClass) throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
     dbName = null; //test if null dbName works ("default" is used)
-    tableName = "testHCatNonPartitionedTable";
-
+    tableName = "testHCatNonPartitionedTable_" + formatName;
     writeRecords = new ArrayList<HCatRecord>();
 
     for (int i = 0; i < 20; i++) {

Modified: hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java (original)
+++ hive/branches/spark/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java Sun Aug  3 20:48:35 2014
@@ -49,10 +49,10 @@ public class TestHCatPartitioned extends
   private static List<HCatRecord> writeRecords;
   private static List<HCatFieldSchema> partitionColumns;
 
-  @BeforeClass
-  public static void oneTimeSetUp() throws Exception {
-
-    tableName = "testHCatPartitionedTable";
+  public TestHCatPartitioned(String formatName, String serdeClass, String inputFormatClass,
+      String outputFormatClass) throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+    tableName = "testHCatPartitionedTable_" + formatName;
     writeRecords = new ArrayList<HCatRecord>();
 
     for (int i = 0; i < 20; i++) {
@@ -68,7 +68,6 @@ public class TestHCatPartitioned extends
     partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", serdeConstants.STRING_TYPE_NAME, "")));
   }
 
-
   @Override
   protected List<FieldSchema> getPartitionKeys() {
     List<FieldSchema> fields = new ArrayList<FieldSchema>();

Modified: hive/branches/spark/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java (original)
+++ hive/branches/spark/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java Sun Aug  3 20:48:35 2014
@@ -119,7 +119,6 @@ public class HCatLoader extends HCatBase
       if (!HCatUtil.checkJobContextIfRunningFromBackend(job)) {
         //Combine credentials and credentials from job takes precedence for freshness
         Credentials crd = jobCredentials.get(INNER_SIGNATURE_PREFIX + "_" + signature);
-        crd.addAll(job.getCredentials());
         job.getCredentials().addAll(crd);
       }
     } else {

Modified: hive/branches/spark/hcatalog/streaming/src/test/sit
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/streaming/src/test/sit?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/streaming/src/test/sit (original)
+++ hive/branches/spark/hcatalog/streaming/src/test/sit Sun Aug  3 20:48:35 2014
@@ -33,7 +33,7 @@ for jar in ${HIVE_HOME}/hcatalog/share/h
   CLASSPATH=${CLASSPATH}:$jar
 done
 
-CLASSPATH=${CLASSPATH}:${HADOOP_HOME}/conf
+CLASSPATH=${CLASSPATH}:${HADOOP_HOME}/etc/hadoop
 CLASSPATH=${CLASSPATH}:${HIVE_HOME}/conf
 
 $JAVA_HOME/bin/java -cp ${CLASSPATH} org.apache.hive.hcatalog.streaming.StreamingIntegrationTester $@

Modified: hive/branches/spark/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java (original)
+++ hive/branches/spark/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java Sun Aug  3 20:48:35 2014
@@ -178,7 +178,6 @@ public class MiniHS2 extends AbstractHiv
     hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, getHost());
     hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, getBinaryPort());
     hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT, getHttpPort());
-    HiveMetaStore.HMSHandler.resetDefaultDBFlag();
 
     Path scratchDir = new Path(baseDfsDir, "scratch");
     fs.mkdirs(scratchDir);

Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java Sun Aug  3 20:48:35 2014
@@ -42,9 +42,9 @@ public class TestMetastoreVersion extend
   @Override
   protected void setUp() throws Exception {
     super.setUp();
-    Field defDb = HiveMetaStore.HMSHandler.class.getDeclaredField("createDefaultDB");
+    Field defDb = HiveMetaStore.HMSHandler.class.getDeclaredField("currentUrl");
     defDb.setAccessible(true);
-    defDb.setBoolean(null, false);
+    defDb.set(null, null);
     hiveConf = new HiveConf(this.getClass());
     System.setProperty("hive.metastore.event.listeners",
         DummyListener.class.getName());

Modified: hive/branches/spark/itests/qtest/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/qtest/testconfiguration.properties?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/itests/qtest/testconfiguration.properties (original)
+++ hive/branches/spark/itests/qtest/testconfiguration.properties Sun Aug  3 20:48:35 2014
@@ -1,5 +1,5 @@
 minimr.query.files=stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q,auto_sortmerge_join_16.q,quotedid_smb.q,file_with_header_footer.q,external_table_with_space_in_location_path.q,root_dir_external_table.q,index_bitmap3.q,ql_rewrite_gbtoidx.q,index_bitmap_auto.q,udf_using.q,empty_dir_in_table.q,temp_table_external.q
 minimr.query.negative.files=cluster_tasklog_retrieval.q,minimr_broken_pipe.q,mapreduce_stack_trace.q,mapreduce_stack_trace_turnoff.q,mapreduce_stack_trace_hadoop20.q,mapreduce_stack_trace_turnoff_hadoop20.q,file_with_header_footer_negative.q,udf_local_resource.q
 minitez.query.files=tez_fsstat.q,mapjoin_decimal.q,tez_join_tests.q,tez_joins_explain.q,mrr.q,tez_dml.q,tez_insert_overwrite_local_directory_1.q,tez_union.q,bucket_map_join_tez1.q,bucket_map_join_tez2.q,tez_schema_evolution.q,tez_join_hash.q
-minitez.query.files.shared=cross_product_check_1.q,cross_product_check_2.q,dynpart_sort_opt_vectorization.q,dynpart_sort_optimization.q,orc_analyze.q,join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q,stats_counter.q,stats_noscan_1.q,stats_counter_partitioned.q,union2.q,union3.q,union4.q,union5.q,union6.q,union7.q,union8.q,union9.q,transform1.q,transform2.q,transform_ppr1.q,transform_ppr2.q,script_env_var1.q,script_env_var2.q,script_pipe.q,scriptfile1.q,metadataonly1.q,temp_t
 able.q,vectorized_ptf.q,optimize_nullscan.q,vector_cast_constant.q
+minitez.query.files.shared=orc_merge1.q,orc_merge2.q,orc_merge3.q,orc_merge4.q,alter_merge_orc.q,alter_merge_2_orc.q,alter_merge_stats_orc.q,cross_product_check_1.q,cross_product_check_2.q,dynpart_sort_opt_vectorization.q,dynpart_sort_optimization.q,orc_analyze.q,join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q,stats_counter.q,stats_noscan_1.q,stats_counter_partitioned.q,union2.q,union3.q,union4.q,union5.q,union6.q,union7.q,union8.q,union9.q,transform1.q,transform2.q,transf
 orm_ppr1.q,transform_ppr2.q,script_env_var1.q,script_env_var2.q,script_pipe.q,scriptfile1.q,metadataonly1.q,temp_table.q,vectorized_ptf.q,optimize_nullscan.q,vector_cast_constant.q,vector_string_concat.q
 beeline.positive.exclude=add_part_exist.q,alter1.q,alter2.q,alter4.q,alter5.q,alter_rename_partition.q,alter_rename_partition_authorization.q,archive.q,archive_corrupt.q,archive_multi.q,archive_mr_1806.q,archive_multi_mr_1806.q,authorization_1.q,authorization_2.q,authorization_4.q,authorization_5.q,authorization_6.q,authorization_7.q,ba_table1.q,ba_table2.q,ba_table3.q,ba_table_udfs.q,binary_table_bincolserde.q,binary_table_colserde.q,cluster.q,columnarserde_create_shortcut.q,combine2.q,constant_prop.q,create_nested_type.q,create_or_replace_view.q,create_struct_table.q,create_union_table.q,database.q,database_location.q,database_properties.q,ddltime.q,describe_database_json.q,drop_database_removes_partition_dirs.q,escape1.q,escape2.q,exim_00_nonpart_empty.q,exim_01_nonpart.q,exim_02_00_part_empty.q,exim_02_part.q,exim_03_nonpart_over_compat.q,exim_04_all_part.q,exim_04_evolved_parts.q,exim_05_some_part.q,exim_06_one_part.q,exim_07_all_part_over_nonoverlap.q,exim_08_nonpart_rename.q,
 exim_09_part_spec_nonoverlap.q,exim_10_external_managed.q,exim_11_managed_external.q,exim_12_external_location.q,exim_13_managed_location.q,exim_14_managed_location_over_existing.q,exim_15_external_part.q,exim_16_part_external.q,exim_17_part_managed.q,exim_18_part_external.q,exim_19_00_part_external_location.q,exim_19_part_external_location.q,exim_20_part_managed_location.q,exim_21_export_authsuccess.q,exim_22_import_exist_authsuccess.q,exim_23_import_part_authsuccess.q,exim_24_import_nonexist_authsuccess.q,global_limit.q,groupby_complex_types.q,groupby_complex_types_multi_single_reducer.q,index_auth.q,index_auto.q,index_auto_empty.q,index_bitmap.q,index_bitmap1.q,index_bitmap2.q,index_bitmap3.q,index_bitmap_auto.q,index_bitmap_rc.q,index_compact.q,index_compact_1.q,index_compact_2.q,index_compact_3.q,index_stale_partitioned.q,init_file.q,input16.q,input16_cc.q,input46.q,input_columnarserde.q,input_dynamicserde.q,input_lazyserde.q,input_testxpath3.q,input_testxpath4.q,insert2_overwr
 ite_partitions.q,insertexternal1.q,join_thrift.q,lateral_view.q,load_binary_data.q,load_exist_part_authsuccess.q,load_nonpart_authsuccess.q,load_part_authsuccess.q,loadpart_err.q,lock1.q,lock2.q,lock3.q,lock4.q,merge_dynamic_partition.q,multi_insert.q,multi_insert_move_tasks_share_dependencies.q,null_column.q,ppd_clusterby.q,query_with_semi.q,rename_column.q,sample6.q,sample_islocalmode_hook.q,set_processor_namespaces.q,show_tables.q,source.q,split_sample.q,str_to_map.q,transform1.q,udaf_collect_set.q,udaf_context_ngrams.q,udaf_histogram_numeric.q,udaf_ngrams.q,udaf_percentile_approx.q,udf_array.q,udf_bitmap_and.q,udf_bitmap_or.q,udf_explode.q,udf_format_number.q,udf_map.q,udf_map_keys.q,udf_map_values.q,udf_max.q,udf_min.q,udf_named_struct.q,udf_percentile.q,udf_printf.q,udf_sentences.q,udf_sort_array.q,udf_split.q,udf_struct.q,udf_substr.q,udf_translate.q,udf_union.q,udf_xpath.q,udtf_stack.q,view.q,virtual_column.q

Modified: hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java Sun Aug  3 20:48:35 2014
@@ -1738,10 +1738,10 @@ public class QTestUtil {
         (command != null ? " running " + command : "") + (debugHint != null ? debugHint : ""));
   }
 
+  // for negative tests that unexpectedly succeeded; no need to print the query string
   public void failed(String fname, String debugHint) {
-    String command = SessionState.get() != null ? SessionState.get().getLastCommand() : null;
     Assert.fail("Client Execution was expected to fail, but succeeded with error code 0 " +
-        (command != null ? " running " + command : "") + (debugHint != null ? debugHint : ""));
+        (debugHint != null ? debugHint : ""));
   }
 
   public void failedDiff(int ecode, String fname, String debugHint) {
@@ -1755,7 +1755,9 @@ public class QTestUtil {
     e.printStackTrace();
     System.err.println("Failed query: " + fname);
     System.err.flush();
-    Assert.fail("Unexpected exception" + (command != null ? " running " + command : "") +
+    Assert.fail("Unexpected exception " +
+        org.apache.hadoop.util.StringUtils.stringifyException(e) + "\n" +
+        (command != null ? " running " + command : "") +
         (debugHint != null ? debugHint : ""));
   }
 }

Modified: hive/branches/spark/metastore/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/pom.xml?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/metastore/pom.xml (original)
+++ hive/branches/spark/metastore/pom.xml Sun Aug  3 20:48:35 2014
@@ -92,6 +92,16 @@
       <version>${datanucleus-rdbms.version}</version>
     </dependency>
     <dependency>
+      <groupId>commons-pool</groupId>
+      <artifactId>commons-pool</artifactId>
+      <version>${commons-pool.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-dbcp</groupId>
+      <artifactId>commons-dbcp</artifactId>
+      <version>${commons-dbcp.version}</version>
+    </dependency>
+    <dependency>
       <groupId>javax.jdo</groupId>
       <artifactId>jdo-api</artifactId>
       <version>${jdo-api.version}</version>

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Sun Aug  3 20:48:35 2014
@@ -89,7 +89,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse;
 import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest;
 import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleResponse;
-import org.apache.hadoop.hive.metastore.api.GrantRevokeType;
 import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
 import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
 import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
@@ -241,13 +240,12 @@ public class HiveMetaStore extends Thrif
   public static class HMSHandler extends FacebookBase implements
       IHMSHandler {
     public static final Log LOG = HiveMetaStore.LOG;
-    private static boolean createDefaultDB = false;
-    private static boolean defaultRolesCreated = false;
-    private static boolean adminUsersAdded = false;
     private String rawStoreClassName;
     private final HiveConf hiveConf; // stores datastore (jpox) properties,
                                      // right now they come from jpox.properties
 
+    private static String currentUrl;
+
     private Warehouse wh; // hdfs warehouse
     private final ThreadLocal<RawStore> threadLocalMS =
         new ThreadLocal<RawStore>() {
@@ -316,8 +314,6 @@ public class HiveMetaStore extends Thrif
           address, cmd).toString());
     }
 
-    // The next serial number to be assigned
-    private boolean checkForDefaultDb;
     private static int nextSerialNum = 0;
     private static ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() {
       @Override
@@ -350,10 +346,6 @@ public class HiveMetaStore extends Thrif
       return threadLocalId.get();
     }
 
-    public static void resetDefaultDBFlag() {
-      createDefaultDB = false;
-    }
-
     public HMSHandler(String name) throws MetaException {
       super(name);
       hiveConf = new HiveConf(this.getClass());
@@ -387,8 +379,6 @@ public class HiveMetaStore extends Thrif
 
     private boolean init() throws MetaException {
       rawStoreClassName = hiveConf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL);
-      checkForDefaultDb = hiveConf.getBoolean(
-          "hive.metastore.checkForDefaultDb", true);
       initListeners = MetaStoreUtils.getMetaStoreListeners(
           MetaStoreInitListener.class, hiveConf,
           hiveConf.getVar(HiveConf.ConfVars.METASTORE_INIT_HOOKS));
@@ -404,9 +394,12 @@ public class HiveMetaStore extends Thrif
       wh = new Warehouse(hiveConf);
 
       synchronized (HMSHandler.class) {
-        createDefaultDB();
-        createDefaultRoles();
-        addAdminUsers();
+        if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(hiveConf))) {
+          createDefaultDB();
+          createDefaultRoles();
+          addAdminUsers();
+          currentUrl = MetaStoreInit.getConnectionURL(hiveConf);
+        }
       }
 
       if (hiveConf.getBoolean("hive.metastore.metrics.enabled", false)) {
@@ -517,7 +510,6 @@ public class HiveMetaStore extends Thrif
         db.setOwnerType(PrincipalType.ROLE);
         ms.createDatabase(db);
       }
-      HMSHandler.createDefaultDB = true;
     }
 
     /**
@@ -526,9 +518,6 @@ public class HiveMetaStore extends Thrif
      * @throws MetaException
      */
     private void createDefaultDB() throws MetaException {
-      if (HMSHandler.createDefaultDB || !checkForDefaultDb) {
-        return;
-      }
       try {
         createDefaultDB_core(getMS());
       } catch (InvalidObjectException e) {
@@ -541,11 +530,6 @@ public class HiveMetaStore extends Thrif
 
     private void createDefaultRoles() throws MetaException {
 
-      if(defaultRolesCreated) {
-        LOG.debug("Admin role already created previously.");
-        return;
-      }
-
       RawStore ms = getMS();
       try {
         ms.addRole(ADMIN, ADMIN);
@@ -579,16 +563,10 @@ public class HiveMetaStore extends Thrif
         // Unlikely to be thrown.
         LOG.warn("Failed while granting global privs to admin", e);
       }
-
-      defaultRolesCreated = true;
     }
 
     private void addAdminUsers() throws MetaException {
 
-      if(adminUsersAdded) {
-        LOG.debug("Admin users already added.");
-        return;
-      }
       // now add pre-configured users to admin role
       String userStr = HiveConf.getVar(hiveConf,ConfVars.USERS_IN_ADMIN_ROLE,"").trim();
       if (userStr.isEmpty()) {
@@ -623,7 +601,6 @@ public class HiveMetaStore extends Thrif
           LOG.debug(userName + " already in admin role", e);
         }
       }
-      adminUsersAdded = true;
     }
 
     private void logInfo(String m) {

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Sun Aug  3 20:48:35 2014
@@ -34,6 +34,7 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -144,6 +145,8 @@ public class HiveMetaStoreClient impleme
   private String tokenStrForm;
   private final boolean localMetaStore;
 
+  private Map<String, String> currentMetaVars;
+
   // for thrift connects
   private int retries = 5;
   private int retryDelaySeconds = 0;
@@ -171,6 +174,7 @@ public class HiveMetaStoreClient impleme
       // through the network
       client = HiveMetaStore.newHMSHandler("hive client", conf);
       isConnected = true;
+      snapshotActiveConf();
       return;
     }
 
@@ -231,6 +235,26 @@ public class HiveMetaStoreClient impleme
   }
 
   @Override
+  public boolean isCompatibleWith(HiveConf conf) {
+    if (currentMetaVars == null) {
+      return false; // recreate
+    }
+    boolean compatible = true;
+    for (ConfVars oneVar : HiveConf.metaVars) {
+      // Since metaVars are all of different types, use string for comparison
+      String oldVar = currentMetaVars.get(oneVar.varname);
+      String newVar = conf.get(oneVar.varname, "");
+      if (oldVar == null ||
+          (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) {
+        LOG.info("Metastore configuration " + oneVar.varname +
+            " changed from " + oldVar + " to " + newVar);
+        compatible = false;
+      }
+    }
+    return compatible;
+  }
+
+  @Override
   public void reconnect() throws MetaException {
     if (localMetaStore) {
       // For direct DB connections we don't yet support reestablishing connections.
@@ -383,9 +407,19 @@ public class HiveMetaStoreClient impleme
       throw new MetaException("Could not connect to meta store using any of the URIs provided." +
         " Most recent failure: " + StringUtils.stringifyException(tte));
     }
+
+    snapshotActiveConf();
+
     LOG.info("Connected to metastore.");
   }
 
+  private void snapshotActiveConf() {
+    currentMetaVars = new HashMap<String, String>(HiveConf.metaVars.length);
+    for (ConfVars oneVar : HiveConf.metaVars) {
+      currentMetaVars.put(oneVar.varname, conf.get(oneVar.varname, ""));
+    }
+  }
+
   public String getTokenStrForm() throws IOException {
     return tokenStrForm;
    }
@@ -393,6 +427,7 @@ public class HiveMetaStoreClient impleme
   @Override
   public void close() {
     isConnected = false;
+    currentMetaVars = null;
     try {
       if (null != client) {
         client.shutdown();

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1615452&r1=1615451&r2=1615452&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Sun Aug  3 20:48:35 2014
@@ -19,9 +19,9 @@
 package org.apache.hadoop.hive.metastore;
 
 import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
 import org.apache.hadoop.hive.metastore.api.LockRequest;
 import org.apache.hadoop.hive.metastore.api.LockResponse;
@@ -41,19 +41,14 @@ import org.apache.hadoop.hive.common.Obj
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
 import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
-import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest;
-import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.Index;
@@ -61,28 +56,18 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.LockRequest;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
-import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
-import org.apache.hadoop.hive.metastore.api.TxnOpenException;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.thrift.TException;
 
 /**
  * TODO Unnecessary when the server sides for both dbstore and filestore are
@@ -91,6 +76,12 @@ import org.apache.thrift.TException;
 public interface IMetaStoreClient {
 
   /**
+   * Returns whether the current client is compatible with the given conf
+   * @return true if the client can be reused with the given conf, false otherwise
+   */
+  public boolean isCompatibleWith(HiveConf conf);
+
+  /**
    *  Tries to reconnect this MetaStoreClient to the MetaStore.
    */
   public void reconnect() throws MetaException;



Mime
View raw message