hive-commits mailing list archives

From sze...@apache.org
Subject svn commit: r1654861 [1/4] - in /hive/trunk: ./ common/src/java/org/apache/hadoop/hive/conf/ hbase-handler/ itests/src/test/resources/ itests/util/src/main/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apac...
Date Mon, 26 Jan 2015 18:38:34 GMT
Author: szehon
Date: Mon Jan 26 18:38:31 2015
New Revision: 1654861

URL: http://svn.apache.org/r1654861
Log:
HIVE-9448 : Merge spark to trunk 1/23/15 (Szehon, reviewed by Xuefu)

Added:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/LocalSparkJobMonitor.java
      - copied unchanged from r1654414, hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/LocalSparkJobMonitor.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
      - copied unchanged from r1654414, hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/LocalSparkJobRef.java
      - copied unchanged from r1654414, hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/LocalSparkJobRef.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobRef.java
      - copied unchanged from r1654414, hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobRef.java
    hive/trunk/spark-client/src/test/java/org/apache/hive/spark/client/TestJobHandle.java
      - copied unchanged from r1654414, hive/branches/spark/spark-client/src/test/java/org/apache/hive/spark/client/TestJobHandle.java
Modified:
    hive/trunk/   (props changed)
    hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/trunk/hbase-handler/pom.xml   (props changed)
    hive/trunk/itests/src/test/resources/testconfiguration.properties
    hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/LocalHiveSparkClient.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSession.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSessionImpl.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSessionManagerImpl.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobRef.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobStatus.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobStatus.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join0.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join15.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join18.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join18_multi_distinct.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join20.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join21.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join23.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join26.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join27.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join28.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join29.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join30.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join31.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join32.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join6.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join7.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/count.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/cross_join.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ctas.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby10.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby11.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby3.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby4.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby8.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby9.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby_complex_types.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby_multi_insert_common_distinct.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby_position.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/having.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/identity_project_remove_skip.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/innerjoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/input14.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/input17.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/input18.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join0.q.java1.7.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join10.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join11.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join12.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join13.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join14.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join15.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join16.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join17.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join18.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join19.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join20.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join21.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join22.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join23.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join29.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join3.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join30.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join31.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join34.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join35.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join38.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join4.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join40.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join5.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join6.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join7.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join8.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join9.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_cond_pushdown_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_cond_pushdown_3.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual3.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual4.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_hive_626.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_merge_multi_expressions.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_merging.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_reorder.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_reorder2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_reorder3.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_thrift.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_vc.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/join_view.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/mapjoin_distinct.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/mapreduce1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/mapreduce2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/merge1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/merge2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/mergejoins.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/mergejoins_mixed.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/multi_insert_mixed.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/orc_analyze.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
    hive/trunk/ql/src/test/results/clientpositive/spark/parallel.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_join.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_outer_join5.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ptf.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/ptf_streaming.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/reduce_deduplicate_exclude_join.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/sample8.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt11.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt12.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt13.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt14.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt16.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt17.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt19.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt20.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt3.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt4.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt5.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt6.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt7.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt8.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/skewjoinopt9.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/sort.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/subquery_exists.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/subquery_in.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.7.out
    hive/trunk/ql/src/test/results/clientpositive/spark/tez_join_tests.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/tez_joins_explain.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union11.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union14.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union15.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union19.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union28.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union3.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union30.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union33.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union5.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union7.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_6.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/union_remove_9.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/vectorization_14.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/vectorization_16.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/vectorization_9.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out
    hive/trunk/spark-client/pom.xml
    hive/trunk/spark-client/src/main/java/org/apache/hive/spark/client/BaseProtocol.java
    hive/trunk/spark-client/src/main/java/org/apache/hive/spark/client/JobHandle.java
    hive/trunk/spark-client/src/main/java/org/apache/hive/spark/client/JobHandleImpl.java
    hive/trunk/spark-client/src/main/java/org/apache/hive/spark/client/RemoteDriver.java
    hive/trunk/spark-client/src/main/java/org/apache/hive/spark/client/SparkClient.java
    hive/trunk/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientFactory.java
    hive/trunk/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
    hive/trunk/spark-client/src/main/java/org/apache/hive/spark/client/rpc/Rpc.java
    hive/trunk/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcConfiguration.java
    hive/trunk/spark-client/src/test/java/org/apache/hive/spark/client/TestSparkClient.java
    hive/trunk/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java

Propchange: hive/trunk/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jan 26 18:38:31 2015
@@ -1,5 +1,5 @@
 /hive/branches/branch-0.11:1480385,1480458,1481120,1481344,1481346,1481348,1481352,1483872,1505184
 /hive/branches/cbo:1605012-1627125
-/hive/branches/spark:1608589-1651242
+/hive/branches/spark:1608589-1654414
 /hive/branches/tez:1494760-1622766
 /hive/branches/vectorization:1466908-1527856

Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Mon Jan 26 18:38:31 2015
@@ -1998,12 +1998,27 @@ public class HiveConf extends Configurat
         "hive.tez.exec.inplace.progress",
         true,
         "Updates tez job execution progress in-place in the terminal."),
-    SPARK_CLIENT_FUTURE_TIMEOUT(
-        "hive.spark.client.future.timeout",
-        "60s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "remote spark client JobHandle future timeout value in seconds.")
-    ;
+    SPARK_CLIENT_FUTURE_TIMEOUT("hive.spark.client.future.timeout",
+      "60s", new TimeValidator(TimeUnit.SECONDS),
+      "Timeout for requests from Hive client to remote Spark driver."),
+    SPARK_JOB_MONITOR_TIMEOUT("hive.spark.job.monitor.timeout",
+      "60s", new TimeValidator(TimeUnit.SECONDS),
+      "Timeout for job monitor to get Spark job state."),
+    SPARK_RPC_CLIENT_CONNECT_TIMEOUT("hive.spark.client.connect.timeout",
+      "1000ms", new TimeValidator(TimeUnit.MILLISECONDS),
+      "Timeout for remote Spark driver in connecting back to Hive client."),
+    SPARK_RPC_CLIENT_HANDSHAKE_TIMEOUT("hive.spark.client.server.connect.timeout",
+      "20000ms", new TimeValidator(TimeUnit.MILLISECONDS),
+      "Timeout for handshake between Hive client and remote Spark driver.  Checked by both processes."),
+    SPARK_RPC_SECRET_RANDOM_BITS("hive.spark.client.secret.bits", "256",
+      "Number of bits of randomness in the generated secret for communication between Hive client and remote Spark driver. " +
+      "Rounded down to the nearest multiple of 8."),
+    SPARK_RPC_MAX_THREADS("hive.spark.client.rpc.threads", 8,
+      "Maximum number of threads for remote Spark driver's RPC event loop."),
+    SPARK_RPC_MAX_MESSAGE_SIZE("hive.spark.client.rpc.max.size", 50 * 1024 * 1024,
+      "Maximum message size in bytes for communication between Hive client and remote Spark driver. Default is 50MB."),
+    SPARK_RPC_CHANNEL_LOG_LEVEL("hive.spark.client.channel.log.level", null,
+      "Channel logging level for remote Spark driver.  One of {DEBUG, ERROR, INFO, TRACE, WARN}.");
 
     public final String varname;
     private final String defaultExpr;

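For context, the new ConfVars above are consumed through HiveConf's typed getters; RemoteHiveSparkClient further down in this commit reads SPARK_CLIENT_FUTURE_TIMEOUT with getTimeVar() in exactly this way. A minimal illustrative snippet (not part of the patch; class name is made up):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hive.conf.HiveConf;

    public class SparkConfReadExample {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Defaults declared above: "60s" for both timeouts.
        long futureTimeoutSecs = conf.getTimeVar(
            HiveConf.ConfVars.SPARK_CLIENT_FUTURE_TIMEOUT, TimeUnit.SECONDS);
        long monitorTimeoutSecs = conf.getTimeVar(
            HiveConf.ConfVars.SPARK_JOB_MONITOR_TIMEOUT, TimeUnit.SECONDS);
        System.out.println(futureTimeoutSecs + "s / " + monitorTimeoutSecs + "s");
      }
    }
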
Propchange: hive/trunk/hbase-handler/pom.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jan 26 18:38:31 2015
@@ -1,6 +1,6 @@
 /hive/branches/branch-0.11/hbase-handler/pom.xml:1480385,1480458,1481120,1481344,1481346,1481348,1481352,1483872,1505184
 /hive/branches/cbo/hbase-handler/pom.xml:1605012-1627125
-/hive/branches/spark/hbase-handler/pom.xml:1608589-1651242
+/hive/branches/spark/hbase-handler/pom.xml:1608589-1654414
 /hive/branches/tez/hbase-handler/pom.xml:1494760-1622766
 /hive/branches/vectorization/hbase-handler/pom.xml:1466908-1527856
 /hive/trunk/hbase-handler/pom.xml:1494760-1537575

Modified: hive/trunk/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/trunk/itests/src/test/resources/testconfiguration.properties?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/trunk/itests/src/test/resources/testconfiguration.properties Mon Jan 26 18:38:31 2015
@@ -675,6 +675,7 @@ spark.query.files=add_part_multiple.q, \
   join35.q, \
   join36.q, \
   join37.q, \
+  join38.q, \
   join39.q, \
   join4.q, \
   join40.q, \
@@ -699,9 +700,11 @@ spark.query.files=add_part_multiple.q, \
   join_empty.q, \
   join_filters_overlap.q, \
   join_hive_626.q, \
+  join_literals.q, \
   join_map_ppr.q, \
   join_merge_multi_expressions.q, \
   join_merging.q, \
+  join_nullsafe.q, \
   join_rc.q, \
   join_reorder.q, \
   join_reorder2.q, \
@@ -881,6 +884,7 @@ spark.query.files=add_part_multiple.q, \
   stats_partscan_1_23.q, \
   statsfs.q, \
   subquery_exists.q, \
+  subquery_in.q, \
   subquery_multiinsert.q, \
   table_access_keys_stats.q, \
   temp_table.q, \

Modified: hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java Mon Jan 26 18:38:31 2015
@@ -78,6 +78,8 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.spark.session.SparkSession;
+import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl;
 import org.apache.hadoop.hive.ql.lockmgr.zookeeper.CuratorFrameworkSingleton;
 import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
 import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -149,6 +151,7 @@ public class QTestUtil {
   private boolean miniMr = false;
   private String hadoopVer = null;
   private QTestSetup setup = null;
+  private SparkSession sparkSession = null;
   private boolean isSessionStateStarted = false;
   private static final String javaVersion = getJavaVersion();
 
@@ -418,6 +421,15 @@ public class QTestUtil {
     }
 
     setup.tearDown();
+    if (sparkSession != null) {
+      try {
+        SparkSessionManagerImpl.getInstance().closeSession(sparkSession);
+      } catch (Exception ex) {
+        LOG.error("Error closing spark session.", ex);
+      } finally {
+        sparkSession = null;
+      }
+    }
     if (mr != null) {
       mr.shutdown();
       mr = null;
@@ -834,7 +846,7 @@ public class QTestUtil {
     HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
     "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
     Utilities.clearWorkMap();
-    CliSessionState ss = new CliSessionState(conf);
+    CliSessionState ss = createSessionState();
     assert ss != null;
     ss.in = System.in;
 
@@ -864,6 +876,9 @@ public class QTestUtil {
     SessionState oldSs = SessionState.get();
 
     if (oldSs != null && (clusterType == MiniClusterType.tez || clusterType == MiniClusterType.spark)) {
+      sparkSession = oldSs.getSparkSession();
+      ss.setSparkSession(sparkSession);
+      oldSs.setSparkSession(null);
       oldSs.close();
     }
 
@@ -882,6 +897,29 @@ public class QTestUtil {
     return outf.getAbsolutePath();
   }
 
+  private CliSessionState createSessionState() {
+   return new CliSessionState(conf) {
+      public void setSparkSession(SparkSession sparkSession) {
+        super.setSparkSession(sparkSession);
+        if (sparkSession != null) {
+          try {
+            // Wait a little for cluster to init, at most 4 minutes
+            long endTime = System.currentTimeMillis() + 240000;
+            while (sparkSession.getMemoryAndCores().getSecond() <= 1) {
+              if (System.currentTimeMillis() >= endTime) {
+                LOG.error("Timed out waiting for Spark cluster to init");
+                break;
+              }
+              Thread.sleep(100);
+            }
+          } catch (Exception e) {
+            LOG.error(e);
+          }
+        }
+      }
+    };
+  }
+
   private CliSessionState startSessionState()
       throws IOException {
 
@@ -890,7 +928,7 @@ public class QTestUtil {
 
     String execEngine = conf.get("hive.execution.engine");
     conf.set("hive.execution.engine", "mr");
-    CliSessionState ss = new CliSessionState(conf);
+    CliSessionState ss = createSessionState();
     assert ss != null;
     ss.in = System.in;
     ss.out = System.out;
@@ -898,6 +936,9 @@ public class QTestUtil {
 
     SessionState oldSs = SessionState.get();
     if (oldSs != null && (clusterType == MiniClusterType.tez || clusterType == MiniClusterType.spark)) {
+      sparkSession = oldSs.getSparkSession();
+      ss.setSparkSession(sparkSession);
+      oldSs.setSparkSession(null);
       oldSs.close();
     }
     if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Mon Jan 26 18:38:31 2015
@@ -31,7 +31,6 @@ import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
 import java.io.EOFException;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -237,8 +236,13 @@ public final class Utilities {
     // prevent instantiation
   }
 
-  private static Map<Path, BaseWork> gWorkMap = Collections
-      .synchronizedMap(new HashMap<Path, BaseWork>());
+  private static ThreadLocal<Map<Path, BaseWork>> gWorkMap =
+      new ThreadLocal<Map<Path, BaseWork>>() {
+    protected Map<Path, BaseWork> initialValue() {
+      return new HashMap<Path, BaseWork>();
+    }
+  };
+
   private static final String CLASS_NAME = Utilities.class.getName();
   private static final Log LOG = LogFactory.getLog(CLASS_NAME);
 
@@ -345,7 +349,7 @@ public final class Utilities {
    */
   public static void setBaseWork(Configuration conf, String name, BaseWork work) {
     Path path = getPlanPath(conf, name);
-    gWorkMap.put(path, work);
+    gWorkMap.get().put(path, work);
   }
 
   /**
@@ -357,15 +361,14 @@ public final class Utilities {
    * @throws RuntimeException if the configuration files are not proper or if plan can not be loaded
    */
   private static BaseWork getBaseWork(Configuration conf, String name) {
-    BaseWork gWork = null;
     Path path = null;
     InputStream in = null;
     try {
       path = getPlanPath(conf, name);
       LOG.info("PLAN PATH = " + path);
       assert path != null;
-      if (!gWorkMap.containsKey(path)
-        || HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
+      BaseWork gWork = gWorkMap.get().get(path);
+      if (gWork == null) {
         Path localPath;
         if (conf.getBoolean("mapreduce.task.uberized", false) && name.equals(REDUCE_PLAN_NAME)) {
           localPath = new Path(name);
@@ -415,10 +418,9 @@ public final class Utilities {
         } else if (name.contains(MERGE_PLAN_NAME)) {
           gWork = deserializePlan(in, MapWork.class, conf);
         }
-        gWorkMap.put(path, gWork);
-      } else {
+        gWorkMap.get().put(path, gWork);
+      } else if (LOG.isDebugEnabled()) {
         LOG.debug("Found plan in cache for name: " + name);
-        gWork = gWorkMap.get(path);
       }
       return gWork;
     } catch (FileNotFoundException fnf) {
@@ -710,7 +712,7 @@ public final class Utilities {
       }
 
       // Cache the plan in this process
-      gWorkMap.put(planPath, w);
+      gWorkMap.get().put(planPath, w);
       return planPath;
     } catch (Exception e) {
       String msg = "Error caching " + name + ": " + e;
@@ -3629,15 +3631,15 @@ public final class Utilities {
     Path mapPath = getPlanPath(conf, MAP_PLAN_NAME);
     Path reducePath = getPlanPath(conf, REDUCE_PLAN_NAME);
     if (mapPath != null) {
-      gWorkMap.remove(mapPath);
+      gWorkMap.get().remove(mapPath);
     }
     if (reducePath != null) {
-      gWorkMap.remove(reducePath);
+      gWorkMap.get().remove(reducePath);
     }
   }
 
   public static void clearWorkMap() {
-    gWorkMap.clear();
+    gWorkMap.get().clear();
   }
 
   /**

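The gWorkMap change above swaps a single synchronized map for a per-thread map, so tasks executing concurrently in one JVM no longer see (or clobber) each other's cached plans; presumably this is also why the spark-engine special case in getBaseWork could be dropped, and why SparkTask now calls Utilities.clearWork(conf) in its finally block. A standalone sketch of the pattern (hypothetical names, not from the patch):

    import java.util.HashMap;
    import java.util.Map;

    public class PerThreadMapDemo {
      // Same idiom as the new gWorkMap: each thread lazily gets its own map.
      private static final ThreadLocal<Map<String, String>> CACHE =
          new ThreadLocal<Map<String, String>>() {
            @Override
            protected Map<String, String> initialValue() {
              return new HashMap<String, String>();
            }
          };

      public static void main(String[] args) throws InterruptedException {
        CACHE.get().put("plan", "cached-by-main");
        Thread other = new Thread(new Runnable() {
          public void run() {
            // Prints "null": this thread's map is independent of main's.
            System.out.println(CACHE.get().get("plan"));
          }
        });
        other.start();
        other.join();
        // Prints "cached-by-main": main's entry is untouched.
        System.out.println(CACHE.get().get("plan"));
      }
    }
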
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java Mon Jan 26 18:38:31 2015
@@ -21,14 +21,18 @@ package org.apache.hadoop.hive.ql.exec.s
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.compress.utils.CharsetNames;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hive.spark.client.rpc.RpcConfiguration;
 import org.apache.spark.SparkConf;
 import org.apache.spark.SparkException;
 
@@ -95,7 +99,8 @@ public class HiveSparkClientFactory {
       }
     }
 
-    // load properties from hive configurations.
+    // load properties from hive configurations, including both spark.* properties
+    // and properties for remote driver RPC.
     for (Map.Entry<String, String> entry : hiveConf) {
       String propertyName = entry.getKey();
       if (propertyName.startsWith("spark")) {
@@ -105,6 +110,13 @@ public class HiveSparkClientFactory {
           "load spark configuration from hive configuration (%s -> %s).",
           propertyName, value));
       }
+      if (RpcConfiguration.HIVE_SPARK_RSC_CONFIGS.contains(propertyName)) {
+        String value = RpcConfiguration.getValue(hiveConf, propertyName);
+        sparkConf.put(propertyName, value);
+        LOG.info(String.format(
+          "load RPC configuration from hive configuration (%s -> %s).",
+          propertyName, value));
+      }
     }
 
     return sparkConf;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/LocalHiveSparkClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/LocalHiveSparkClient.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/LocalHiveSparkClient.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/LocalHiveSparkClient.java Mon Jan 26 18:38:31 2015
@@ -32,6 +32,7 @@ import org.apache.hadoop.hive.ql.DriverC
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobRef;
 import org.apache.hadoop.hive.ql.exec.spark.status.impl.JobMetricsListener;
+import org.apache.hadoop.hive.ql.exec.spark.status.impl.LocalSparkJobRef;
 import org.apache.hadoop.hive.ql.exec.spark.status.impl.LocalSparkJobStatus;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
@@ -136,7 +137,7 @@ public class LocalHiveSparkClient implem
     int jobId = future.jobIds().get(0);
     LocalSparkJobStatus sparkJobStatus = new LocalSparkJobStatus(
       sc, jobId, jobMetricsListener, sparkCounters, plan.getCachedRDDIds(), future);
-    return new SparkJobRef(Integer.toString(jobId), sparkJobStatus);
+    return new LocalSparkJobRef(Integer.toString(jobId), hiveConf,  sparkJobStatus, sc);
   }
 
   /**

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java Mon Jan 26 18:38:31 2015
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.ql.Context
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobRef;
+import org.apache.hadoop.hive.ql.exec.spark.status.impl.RemoteSparkJobRef;
 import org.apache.hadoop.hive.ql.exec.spark.status.impl.RemoteSparkJobStatus;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
@@ -76,10 +77,13 @@ public class RemoteHiveSparkClient imple
   private transient List<String> localJars = new ArrayList<String>();
   private transient List<String> localFiles = new ArrayList<String>();
 
+  private final transient long sparkClientTimtout;
+
   RemoteHiveSparkClient(HiveConf hiveConf, Map<String, String> conf) throws IOException, SparkException {
     this.hiveConf = hiveConf;
     sparkConf = HiveSparkClientFactory.generateSparkConf(conf);
-    remoteClient = SparkClientFactory.createClient(conf);
+    remoteClient = SparkClientFactory.createClient(conf, hiveConf);
+    sparkClientTimtout = hiveConf.getTimeVar(HiveConf.ConfVars.SPARK_CLIENT_FUTURE_TIMEOUT, TimeUnit.SECONDS);
   }
 
   @Override
@@ -89,17 +93,14 @@ public class RemoteHiveSparkClient imple
 
   @Override
   public int getExecutorCount() throws Exception {
-    long timeout = hiveConf.getTimeVar(HiveConf.ConfVars.SPARK_CLIENT_FUTURE_TIMEOUT, TimeUnit.SECONDS);
     Future<Integer> handler = remoteClient.getExecutorCount();
-    return handler.get(timeout, TimeUnit.SECONDS).intValue();
+    return handler.get(sparkClientTimtout, TimeUnit.SECONDS).intValue();
   }
 
   @Override
   public int getDefaultParallelism() throws Exception {
-    long timeout = hiveConf.getTimeVar(
-        HiveConf.ConfVars.SPARK_CLIENT_FUTURE_TIMEOUT, TimeUnit.SECONDS);
     Future<Integer> handler = remoteClient.getDefaultParallelism();
-    return handler.get(timeout, TimeUnit.SECONDS);
+    return handler.get(sparkClientTimtout, TimeUnit.SECONDS);
   }
 
   @Override
@@ -118,11 +119,10 @@ public class RemoteHiveSparkClient imple
     byte[] scratchDirBytes = KryoSerializer.serialize(emptyScratchDir);
     byte[] sparkWorkBytes = KryoSerializer.serialize(sparkWork);
 
-    long timeout = hiveConf.getTimeVar(HiveConf.ConfVars.SPARK_CLIENT_FUTURE_TIMEOUT, TimeUnit.SECONDS);
-
-    JobHandle<Serializable> jobHandle = remoteClient.submit(
-        new JobStatusJob(jobConfBytes, scratchDirBytes, sparkWorkBytes));
-    return new SparkJobRef(jobHandle.getClientJobId(), new RemoteSparkJobStatus(remoteClient, jobHandle, timeout));
+    JobStatusJob job = new JobStatusJob(jobConfBytes, scratchDirBytes, sparkWorkBytes);
+    JobHandle<Serializable> jobHandle = remoteClient.submit(job);
+    RemoteSparkJobStatus sparkJobStatus = new RemoteSparkJobStatus(remoteClient, jobHandle, sparkClientTimtout);
+    return new RemoteSparkJobRef(hiveConf, jobHandle, sparkJobStatus);
   }
 
   private void refreshLocalResources(SparkWork sparkWork, HiveConf conf) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java Mon Jan 26 18:38:31 2015
@@ -49,7 +49,7 @@ import org.apache.hadoop.hive.ql.exec.sp
 import org.apache.hadoop.hive.ql.exec.spark.session.SparkSession;
 import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManager;
 import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl;
-import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor;
+import org.apache.hadoop.hive.ql.exec.spark.status.LocalSparkJobMonitor;
 import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobRef;
 import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobStatus;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
@@ -65,7 +65,6 @@ import org.apache.hadoop.hive.ql.plan.Op
 import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.SparkWork;
 import org.apache.hadoop.hive.ql.plan.StatsWork;
-import org.apache.hadoop.hive.ql.plan.UnionWork;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.util.StringUtils;
@@ -102,19 +101,20 @@ public class SparkTask extends Task<Spar
       SparkJobRef jobRef = sparkSession.submit(driverContext, sparkWork);
       perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_SUBMIT_JOB);
 
+      rc = jobRef.monitorJob();
       SparkJobStatus sparkJobStatus = jobRef.getSparkJobStatus();
-      if (sparkJobStatus != null) {
-        SparkJobMonitor monitor = new SparkJobMonitor(sparkJobStatus);
-        rc = monitor.startMonitor();
-        // for RSC, we should get the counters after job has finished
+      if (rc == 0) {
         sparkCounters = sparkJobStatus.getCounter();
+        // for RSC, we should get the counters after job has finished
         SparkStatistics sparkStatistics = sparkJobStatus.getSparkStatistics();
         if (LOG.isInfoEnabled() && sparkStatistics != null) {
           LOG.info(String.format("=====Spark Job[%s] statistics=====", jobRef.getJobId()));
           logSparkStatistic(sparkStatistics);
         }
-        sparkJobStatus.cleanup();
+      } else if (rc == 2) { // Cancel job if the monitor found job submission timeout.
+        jobRef.cancelJob();
       }
+      sparkJobStatus.cleanup();
     } catch (Exception e) {
       String msg = "Failed to execute spark task, with exception '" + Utilities.getNameMessage(e) + "'";
 
@@ -124,6 +124,7 @@ public class SparkTask extends Task<Spar
       LOG.error(msg, e);
       rc = 1;
     } finally {
+      Utilities.clearWork(conf);
       if (sparkSession != null && sparkSessionManager != null) {
         rc = close(rc);
         try {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSession.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSession.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSession.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSession.java Mon Jan 26 18:38:31 2015
@@ -17,14 +17,13 @@
  */
 package org.apache.hadoop.hive.ql.exec.spark.session;
 
+import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobRef;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.SparkWork;
 
-import scala.Tuple2;
-
 public interface SparkSession {
   /**
    * Initializes a Spark session for DAG execution.
@@ -44,10 +43,10 @@ public interface SparkSession {
    * Get Spark shuffle memory per task, and total number of cores. This
    * information can be used to estimate how many reducers a task can have.
    *
-   * @return a tuple, the first element is the shuffle memory per task in bytes,
+   * @return an object pair, the first element is the shuffle memory per task in bytes,
    *  the second element is the number of total cores usable by the client
    */
-  Tuple2<Long, Integer> getMemoryAndCores() throws Exception;
+  ObjectPair<Long, Integer> getMemoryAndCores() throws Exception;
 
   /**
    * @return true if the session is open and ready to submit jobs.

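getMemoryAndCores() now returns Hive's own ObjectPair rather than Scala's Tuple2, keeping Scala types out of the public interface. Illustrative usage only (values invented); the pair is read with getFirst()/getSecond(), as the QTestUtil change earlier in this commit does with getSecond():

    import org.apache.hadoop.hive.common.ObjectPair;

    public class MemoryAndCoresExample {
      public static void main(String[] args) {
        // 512MB shuffle memory per task, 8 usable cores (made-up numbers).
        ObjectPair<Long, Integer> mc =
            new ObjectPair<Long, Integer>(512L * 1024 * 1024, 8);
        long shuffleMemoryPerTask = mc.getFirst();
        int totalCores = mc.getSecond();
        System.out.println(shuffleMemoryPerTask + " bytes, " + totalCores + " cores");
      }
    }
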
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSessionImpl.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSessionImpl.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSessionImpl.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSessionImpl.java Mon Jan 26 18:38:31 2015
@@ -22,6 +22,7 @@ import java.util.UUID;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.exec.spark.HiveSparkClient;
@@ -32,8 +33,6 @@ import org.apache.hadoop.hive.ql.plan.Sp
 import org.apache.spark.SparkConf;
 import org.apache.spark.util.Utils;
 
-import scala.Tuple2;
-
 import com.google.common.base.Preconditions;
 
 public class SparkSessionImpl implements SparkSession {
@@ -66,12 +65,12 @@ public class SparkSessionImpl implements
   }
 
   @Override
-  public Tuple2<Long, Integer> getMemoryAndCores() throws Exception {
+  public ObjectPair<Long, Integer> getMemoryAndCores() throws Exception {
     SparkConf sparkConf = hiveSparkClient.getSparkConf();
     int numExecutors = hiveSparkClient.getExecutorCount();
     // at start-up, we may be unable to get number of executors
     if (numExecutors <= 0) {
-      return new Tuple2<Long, Integer>(-1L, -1);
+      return new ObjectPair<Long, Integer>(-1L, -1);
     }
     int executorMemoryInMB = Utils.memoryStringToMb(
         sparkConf.get("spark.executor.memory", "512m"));
@@ -94,7 +93,7 @@ public class SparkSessionImpl implements
     LOG.info("Spark cluster current has executors: " + numExecutors
         + ", total cores: " + totalCores + ", memory per executor: "
         + executorMemoryInMB + "M, memoryFraction: " + memoryFraction);
-    return new Tuple2<Long, Integer>(Long.valueOf(memoryPerTaskInBytes),
+    return new ObjectPair<Long, Integer>(Long.valueOf(memoryPerTaskInBytes),
         Integer.valueOf(totalCores));
   }
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSessionManagerImpl.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSessionManagerImpl.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSessionManagerImpl.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/session/SparkSessionManagerImpl.java Mon Jan 26 18:38:31 2015
@@ -97,28 +97,16 @@ public class SparkSessionManagerImpl imp
    *   - create a new session and add it to the list.
    */
   @Override
-  public SparkSession getSession(SparkSession existingSession, HiveConf conf,
-      boolean doOpen) throws HiveException {
+  public SparkSession getSession(SparkSession existingSession, HiveConf conf, boolean doOpen)
+      throws HiveException {
     setup(conf);
 
     if (existingSession != null) {
-      if (canReuseSession(existingSession, conf)) {
-        // Open the session if it is closed.
-        if (!existingSession.isOpen() && doOpen) {
-          existingSession.open(conf);
-        }
-
-        Preconditions.checkState(createdSessions.contains(existingSession));
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(String.format("Existing session (%s) is reused.",
-              existingSession.getSessionId()));
-        }
-        return existingSession;
-      } else {
-        // Close the session, as the client is holding onto a session that can't be used
-        // by the client anymore.
-        closeSession(existingSession);
+      // Open the session if it is closed.
+      if (!existingSession.isOpen() && doOpen) {
+	existingSession.open(conf);
       }
+      return existingSession;
     }
 
     SparkSession sparkSession = new SparkSessionImpl();
@@ -134,24 +122,6 @@ public class SparkSessionManagerImpl imp
     return sparkSession;
   }
 
-  /**
-   * Currently we only match the userNames in existingSession conf and given conf.
-   */
-  private boolean canReuseSession(SparkSession existingSession, HiveConf conf) throws HiveException {
-    try {
-      UserGroupInformation newUgi = Utils.getUGI();
-      String newUserName = newUgi.getShortUserName();
-
-      // TODO this we need to store the session username somewhere else as getUGIForConf never used the conf
-      UserGroupInformation ugiInSession = Utils.getUGI();
-      String userNameInSession = ugiInSession.getShortUserName();
-
-      return newUserName.equals(userNameInSession);
-    } catch (Exception ex) {
-      throw new HiveException("Failed to get user info from HiveConf.", ex);
-    }
-  }
-
   @Override
   public void returnSession(SparkSession sparkSession) throws HiveException {
     // In this particular SparkSessionManager implementation, we don't recycle

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java Mon Jan 26 18:38:31 2015
@@ -15,8 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.hadoop.hive.ql.exec.spark.status;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.spark.status.impl.LocalSparkJobStatus;
+import org.apache.hadoop.hive.ql.log.PerfLogger;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.HashSet;
@@ -24,141 +32,29 @@ import java.util.Map;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
+import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.log.PerfLogger;
-import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
-import org.apache.spark.JobExecutionStatus;
+abstract class SparkJobMonitor {
 
-/**
- * SparkJobMonitor monitor a single Spark job status in a loop until job finished/failed/killed.
- * It print current job status to console and sleep current thread between monitor interval.
- */
-public class SparkJobMonitor {
-
-  private static final String CLASS_NAME = SparkJobMonitor.class.getName();
-  private static final Log LOG = LogFactory.getLog(CLASS_NAME);
+  protected static final String CLASS_NAME = SparkJobMonitor.class.getName();
+  protected static final Log LOG = LogFactory.getLog(CLASS_NAME);
+  protected static SessionState.LogHelper console = new SessionState.LogHelper(LOG);
+  protected final PerfLogger perfLogger = PerfLogger.getPerfLogger();
+  protected final int checkInterval = 1000;
+  protected final long monitorTimeoutInteval;
 
-  private transient LogHelper console;
-  private final PerfLogger perfLogger = PerfLogger.getPerfLogger();
-  private final int checkInterval = 1000;
+  private Set<String> completed = new HashSet<String>();
   private final int printInterval = 3000;
   private long lastPrintTime;
-  private Set<String> completed;
 
-  private SparkJobStatus sparkJobStatus;
-
-  public SparkJobMonitor(SparkJobStatus sparkJobStatus) {
-    this.sparkJobStatus = sparkJobStatus;
-    console = new LogHelper(LOG);
+  protected SparkJobMonitor(HiveConf hiveConf) {
+    monitorTimeoutInteval = hiveConf.getTimeVar(HiveConf.ConfVars.SPARK_JOB_MONITOR_TIMEOUT, TimeUnit.SECONDS);
   }
 
-  public int startMonitor() {
-    completed = new HashSet<String>();
-
-    boolean running = false;
-    boolean done = false;
-    int rc = 0;
-    JobExecutionStatus lastState = null;
-    Map<String, SparkStageProgress> lastProgressMap = null;
-    long startTime = -1;
-
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_RUN_JOB);
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_SUBMIT_TO_RUNNING);
-
-    while (true) {
-      JobExecutionStatus state = sparkJobStatus.getState();
-      try {
-        if (LOG.isDebugEnabled()) {
-          console.printInfo("state = " + state);
-        }
-        if (state != null && state != JobExecutionStatus.UNKNOWN
-          && (state != lastState || state == JobExecutionStatus.RUNNING)) {
-          lastState = state;
-          Map<String, SparkStageProgress> progressMap = sparkJobStatus.getSparkStageProgress();
-
-          switch (state) {
-          case RUNNING:
-            if (!running) {
-              perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_SUBMIT_TO_RUNNING);
-              // print job stages.
-              console.printInfo("\nQuery Hive on Spark job["
-                + sparkJobStatus.getJobId() + "] stages:");
-              for (int stageId : sparkJobStatus.getStageIds()) {
-                console.printInfo(Integer.toString(stageId));
-              }
-
-              console.printInfo("\nStatus: Running (Hive on Spark job["
-                + sparkJobStatus.getJobId() + "])");
-              startTime = System.currentTimeMillis();
-              running = true;
-
-              console.printInfo("Job Progress Format\nCurrentTime StageId_StageAttemptId: "
-                + "SucceededTasksCount(+RunningTasksCount-FailedTasksCount)/TotalTasksCount [StageCost]");
-            }
-
+  public abstract int startMonitor();
 
-            printStatus(progressMap, lastProgressMap);
-            lastProgressMap = progressMap;
-            break;
-          case SUCCEEDED:
-            printStatus(progressMap, lastProgressMap);
-            lastProgressMap = progressMap;
-            if (startTime < 0) {
-              console.printInfo("Status: Finished successfully within a check interval.");
-            } else {
-              double duration = (System.currentTimeMillis() - startTime) / 1000.0;
-              console.printInfo("Status: Finished successfully in "
-                + String.format("%.2f seconds", duration));
-            }
-            running = false;
-            done = true;
-            break;
-          case FAILED:
-            console.printError("Status: Failed");
-            running = false;
-            done = true;
-            rc = 2;
-            break;
-          case UNKNOWN:
-            console.printError("Status: Unknown");
-            running = false;
-            done = true;
-            rc = 2;
-            break;
-          }
-        }
-        if (!done) {
-          Thread.sleep(checkInterval);
-        }
-      } catch (Exception e) {
-        String msg = " with exception '" + Utilities.getNameMessage(e) + "'";
-        if (state == null || state.equals(JobExecutionStatus.UNKNOWN)) {
-          msg = "Job Submission failed" + msg;
-        } else {
-          msg = "Ended Job = " + sparkJobStatus.getJobId() + msg;
-        }
-
-        // Has to use full name to make sure it does not conflict with
-        // org.apache.commons.lang.StringUtils
-        LOG.error(msg, e);
-        console.printError(msg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
-        rc = 1;
-      } finally {
-        if (done) {
-          break;
-        }
-      }
-    }
-
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_RUN_JOB);
-    return rc;
-  }
-
-  private void printStatus(Map<String, SparkStageProgress> progressMap,
-                           Map<String, SparkStageProgress> lastProgressMap) {
+  protected void printStatus(Map<String, SparkStageProgress> progressMap,
+    Map<String, SparkStageProgress> lastProgressMap) {
 
     // do not print duplicate status while still in middle of print interval.
     boolean isDuplicateState = isSameAsPreviousProgress(progressMap, lastProgressMap);
@@ -209,13 +105,13 @@ public class SparkJobMonitor {
           if (failed > 0) {
             /* tasks finished but some failed */
             reportBuffer.append(
-                String.format(
-                    "%s: %d(-%d)/%d Finished with failed tasks\t",
-                    stageName, complete, failed, total));
+              String.format(
+                "%s: %d(-%d)/%d Finished with failed tasks\t",
+                stageName, complete, failed, total));
           } else {
             if (complete == total) {
               reportBuffer.append(
-                  String.format("%s: %d/%d Finished\t", stageName, complete, total));
+                String.format("%s: %d/%d Finished\t", stageName, complete, total));
             } else {
               reportBuffer.append(String.format("%s: %d/%d\t", stageName, complete, total));
             }

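The monitor becomes an abstract base class here: the polling loop that previously lived in SparkJobMonitor moves into mode-specific subclasses, while shared helpers such as printStatus() stay behind as protected methods. A minimal sketch of a concrete monitor built on the new startMonitor() contract follows; the class name, constructor, and polling interval are illustrative, not the subclasses this commit actually adds.

    import java.util.Map;

    import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor;
    import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobStatus;
    import org.apache.hadoop.hive.ql.exec.spark.status.SparkStageProgress;
    import org.apache.spark.JobExecutionStatus;

    // Hypothetical subclass; assumes the base class is default-constructible.
    // The real local/remote monitors add perf logging, console output, and
    // submission-timeout handling on top of this skeleton.
    public class PollingSparkJobMonitor extends SparkJobMonitor {
      private final SparkJobStatus sparkJobStatus;
      private final long checkIntervalMs = 1000;  // assumed polling interval

      public PollingSparkJobMonitor(SparkJobStatus sparkJobStatus) {
        this.sparkJobStatus = sparkJobStatus;
      }

      @Override
      public int startMonitor() {
        Map<String, SparkStageProgress> lastProgressMap = null;
        while (true) {
          try {
            // getState() may now throw HiveException (see the
            // SparkJobStatus change below).
            JobExecutionStatus state = sparkJobStatus.getState();
            if (state == JobExecutionStatus.SUCCEEDED) {
              return 0;
            }
            if (state == JobExecutionStatus.FAILED
                || state == JobExecutionStatus.UNKNOWN) {
              return 2;
            }
            Map<String, SparkStageProgress> progressMap =
                sparkJobStatus.getSparkStageProgress();
            printStatus(progressMap, lastProgressMap);  // protected helper kept in the base
            lastProgressMap = progressMap;
            Thread.sleep(checkIntervalMs);
          } catch (Exception e) {
            return 1;  // mirrors the old loop's rc = 1 on submission/monitoring errors
          }
        }
      }
    }
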
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobRef.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobRef.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobRef.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobRef.java Mon Jan 26 18:38:31 2015
@@ -17,36 +17,13 @@
  */
 package org.apache.hadoop.hive.ql.exec.spark.status;
 
-public class SparkJobRef {
+public interface SparkJobRef {
 
-  private String jobId;
+  public String getJobId();
 
-  private SparkJobStatus sparkJobStatus;
+  public SparkJobStatus getSparkJobStatus();
 
-  public SparkJobRef() { }
+  public boolean cancelJob();
 
-  public SparkJobRef(String jobId) {
-    this.jobId = jobId;
-  }
-
-  public SparkJobRef(String jobId, SparkJobStatus sparkJobStatus) {
-    this.jobId = jobId;
-    this.sparkJobStatus = sparkJobStatus;
-  }
-
-  public String getJobId() {
-    return jobId;
-  }
-
-  public void setJobId(String jobId) {
-    this.jobId = jobId;
-  }
-
-  public SparkJobStatus getSparkJobStatus() {
-    return sparkJobStatus;
-  }
-
-  public void setSparkJobStatus(SparkJobStatus sparkJobStatus) {
-    this.sparkJobStatus = sparkJobStatus;
-  }
+  public int monitorJob();
 }

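With SparkJobRef reduced from a mutable holder class to an interface, callers get a single handle for cancelling and monitoring a job regardless of execution mode. A sketch of an implementation, reusing the monitor sketched above; SimpleSparkJobRef is an illustrative name, and the commit's actual local and remote variants differ mainly in how cancelJob() is wired.

    import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor;
    import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobRef;
    import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobStatus;

    public class SimpleSparkJobRef implements SparkJobRef {
      private final String jobId;
      private final SparkJobStatus sparkJobStatus;
      private final SparkJobMonitor monitor;

      public SimpleSparkJobRef(String jobId, SparkJobStatus sparkJobStatus,
          SparkJobMonitor monitor) {
        this.jobId = jobId;
        this.sparkJobStatus = sparkJobStatus;
        this.monitor = monitor;
      }

      @Override
      public String getJobId() {
        return jobId;
      }

      @Override
      public SparkJobStatus getSparkJobStatus() {
        return sparkJobStatus;
      }

      @Override
      public boolean cancelJob() {
        // Mode-specific in practice: e.g. cancel through the Spark context
        // locally, or through the remote job handle.
        return true;
      }

      @Override
      public int monitorJob() {
        return monitor.startMonitor();
      }
    }
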
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobStatus.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobStatus.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobStatus.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobStatus.java Mon Jan 26 18:38:31 2015
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.ql.exec.spark.status;
 
 import org.apache.hadoop.hive.ql.exec.spark.Statistic.SparkStatistics;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hive.spark.counter.SparkCounters;
 import org.apache.spark.JobExecutionStatus;
 
@@ -30,11 +31,11 @@ public interface SparkJobStatus {
 
   int getJobId();
 
-  JobExecutionStatus getState();
+  JobExecutionStatus getState() throws HiveException;
 
-  int[] getStageIds();
+  int[] getStageIds() throws HiveException;
 
-  Map<String, SparkStageProgress> getSparkStageProgress();
+  Map<String, SparkStageProgress> getSparkStageProgress() throws HiveException;
 
   SparkCounters getCounter();
 

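The status getters now declare HiveException instead of degrading silently: a failed remote lookup surfaces to the caller rather than coming back as null. A sketch of caller-side handling under the new signatures; the helper class and method are hypothetical.

    import java.util.Map;

    import org.apache.hadoop.hive.ql.exec.spark.status.SparkJobStatus;
    import org.apache.hadoop.hive.ql.exec.spark.status.SparkStageProgress;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.spark.JobExecutionStatus;

    public final class StatusPoll {
      /** Returns true once the job succeeds; throws instead of spinning on errors. */
      static boolean pollOnce(SparkJobStatus status) throws HiveException {
        JobExecutionStatus state = status.getState();  // may throw HiveException
        if (state == JobExecutionStatus.SUCCEEDED) {
          return true;
        }
        Map<String, SparkStageProgress> progress = status.getSparkStageProgress();
        // ... render progress here ...
        return false;
      }
    }
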
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobStatus.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobStatus.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobStatus.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/RemoteSparkJobStatus.java Mon Jan 26 18:38:31 2015
@@ -22,6 +22,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.spark.Statistic.SparkStatistics;
 import org.apache.hadoop.hive.ql.exec.spark.Statistic.SparkStatisticsBuilder;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hive.spark.client.MetricsCollection;
 import org.apache.hive.spark.client.metrics.Metrics;
 import org.apache.hive.spark.client.metrics.ShuffleReadMetrics;
@@ -42,6 +43,7 @@ import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
 /**
@@ -49,11 +51,6 @@ import java.util.concurrent.TimeUnit;
  */
 public class RemoteSparkJobStatus implements SparkJobStatus {
   private static final Log LOG = LogFactory.getLog(RemoteSparkJobStatus.class.getName());
-  // time (in milliseconds) to wait for a spark job to be submitted on remote cluster
-  // after this period, we decide the job submission has failed so that client won't hang forever
-  private static final long WAIT_SUBMISSION_TIMEOUT = 30000;
-  // remember when the monitor starts
-  private final long startTime;
   private final SparkClient sparkClient;
   private final JobHandle<Serializable> jobHandle;
   private final transient long sparkClientTimeoutInSeconds;
@@ -62,7 +59,6 @@ public class RemoteSparkJobStatus implem
     this.sparkClient = sparkClient;
     this.jobHandle = jobHandle;
     this.sparkClientTimeoutInSeconds = timeoutInSeconds;
-    startTime = System.nanoTime();
   }
 
   @Override
@@ -71,19 +67,19 @@ public class RemoteSparkJobStatus implem
   }
 
   @Override
-  public JobExecutionStatus getState() {
+  public JobExecutionStatus getState() throws HiveException {
     SparkJobInfo sparkJobInfo = getSparkJobInfo();
     return sparkJobInfo != null ? sparkJobInfo.status() : null;
   }
 
   @Override
-  public int[] getStageIds() {
+  public int[] getStageIds() throws HiveException {
     SparkJobInfo sparkJobInfo = getSparkJobInfo();
     return sparkJobInfo != null ? sparkJobInfo.stageIds() : new int[0];
   }
 
   @Override
-  public Map<String, SparkStageProgress> getSparkStageProgress() {
+  public Map<String, SparkStageProgress> getSparkStageProgress() throws HiveException {
     Map<String, SparkStageProgress> stageProgresses = new HashMap<String, SparkStageProgress>();
     for (int stageId : getStageIds()) {
       SparkStageInfo sparkStageInfo = getSparkStageInfo(stageId);
@@ -131,32 +127,24 @@ public class RemoteSparkJobStatus implem
 
   }
 
-  private SparkJobInfo getSparkJobInfo() {
+  private SparkJobInfo getSparkJobInfo() throws HiveException {
     Integer sparkJobId = jobHandle.getSparkJobIds().size() == 1
       ? jobHandle.getSparkJobIds().get(0) : null;
     if (sparkJobId == null) {
-      long duration = TimeUnit.MILLISECONDS.convert(
-          System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
-      if (duration <= WAIT_SUBMISSION_TIMEOUT) {
-        return null;
-      } else {
-        LOG.info("Job hasn't been submitted after " + duration / 1000 + "s. Aborting it.");
-        jobHandle.cancel(false);
-        return getDefaultJobInfo(sparkJobId, JobExecutionStatus.FAILED);
-      }
+      return null;
     }
-    JobHandle<SparkJobInfo> getJobInfo = sparkClient.submit(
+    Future<SparkJobInfo> getJobInfo = sparkClient.run(
         new GetJobInfoJob(jobHandle.getClientJobId(), sparkJobId));
     try {
       return getJobInfo.get(sparkClientTimeoutInSeconds, TimeUnit.SECONDS);
-    } catch (Throwable t) {
-      LOG.warn("Error getting job info", t);
-      return null;
+    } catch (Exception e) {
+      LOG.warn("Failed to get job info.", e);
+      throw new HiveException(e);
     }
   }
 
   private SparkStageInfo getSparkStageInfo(int stageId) {
-    JobHandle<SparkStageInfo> getStageInfo = sparkClient.submit(new GetStageInfoJob(stageId));
+    Future<SparkStageInfo> getStageInfo = sparkClient.run(new GetStageInfoJob(stageId));
     try {
       return getStageInfo.get(sparkClientTimeoutInSeconds, TimeUnit.SECONDS);
     } catch (Throwable t) {
@@ -165,6 +153,10 @@ public class RemoteSparkJobStatus implem
     }
   }
 
+  public JobHandle.State getRemoteJobState() {
+    return jobHandle.getState();
+  }
+
   private static class GetJobInfoJob implements Job<SparkJobInfo> {
     private final String clientJobId;
     private final int sparkJobId;

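Two related changes land in RemoteSparkJobStatus: the class no longer enforces its own 30-second WAIT_SUBMISSION_TIMEOUT (getSparkJobInfo() now simply returns null until Spark assigns a job id, leaving timeout policy to the monitor), and info/stage lookups go through sparkClient.run(), with failures wrapped in HiveException instead of logged and swallowed. The new getRemoteJobState() exposes the RPC-level handle state so a monitor can distinguish an in-flight submission from a dead one. A sketch of combining the two views; the helper class is hypothetical.

    import org.apache.hadoop.hive.ql.exec.spark.status.impl.RemoteSparkJobStatus;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hive.spark.client.JobHandle;
    import org.apache.spark.JobExecutionStatus;

    public final class RemoteStateProbe {
      static String describe(RemoteSparkJobStatus status) throws HiveException {
        JobExecutionStatus state = status.getState();
        if (state == null) {
          // No Spark job id yet: fall back to the handle's RPC state and let
          // the monitor decide when the submission has timed out.
          JobHandle.State rpcState = status.getRemoteJobState();
          return "not yet submitted (handle state: " + rpcState + ")";
        }
        return state.name();
      }
    }
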
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java Mon Jan 26 18:38:31 2015
@@ -22,6 +22,7 @@ import java.util.Stack;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
@@ -42,8 +43,6 @@ import org.apache.hadoop.hive.ql.parse.s
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 
-import scala.Tuple2;
-
 /**
  * SetSparkReducerParallelism determines how many reducers should
  * be run for a given reduce sink, clone from SetReducerParallelism.
@@ -53,7 +52,7 @@ public class SetSparkReducerParallelism
   private static final Log LOG = LogFactory.getLog(SetSparkReducerParallelism.class.getName());
 
   // Spark memory per task, and total number of cores
-  private Tuple2<Long, Integer> sparkMemoryAndCores;
+  private ObjectPair<Long, Integer> sparkMemoryAndCores;
 
   @Override
   public Object process(Node nd, Stack<Node> stack,
@@ -135,15 +134,15 @@ public class SetSparkReducerParallelism
             maxReducers, false);
 
         if (sparkMemoryAndCores != null &&
-            sparkMemoryAndCores._1() > 0 && sparkMemoryAndCores._2() > 0) {
+            sparkMemoryAndCores.getFirst() > 0 && sparkMemoryAndCores.getSecond() > 0) {
           // warn the user if bytes per reducer is much larger than memory per task
-          if ((double) sparkMemoryAndCores._1() / bytesPerReducer < 0.5) {
+          if ((double) sparkMemoryAndCores.getFirst() / bytesPerReducer < 0.5) {
             LOG.warn("Average load of a reducer is much larger than its available memory. " +
                 "Consider decreasing hive.exec.reducers.bytes.per.reducer");
           }
 
           // If there are more cores, use the number of cores
-          numReducers = Math.max(numReducers, sparkMemoryAndCores._2());
+          numReducers = Math.max(numReducers, sparkMemoryAndCores.getSecond());
         }
         numReducers = Math.min(numReducers, maxReducers);
         LOG.info("Set parallelism for reduce sink " + sink + " to: " + numReducers +

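Swapping scala.Tuple2 for Hive's own ObjectPair removes a direct Scala API dependency from the optimizer; only the accessors change (_1()/_2() become getFirst()/getSecond()). A minimal sketch of the new shape, assuming ObjectPair's two-argument constructor; the values are illustrative.

    import org.apache.hadoop.hive.common.ObjectPair;

    public final class ObjectPairDemo {
      public static void main(String[] args) {
        // Same shape as the optimizer's sparkMemoryAndCores field:
        // memory per task in bytes, and total number of cores.
        ObjectPair<Long, Integer> memoryAndCores =
            new ObjectPair<Long, Integer>(1024L * 1024 * 1024, 8);
        long memoryPerTask = memoryAndCores.getFirst();   // was _1()
        int totalCores = memoryAndCores.getSecond();      // was _2()
        System.out.println(memoryPerTask + " bytes/task, " + totalCores + " cores");
      }
    }

The .q.out diffs that follow record the accompanying golden-file updates: the Spark test plans now show a reducer parallelism of 2 where they previously showed 1.
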
Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join0.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join0.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join0.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join0.q.out Mon Jan 26 18:38:31 2015
@@ -53,7 +53,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
+        Reducer 2 <- Map 1 (SORT, 2)
         Reducer 3 <- Reducer 2 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join15.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join15.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join15.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join15.q.out Mon Jan 26 18:38:31 2015
@@ -42,7 +42,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
+        Reducer 2 <- Map 1 (SORT, 2)
         Reducer 3 <- Reducer 2 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join18.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join18.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join18.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join18.q.out Mon Jan 26 18:38:31 2015
@@ -32,9 +32,9 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (GROUP, 1)
-        Reducer 6 <- Map 5 (GROUP PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 1), Reducer 6 (PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (GROUP, 2)
+        Reducer 6 <- Map 5 (GROUP PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
         Reducer 4 <- Reducer 3 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join18_multi_distinct.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join18_multi_distinct.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join18_multi_distinct.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join18_multi_distinct.q.out Mon Jan 26 18:38:31 2015
@@ -34,9 +34,9 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (GROUP, 1)
-        Reducer 6 <- Map 5 (GROUP PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 1), Reducer 6 (PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (GROUP, 2)
+        Reducer 6 <- Map 5 (GROUP PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
         Reducer 4 <- Reducer 3 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join20.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join20.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join20.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join20.q.out Mon Jan 26 18:38:31 2015
@@ -66,7 +66,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 4 <- Map 3 (SORT, 1)
+        Reducer 4 <- Map 3 (SORT, 2)
         Reducer 5 <- Reducer 4 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join21.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join21.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join21.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join21.q.out Mon Jan 26 18:38:31 2015
@@ -57,7 +57,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 4 <- Map 3 (SORT, 1)
+        Reducer 4 <- Map 3 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 3 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join23.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join23.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join23.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join23.q.out Mon Jan 26 18:38:31 2015
@@ -37,7 +37,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
+        Reducer 2 <- Map 1 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join26.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join26.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join26.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join26.q.out Mon Jan 26 18:38:31 2015
@@ -47,7 +47,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 3 <- Map 2 (GROUP, 1)
+        Reducer 3 <- Map 2 (GROUP, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 2 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join27.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join27.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join27.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join27.q.out Mon Jan 26 18:38:31 2015
@@ -30,8 +30,8 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 6 (PARTITION-LEVEL SORT, 1), Reducer 5 (PARTITION-LEVEL SORT, 1)
-        Reducer 5 <- Map 4 (GROUP, 1)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 6 (PARTITION-LEVEL SORT, 2), Reducer 5 (PARTITION-LEVEL SORT, 2)
+        Reducer 5 <- Map 4 (GROUP, 2)
         Reducer 3 <- Reducer 2 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join28.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join28.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join28.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join28.q.out Mon Jan 26 18:38:31 2015
@@ -53,7 +53,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 4 <- Map 3 (SORT, 1)
+        Reducer 4 <- Map 3 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 3 
@@ -166,7 +166,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
+        Reducer 2 <- Map 1 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -279,7 +279,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 3 <- Map 2 (SORT, 1)
+        Reducer 3 <- Map 2 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 2 
@@ -389,7 +389,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 4 <- Map 3 (SORT, 1)
+        Reducer 4 <- Map 3 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 3 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join29.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join29.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join29.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join29.q.out Mon Jan 26 18:38:31 2015
@@ -57,7 +57,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 4 <- Map 3 (SORT, 1)
+        Reducer 4 <- Map 3 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 3 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join30.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join30.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join30.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join30.q.out Mon Jan 26 18:38:31 2015
@@ -23,7 +23,7 @@ STAGE PLANS:
   Stage: Stage-2
     Spark
       Edges:
-        Reducer 5 <- Map 4 (SORT, 1)
+        Reducer 5 <- Map 4 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 4 
@@ -59,7 +59,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
+        Reducer 2 <- Map 1 (SORT, 2)
         Reducer 3 <- Reducer 2 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join31.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join31.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join31.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join31.q.out Mon Jan 26 18:38:31 2015
@@ -29,8 +29,8 @@ STAGE PLANS:
   Stage: Stage-2
     Spark
       Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
-        Reducer 7 <- Map 6 (SORT, 1)
+        Reducer 2 <- Map 1 (SORT, 2)
+        Reducer 7 <- Map 6 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -90,7 +90,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 4 <- Map 3 (SORT, 1)
+        Reducer 4 <- Map 3 (SORT, 2)
         Reducer 5 <- Reducer 4 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join32.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join32.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join32.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join32.q.out Mon Jan 26 18:38:31 2015
@@ -54,7 +54,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join6.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join6.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join6.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join6.q.out Mon Jan 26 18:38:31 2015
@@ -45,7 +45,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 3 (PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join7.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join7.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join7.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join7.q.out Mon Jan 26 18:38:31 2015
@@ -55,7 +55,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 3 (PARTITION-LEVEL SORT, 1), Map 4 (PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out Mon Jan 26 18:38:31 2015
@@ -12,7 +12,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 4 (PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
         Reducer 3 <- Reducer 2 (SORT, 1)
 #### A masked pattern was here ####
       Vertices:

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/count.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/count.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/count.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/count.q.out Mon Jan 26 18:38:31 2015
@@ -43,7 +43,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/cross_join.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/cross_join.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/cross_join.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/cross_join.q.out Mon Jan 26 18:38:31 2015
@@ -138,7 +138,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 3 (PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/ctas.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/ctas.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/ctas.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/ctas.q.out Mon Jan 26 18:38:31 2015
@@ -32,7 +32,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
+        Reducer 2 <- Map 1 (SORT, 2)
         Reducer 3 <- Reducer 2 (SORT, 1)
 #### A masked pattern was here ####
       Vertices:

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out Mon Jan 26 18:38:31 2015
@@ -14,7 +14,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
+        Reducer 2 <- Map 1 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -65,7 +65,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
+        Reducer 2 <- Map 1 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out Mon Jan 26 18:38:31 2015
@@ -14,7 +14,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (GROUP, 1)
+        Reducer 2 <- Map 1 (GROUP, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -65,7 +65,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (GROUP, 1)
+        Reducer 2 <- Map 1 (GROUP, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out Mon Jan 26 18:38:31 2015
@@ -14,7 +14,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
+        Reducer 2 <- Map 1 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -64,7 +64,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
+        Reducer 2 <- Map 1 (SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/groupby1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/groupby1.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/groupby1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/groupby1.q.out Mon Jan 26 18:38:31 2015
@@ -25,8 +25,8 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Reducer 2 (GROUP, 1)
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (GROUP, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 

Modified: hive/trunk/ql/src/test/results/clientpositive/spark/groupby10.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/spark/groupby10.q.out?rev=1654861&r1=1654860&r2=1654861&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/spark/groupby10.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/spark/groupby10.q.out Mon Jan 26 18:38:31 2015
@@ -55,10 +55,10 @@ STAGE PLANS:
   Stage: Stage-2
     Spark
       Edges:
-        Reducer 5 <- Map 1 (SORT, 1)
-        Reducer 6 <- Map 1 (SORT, 1)
-        Reducer 3 <- Reducer 5 (GROUP, 1)
-        Reducer 4 <- Reducer 6 (GROUP, 1)
+        Reducer 5 <- Map 1 (SORT, 2)
+        Reducer 6 <- Map 1 (SORT, 2)
+        Reducer 3 <- Reducer 5 (GROUP, 2)
+        Reducer 4 <- Reducer 6 (GROUP, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 


