hive-commits mailing list archives

From hashut...@apache.org
Subject svn commit: r1674132 [1/5] - in /hive/branches/cbo: ./ common/src/java/org/apache/hadoop/hive/conf/ common/src/java/org/apache/hive/common/util/ dev-support/ hcatalog/src/test/e2e/templeton/deployers/ hcatalog/src/test/e2e/templeton/deployers/config/hi...
Date Thu, 16 Apr 2015 18:35:41 GMT
Author: hashutosh
Date: Thu Apr 16 18:35:39 2015
New Revision: 1674132

URL: http://svn.apache.org/r1674132
Log:
Merged latest trunk into cbo branch (Ashutosh Chauhan)

Added:
    hive/branches/cbo/common/src/java/org/apache/hive/common/util/ReflectionUtil.java
      - copied unchanged from r1673989, hive/trunk/common/src/java/org/apache/hive/common/util/ReflectionUtil.java
    hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j.properties
      - copied unchanged from r1674131, hive/trunk/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j.properties
    hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml
      - copied unchanged from r1674131, hive/trunk/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml
    hive/branches/cbo/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java
      - copied unchanged from r1674131, hive/trunk/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
      - copied unchanged from r1674131, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_set_nonexistent_conf.q
      - copied unchanged from r1673989, hive/trunk/ql/src/test/queries/clientnegative/authorization_set_nonexistent_conf.q
    hive/branches/cbo/ql/src/test/results/clientnegative/authorization_set_nonexistent_conf.q.out
      - copied unchanged from r1673989, hive/trunk/ql/src/test/results/clientnegative/authorization_set_nonexistent_conf.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/union_view.q.out
      - copied unchanged from r1673989, hive/trunk/ql/src/test/results/clientpositive/tez/union_view.q.out
    hive/branches/cbo/service/src/java/org/apache/hive/service/CookieSigner.java
      - copied unchanged from r1674131, hive/trunk/service/src/java/org/apache/hive/service/CookieSigner.java
    hive/branches/cbo/service/src/test/org/apache/hive/service/TestCookieSigner.java
      - copied unchanged from r1674131, hive/trunk/service/src/test/org/apache/hive/service/TestCookieSigner.java
    hive/branches/cbo/service/src/test/org/apache/hive/service/cli/thrift/ThriftCliServiceTestWithCookie.java
      - copied unchanged from r1674131, hive/trunk/service/src/test/org/apache/hive/service/cli/thrift/ThriftCliServiceTestWithCookie.java
Modified:
    hive/branches/cbo/   (props changed)
    hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/cbo/dev-support/jenkins-execute-hms-test.sh
    hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/env.sh
    hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/start_hive_services.sh
    hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java
    hive/branches/cbo/itests/src/test/resources/testconfiguration.properties
    hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
    hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HttpBasicAuthInterceptor.java
    hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
    hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/Utils.java
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputSplit.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFAddMonths.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFArray.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCbrt.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDecode.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFactorial.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLastDay.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLevenshtein.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMap.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFNextDay.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSoundex.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTrunc.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcTimezone1.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLevenshtein.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNextDay.java
    hive/branches/cbo/ql/src/test/queries/clientpositive/optimize_nullscan.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/tez_union.q
    hive/branches/cbo/ql/src/test/results/clientnegative/join_nonexistent_part.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/udf_add_months_error_1.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/udf_last_day_error_1.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/udf_next_day_error_1.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/udf_next_day_error_2.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/annotate_stats_join.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/annotate_stats_select.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/auto_join32.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/bucketmapjoin1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/correlationoptimizer3.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/correlationoptimizer6.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/decimal_udf.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/groupby_sort_6.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input23.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input26.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input8.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input9.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/join_cond_pushdown_unqual2.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/join_view.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/load_dyn_part14.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/metadataonly1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/nullgroup5.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/num_op_type_conv.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/optimize_nullscan.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/partition_boolexpr.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/ppd_constant_expr.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/ppd_union_view.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/reduce_deduplicate.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/sample6.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/smb_mapjoin9.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/spark/auto_join32.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/spark/auto_join8.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/spark/join8.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/spark/join_view.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/spark/sample6.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/spark/union_view.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/spark/vector_elt.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/tez_union.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/vector_coalesce.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/vector_elt.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf4.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf6.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf7.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_case.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_coalesce.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_elt.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_greatest.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_hour.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_if.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_instr.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_isnull_isnotnull.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_least.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_locate.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_minute.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_nvl.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_parse_url.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_second.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_size.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_trunc.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_when.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union30.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_lateralview.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_view.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/vector_coalesce.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/vector_elt.q.out
    hive/branches/cbo/service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java
    hive/branches/cbo/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java
    hive/branches/cbo/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
    hive/branches/cbo/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
    hive/branches/cbo/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
    hive/branches/cbo/testutils/metastore/execute-test-on-lxc.sh

Propchange: hive/branches/cbo/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Apr 16 18:35:39 2015
@@ -3,4 +3,4 @@
 /hive/branches/spark:1608589-1660298
 /hive/branches/tez:1494760-1622766
 /hive/branches/vectorization:1466908-1527856
-/hive/trunk:1605012-1673598
+/hive/trunk:1605012-1674131

Modified: hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Thu Apr 16 18:35:39 2015
@@ -1730,6 +1730,21 @@ public class HiveConf extends Configurat
         "Keepalive time for an idle http worker thread. When the number of workers exceeds min workers, " +
         "excessive threads are killed after this time interval."),
 
+    // Cookie based authentication
+    HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED("hive.server2.thrift.http.cookie.auth.enabled", true,
+        "When true, HiveServer2 in HTTP transport mode, will use cookie based authentication mechanism."),
+    HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE("hive.server2.thrift.http.cookie.max.age", "86400s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "Maximum age in seconds for server side cookie used by HS2 in HTTP mode."),
+    HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN("hive.server2.thrift.http.cookie.domain", null,
+        "Domain for the HS2 generated cookies."),
+    HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH("hive.server2.thrift.http.cookie.path", null,
+        "Path for the HS2 generated cookies."),
+    HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE("hive.server2.thrift.http.cookie.is.secure", true,
+        "Secure attribute of the HS2 generated cookie."),
+    HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY("hive.server2.thrift.http.cookie.is.httponly", true,
+        "HttpOnly attribute of the HS2 generated cookie."),
+
     // binary transport settings
     HIVE_SERVER2_THRIFT_PORT("hive.server2.thrift.port", 10000,
         "Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'binary'."),

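For context, a minimal sketch of adjusting the new cookie settings from code, assuming the HiveConf setBoolVar/setTimeVar/getTimeVar helpers available on this branch (the values used here are illustrative assumptions, not defaults beyond those shown in the diff):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hive.conf.HiveConf;

    public class CookieConfSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Cookie auth is on by default per this change; set explicitly for clarity.
        conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED, true);
        // Shorten the server-side cookie lifetime from the 86400s default to one hour.
        conf.setTimeVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE,
            3600, TimeUnit.SECONDS);
        System.out.println(conf.getTimeVar(
            HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE, TimeUnit.SECONDS));
      }
    }
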
Modified: hive/branches/cbo/dev-support/jenkins-execute-hms-test.sh
URL: http://svn.apache.org/viewvc/hive/branches/cbo/dev-support/jenkins-execute-hms-test.sh?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/dev-support/jenkins-execute-hms-test.sh (original)
+++ hive/branches/cbo/dev-support/jenkins-execute-hms-test.sh Thu Apr 16 18:35:39 2015
@@ -165,9 +165,11 @@ create_publish_file() {
 
 if patch_contains_hms_upgrade "$PATCH_URL"; then
 	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $SSH_KEY $SSH_HOST "
-		rm -rf metastore/ &&
-		svn co http://svn.apache.org/repos/asf/hive/$BRANCH/testutils/metastore metastore &&
-		sudo bash -x metastore/execute-test-on-lxc.sh --patch \"${PATCH_URL}\" --branch $BRANCH
+		rm -rf hive/ &&
+		svn co http://svn.apache.org/repos/asf/hive/$BRANCH hive &&
+		cd hive/ &&
+		curl ${PATCH_URL} | bash -x testutils/ptest2/src/main/resources/smart-apply-patch.sh - &&
+		sudo bash -x testutils/metastore/execute-test-on-lxc.sh --patch \"${PATCH_URL}\" --branch $BRANCH
 	"
 	BUILD_STATUS=$?
 	if [[ $BUILD_STATUS = 0 ]]; then

Modified: hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/env.sh
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/env.sh?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/env.sh (original)
+++ hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/env.sh Thu Apr 16 18:35:39 2015
@@ -50,6 +50,11 @@ if [ -z ${HADOOP_HOME} ]; then
   export HADOOP_HOME=/Users/${USER}/dev/hwxhadoop/hadoop-dist/target/hadoop-${HADOOP_VERSION}
 fi
 
+if [ -z ${MYSQL_CLIENT_JAR} ]; then
+  # If using a MySQL-backed metastore
+  export MYSQL_CLIENT_JAR=/Users/${USER}/dev/mysql-connector-java-5.1.30/mysql-connector-java-5.1.30-bin.jar
+fi
+
 export TEZ_CLIENT_HOME=/Users/ekoifman/dev/apache-tez-client-${TEZ_VERSION}
 #Make sure Pig is built for the Hadoop version you are running
 export PIG_TAR_PATH=/Users/${USER}/dev/pig-${PIG_VERSION}-src/build

Modified: hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/start_hive_services.sh
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/start_hive_services.sh?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/start_hive_services.sh (original)
+++ hive/branches/cbo/hcatalog/src/test/e2e/templeton/deployers/start_hive_services.sh Thu Apr 16 18:35:39 2015
@@ -25,10 +25,17 @@
 source ./env.sh
 
 #decide which DB to run against
+#Derby
 cp ${PROJ_HOME}/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.xml ${HIVE_HOME}/conf/hive-site.xml
+#cp ${PROJ_HOME}/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml ${HIVE_HOME}/conf/hive-site.xml
 #cp ${PROJ_HOME}/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mssql.xml ${HIVE_HOME}/conf/hive-site.xml
 
 cp ${PROJ_HOME}/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml ${HIVE_HOME}/hcatalog/etc/webhcat/webhcat-site.xml
+cp ${PROJ_HOME}/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j.properties ${HIVE_HOME}/conf/hive-log4j.properties
+
+if [ -f ${MYSQL_CLIENT_JAR} ]; then
+  cp ${MYSQL_CLIENT_JAR} ${HIVE_HOME}/lib
+fi
 
 if [ -d ${WEBHCAT_LOG_DIR} ]; then
   rm -Rf ${WEBHCAT_LOG_DIR};

Modified: hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java (original)
+++ hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java Thu Apr 16 18:35:39 2015
@@ -160,7 +160,7 @@ public class TestThriftHttpCLIService ex
     String httpUrl = transportMode + "://" + host + ":" + port +
         "/" + thriftHttpPath + "/";
     httpClient.addRequestInterceptor(
-        new HttpBasicAuthInterceptor(USERNAME, PASSWORD, null, null));
+        new HttpBasicAuthInterceptor(USERNAME, PASSWORD, null, null, false));
     return new THttpClient(httpUrl, httpClient);
   }
 

Modified: hive/branches/cbo/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/src/test/resources/testconfiguration.properties?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/cbo/itests/src/test/resources/testconfiguration.properties Thu Apr 16 18:35:39 2015
@@ -313,6 +313,7 @@ minitez.query.files=bucket_map_join_tez1
   tez_schema_evolution.q,\
   tez_union.q,\
   tez_union2.q,\
+  tez_union_view.q,\
   tez_union_decimal.q,\
   tez_union_group_by.q,\
   tez_smb_main.q,\

Modified: hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java (original)
+++ hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java Thu Apr 16 18:35:39 2015
@@ -291,7 +291,7 @@ public class HiveConnection implements j
        */
       requestInterceptor =
           new HttpKerberosRequestInterceptor(sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL),
-              host, getServerHttpUrl(useSsl), assumeSubject, cookieStore, cookieName);
+              host, getServerHttpUrl(useSsl), assumeSubject, cookieStore, cookieName, useSsl);
     }
     else {
       /**
@@ -299,7 +299,7 @@ public class HiveConnection implements j
        * In https mode, the entire information is encrypted
        */
       requestInterceptor = new HttpBasicAuthInterceptor(getUserName(), getPassword(),
-                                                        cookieStore, cookieName);
+                                                        cookieStore, cookieName, useSsl);
     }
     // Configure http client for cookie based authentication
     if (isCookieEnabled) {

Modified: hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HttpBasicAuthInterceptor.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HttpBasicAuthInterceptor.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HttpBasicAuthInterceptor.java (original)
+++ hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HttpBasicAuthInterceptor.java Thu Apr 16 18:35:39 2015
@@ -42,9 +42,10 @@ public class HttpBasicAuthInterceptor im
   CookieStore cookieStore;
   boolean isCookieEnabled;
   String cookieName;
+  boolean isSSL;
 
   public HttpBasicAuthInterceptor(String username, String password, CookieStore cookieStore,
-                           String cn) {
+                           String cn, boolean isSSL) {
     if(username != null){
       credentials = new UsernamePasswordCredentials(username, password);
     }
@@ -52,6 +53,7 @@ public class HttpBasicAuthInterceptor im
     this.cookieStore = cookieStore;
     isCookieEnabled = (cookieStore != null);
     cookieName = cn;
+    this.isSSL = isSSL;
   }
 
   @Override
@@ -64,9 +66,10 @@ public class HttpBasicAuthInterceptor im
     // 1. Cookie Authentication is disabled OR
     // 2. The first time when the request is sent OR
     // 3. The server returns a 401, which sometimes means the cookie has expired
+    // 4. The cookie is secure whereas the client connection does not use SSL
     if (!isCookieEnabled || ((httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY) == null &&
         (cookieStore == null || (cookieStore != null &&
-        Utils.needToSendCredentials(cookieStore, cookieName)))) ||
+        Utils.needToSendCredentials(cookieStore, cookieName, isSSL)))) ||
         (httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY) != null &&
          httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY).
          equals(Utils.HIVE_SERVER2_RETRY_TRUE)))) {

Modified: hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java (original)
+++ hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java Thu Apr 16 18:35:39 2015
@@ -44,18 +44,28 @@ public class HttpKerberosRequestIntercep
   boolean assumeSubject;
   CookieStore cookieStore;
   boolean isCookieEnabled;
+  // NB: The purpose of the isSSL flag is as follows:
+  // It matters when the HS2 server sends a secure cookie and the client is in
+  // non-SSL mode. In that case the client's replay of the cookie doesn't reach
+  // the server, and if we didn't send credentials in such a scenario, the server
+  // would send a 401 error back to the client.
+  // Thus, without isSSL we would need 2 round trips instead of 1 to process an
+  // incoming request.
+  boolean isSSL;
   String cookieName;
 
   // A fair reentrant lock
   private static ReentrantLock kerberosLock = new ReentrantLock(true);
 
   public HttpKerberosRequestInterceptor(String principal, String host,
-      String serverHttpUrl, boolean assumeSubject, CookieStore cs, String cn) {
+      String serverHttpUrl, boolean assumeSubject, CookieStore cs, String cn,
+      boolean isSSL) {
     this.principal = principal;
     this.host = host;
     this.serverHttpUrl = serverHttpUrl;
     this.assumeSubject = assumeSubject;
     this.cookieStore = cs;
+    this.isSSL = isSSL;
     isCookieEnabled = (cs != null);
     cookieName = cn;
   }
@@ -79,10 +89,12 @@ public class HttpKerberosRequestIntercep
       // Generate the kerberos ticket under the following scenarios:
       // 1. Cookie Authentication is disabled OR
       // 2. The first time when the request is sent OR
-      // 3. The server returns a 401, which sometimes means the cookie has expired
-      if (!isCookieEnabled || ((httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY) == null &&
+      // 3. The server returns a 401, which sometimes means the cookie has expired OR
+      // 4. The cookie is secure whereas the client connection does not use SSL
+      if (!isCookieEnabled ||
+         ((httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY) == null &&
           (cookieStore == null || (cookieStore != null &&
-          Utils.needToSendCredentials(cookieStore, cookieName)))) ||
+          Utils.needToSendCredentials(cookieStore, cookieName, isSSL)))) ||
           (httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY) != null &&
           httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY).
           equals(Utils.HIVE_SERVER2_RETRY_TRUE)))) {

Modified: hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/Utils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/Utils.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/Utils.java (original)
+++ hive/branches/cbo/jdbc/src/java/org/apache/hive/jdbc/Utils.java Thu Apr 16 18:35:39 2015
@@ -579,10 +579,11 @@ public class Utils {
    * has a valid cookie and the client need not send Credentials for validation purpose.
    * @param cookieStore The cookie Store
    * @param cookieName Name of the cookie which needs to be validated
+   * @param isSSL Whether the connection uses SSL (https)
    * @return true or false based on whether the client needs to send the credentials or
    * not to the server.
    */
-  static boolean needToSendCredentials(CookieStore cookieStore, String cookieName) {
+  static boolean needToSendCredentials(CookieStore cookieStore, String cookieName, boolean isSSL) {
     if (cookieName == null || cookieStore == null) {
       return true;
     }
@@ -590,6 +591,12 @@ public class Utils {
     List<Cookie> cookies = cookieStore.getCookies();
 
     for (Cookie c : cookies) {
+      // If this is a secure cookie and the current connection is non-secure,
+      // skip this cookie: its replay will not be transmitted to the server,
+      // so it cannot be used to authenticate the request.
+      if (c.isSecure() && !isSSL) {
+        continue;
+      }
       if (c.getName().equals(cookieName)) {
         return false;
       }

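To illustrate the new isSSL check above, here is a self-contained sketch that mirrors the logic of the package-private Utils.needToSendCredentials (reimplemented locally for illustration; the cookie name is a made-up placeholder), using the Apache HttpClient 4.x cookie classes the JDBC driver already depends on. A Secure cookie in the store cannot satisfy the server over plain http, so credentials must be re-sent:

    import org.apache.http.client.CookieStore;
    import org.apache.http.cookie.Cookie;
    import org.apache.http.impl.client.BasicCookieStore;
    import org.apache.http.impl.cookie.BasicClientCookie;

    public class SecureCookieSketch {
      // Mirrors the logic added to Utils.needToSendCredentials above.
      static boolean needToSendCredentials(CookieStore store, String cookieName, boolean isSSL) {
        if (cookieName == null || store == null) {
          return true;
        }
        for (Cookie c : store.getCookies()) {
          // A Secure cookie is never replayed over plain http, so it cannot
          // authenticate the request; skip it and keep looking.
          if (c.isSecure() && !isSSL) {
            continue;
          }
          if (c.getName().equals(cookieName)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        BasicClientCookie cookie = new BasicClientCookie("hs2.cookie", "token"); // placeholder name
        cookie.setSecure(true);
        CookieStore store = new BasicCookieStore();
        store.addCookie(cookie);
        System.out.println(needToSendCredentials(store, "hs2.cookie", false)); // true: resend credentials
        System.out.println(needToSendCredentials(store, "hs2.cookie", true));  // false: cookie suffices
      }
    }
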
Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Thu Apr 16 18:35:39 2015
@@ -6059,11 +6059,8 @@ public class HiveMetaStore extends Thrif
         // Wrap the start of the threads in a catch Throwable loop so that any failures
         // don't doom the rest of the metastore.
         startLock.lock();
-        try {
-          startPauseMonitor(conf);
-        } catch (Throwable t) {
-          LOG.warn("Error starting the JVM pause monitor", t);
-        }
+        ShimLoader.getHadoopShims().startPauseMonitor(conf);
+
         try {
           // Per the javadocs on Condition, do not depend on the condition alone as a start gate
           // since spurious wake ups are possible.
@@ -6083,18 +6080,6 @@ public class HiveMetaStore extends Thrif
     t.start();
   }
 
-  private static void startPauseMonitor(HiveConf conf) throws Exception {
-    try {
-      Class.forName("org.apache.hadoop.util.JvmPauseMonitor");
-      org.apache.hadoop.util.JvmPauseMonitor pauseMonitor =
-        new org.apache.hadoop.util.JvmPauseMonitor(conf);
-      pauseMonitor.start();
-    } catch (Throwable t) {
-      LOG.warn("Could not initiate the JvmPauseMonitor thread." +
-               " GCs and Pauses may not be warned upon.", t);
-    }
-  }
-
   private static void startCompactorInitiator(HiveConf conf) throws Exception {
     if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON)) {
       MetaStoreThread initiator =

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Thu Apr 16 18:35:39 2015
@@ -80,7 +80,7 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 import javax.annotation.Nullable;
 
@@ -372,7 +372,7 @@ public class MetaStoreUtils {
       return null;
     }
     try {
-      Deserializer deserializer = ReflectionUtils.newInstance(conf.getClassByName(lib).
+      Deserializer deserializer = ReflectionUtil.newInstance(conf.getClassByName(lib).
               asSubclass(Deserializer.class), conf);
       if (skipConfError) {
         SerDeUtils.initializeSerDeWithoutErrorCheck(deserializer, conf,
@@ -419,7 +419,7 @@ public class MetaStoreUtils {
       org.apache.hadoop.hive.metastore.api.Table table) throws MetaException {
     String lib = part.getSd().getSerdeInfo().getSerializationLib();
     try {
-      Deserializer deserializer = ReflectionUtils.newInstance(conf.getClassByName(lib).
+      Deserializer deserializer = ReflectionUtil.newInstance(conf.getClassByName(lib).
         asSubclass(Deserializer.class), conf);
       SerDeUtils.initializeSerDe(deserializer, conf, MetaStoreUtils.getTableMetadata(table),
                                  MetaStoreUtils.getPartitionMetadata(part, table));

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java Thu Apr 16 18:35:39 2015
@@ -533,18 +533,29 @@ public class TxnHandler {
     }
   }
 
+  /**
+   * Used to sort entries in {@link org.apache.hadoop.hive.metastore.api.ShowLocksResponse}
+   */
+  private static class LockInfoExt extends LockInfo {
+    private final ShowLocksResponseElement e;
+    LockInfoExt(ShowLocksResponseElement e, long intLockId) {
+      super(e, intLockId);
+      this.e = e;
+    }
+  }
   public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException {
     try {
       Connection dbConn = null;
       ShowLocksResponse rsp = new ShowLocksResponse();
       List<ShowLocksResponseElement> elems = new ArrayList<ShowLocksResponseElement>();
+      List<LockInfoExt> sortedList = new ArrayList<LockInfoExt>();
       Statement stmt = null;
       try {
         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
         stmt = dbConn.createStatement();
 
         String s = "select hl_lock_ext_id, hl_txnid, hl_db, hl_table, hl_partition, hl_lock_state, " +
-          "hl_lock_type, hl_last_heartbeat, hl_acquired_at, hl_user, hl_host from HIVE_LOCKS";
+          "hl_lock_type, hl_last_heartbeat, hl_acquired_at, hl_user, hl_host, hl_lock_int_id from HIVE_LOCKS";
         LOG.debug("Doing to execute query <" + s + ">");
         ResultSet rs = stmt.executeQuery(s);
         while (rs.next()) {
@@ -572,7 +583,7 @@ public class TxnHandler {
           if (!rs.wasNull()) e.setAcquiredat(acquiredAt);
           e.setUser(rs.getString(10));
           e.setHostname(rs.getString(11));
-          elems.add(e);
+          sortedList.add(new LockInfoExt(e, rs.getLong(12)));
         }
         LOG.debug("Going to rollback");
         dbConn.rollback();
@@ -584,6 +595,12 @@ public class TxnHandler {
         closeStmt(stmt);
         closeDbConn(dbConn);
       }
+      // This ensures that "SHOW LOCKS" prints the locks in the same order as they are
+      // examined by checkLock(), which makes diagnostics easier.
+      Collections.sort(sortedList, new LockInfoComparator());
+      for(LockInfoExt lockInfoExt : sortedList) {
+        elems.add(lockInfoExt.e);
+      }
       rsp.setLocks(elems);
       return rsp;
     } catch (RetryException e) {
@@ -1086,17 +1103,17 @@ public class TxnHandler {
   }
 
   private static class LockInfo {
-    long extLockId;
-    long intLockId;
-    long txnId;
-    String db;
-    String table;
-    String partition;
-    LockState state;
-    LockType type;
+    private final long extLockId;
+    private final long intLockId;
+    private final long txnId;
+    private final String db;
+    private final String table;
+    private final String partition;
+    private final LockState state;
+    private final LockType type;
 
     // Assumes the result set is set to a valid row
-    LockInfo(ResultSet rs) throws SQLException {
+    LockInfo(ResultSet rs) throws SQLException, MetaException {
       extLockId = rs.getLong("hl_lock_ext_id"); // can't be null
       intLockId = rs.getLong("hl_lock_int_id"); // can't be null
       db = rs.getString("hl_db"); // can't be null
@@ -1107,12 +1124,27 @@ public class TxnHandler {
       switch (rs.getString("hl_lock_state").charAt(0)) {
         case LOCK_WAITING: state = LockState.WAITING; break;
         case LOCK_ACQUIRED: state = LockState.ACQUIRED; break;
+        default:
+          throw new MetaException("Unknown lock state " + rs.getString("hl_lock_state").charAt(0));
       }
       switch (rs.getString("hl_lock_type").charAt(0)) {
         case LOCK_EXCLUSIVE: type = LockType.EXCLUSIVE; break;
         case LOCK_SHARED: type = LockType.SHARED_READ; break;
         case LOCK_SEMI_SHARED: type = LockType.SHARED_WRITE; break;
+        default:
+          throw new MetaException("Unknown lock type " + rs.getString("hl_lock_type").charAt(0));
       }
+      txnId = rs.getLong("hl_txnid");
+    }
+    LockInfo(ShowLocksResponseElement e, long intLockId) {
+      extLockId = e.getLockid();
+      this.intLockId = intLockId;
+      db = e.getDbname();
+      table = e.getTablename();
+      partition = e.getPartname();
+      state = e.getState();
+      type = e.getType();
+      txnId = e.getTxnid();
     }
 
     public boolean equals(Object other) {
@@ -1130,15 +1162,22 @@ public class TxnHandler {
         partition + " state:" + (state == null ? "null" : state.toString())
         + " type:" + (type == null ? "null" : type.toString());
     }
+    private boolean isDbLock() {
+      return db != null && table == null && partition == null;
+    }
+    private boolean isTableLock() {
+      return db != null && table != null && partition == null;
+    }
   }
 
   private static class LockInfoComparator implements Comparator<LockInfo> {
+    private static final LockTypeComparator lockTypeComparator = new LockTypeComparator();
     public boolean equals(Object other) {
       return this == other;
     }
 
     public int compare(LockInfo info1, LockInfo info2) {
-      // We sort by state (acquired vs waiting) and then by extLockId.
+      // We sort by state (acquired vs waiting), then by LockType, then by id.
       if (info1.state == LockState.ACQUIRED &&
         info2.state != LockState .ACQUIRED) {
         return -1;
@@ -1147,6 +1186,11 @@ public class TxnHandler {
         info2.state == LockState .ACQUIRED) {
         return 1;
       }
+
+      int sortByType = lockTypeComparator.compare(info1.type, info2.type);
+      if(sortByType != 0) {
+        return sortByType;
+      }
       if (info1.extLockId < info2.extLockId) {
         return -1;
       } else if (info1.extLockId > info2.extLockId) {
@@ -1163,6 +1207,41 @@ public class TxnHandler {
     }
   }
 
+  /**
+   * Sort more restrictive locks after less restrictive ones
+   */
+  private final static class LockTypeComparator implements Comparator<LockType> {
+    public boolean equals(Object other) {
+      return this == other;
+    }
+    public int compare(LockType t1, LockType t2) {
+      switch (t1) {
+        case EXCLUSIVE:
+          if(t2 == LockType.EXCLUSIVE) {
+            return 0;
+          }
+          return 1;
+        case SHARED_WRITE:
+          switch (t2) {
+            case EXCLUSIVE:
+              return -1;
+            case SHARED_WRITE:
+              return 0;
+            case SHARED_READ:
+              return 1;
+            default:
+              throw new RuntimeException("Unexpected LockType: " + t2);
+          }
+        case SHARED_READ:
+          if(t2 == LockType.SHARED_READ) {
+            return 0;
+          }
+          return -1;
+        default:
+          throw new RuntimeException("Unexpected LockType: " + t1);
+      }
+    }
+  }
   private enum LockAction {ACQUIRE, WAIT, KEEP_LOOKING}
 
   // A jump table to figure out whether to wait, acquire,
@@ -1362,11 +1441,11 @@ public class TxnHandler {
     LockResponse response = new LockResponse();
     response.setLockid(extLockId);
 
-    LOG.debug("Setting savepoint");
+    LOG.debug("checkLock(): Setting savepoint. extLockId=" + extLockId);
     Savepoint save = dbConn.setSavepoint();
     StringBuilder query = new StringBuilder("select hl_lock_ext_id, " +
       "hl_lock_int_id, hl_db, hl_table, hl_partition, hl_lock_state, " +
-      "hl_lock_type from HIVE_LOCKS where hl_db in (");
+      "hl_lock_type, hl_txnid from HIVE_LOCKS where hl_db in (");
 
     Set<String> strings = new HashSet<String>(locksBeingChecked.size());
     for (LockInfo info : locksBeingChecked) {
@@ -1431,19 +1510,26 @@ public class TxnHandler {
         query.append("))");
       }
     }
+    query.append(" and hl_lock_ext_id <= ").append(extLockId);
 
     LOG.debug("Going to execute query <" + query.toString() + ">");
     Statement stmt = null;
     try {
       stmt = dbConn.createStatement();
       ResultSet rs = stmt.executeQuery(query.toString());
-      SortedSet lockSet = new TreeSet(new LockInfoComparator());
+      SortedSet<LockInfo> lockSet = new TreeSet<LockInfo>(new LockInfoComparator());
       while (rs.next()) {
         lockSet.add(new LockInfo(rs));
       }
       // Turn the tree set into an array so we can move back and forth easily
       // in it.
-      LockInfo[] locks = (LockInfo[])lockSet.toArray(new LockInfo[1]);
+      LockInfo[] locks = lockSet.toArray(new LockInfo[lockSet.size()]);
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Locks to check(full): ");
+        for(LockInfo info : locks) {
+          LOG.debug("  " + info);
+        }
+      }
 
       for (LockInfo info : locksBeingChecked) {
         // Find the lock record we're checking
@@ -1496,22 +1582,27 @@ public class TxnHandler {
 
           // We've found something that matches what we're trying to lock,
           // so figure out if we can lock it too.
-          switch (jumpTable.get(locks[index].type).get(locks[i].type).get
-            (locks[i].state)) {
+          LockAction lockAction = jumpTable.get(locks[index].type).get(locks[i].type).get(locks[i].state);
+          LOG.debug("desired Lock: " + info + " checked Lock: " + locks[i] + " action: " + lockAction);
+          switch (lockAction) {
+            case WAIT:
+              if(!ignoreConflict(info, locks[i])) {
+                wait(dbConn, save);
+                if (alwaysCommit) {
+                  // In the case where lockNoWait has been called we don't want to commit because
+                  // it's going to roll everything back. In every other case we want to commit here.
+                  LOG.debug("Going to commit");
+                  dbConn.commit();
+                }
+                response.setState(LockState.WAITING);
+                LOG.debug("Lock(" + info + ") waiting for Lock(" + locks[i] + ")");
+                return response;
+              }
+              //fall through to ACQUIRE
             case ACQUIRE:
               acquire(dbConn, stmt, extLockId, info.intLockId);
               acquired = true;
               break;
-            case WAIT:
-              wait(dbConn, save);
-              if (alwaysCommit) {
-                // In the case where lockNoWait has been called we don't want to commit because
-                // it's going to roll everything back. In every other case we want to commit here.
-                LOG.debug("Going to commit");
-                dbConn.commit();
-              }
-              response.setState(LockState.WAITING);
-              return response;
             case KEEP_LOOKING:
               continue;
           }
@@ -1534,6 +1625,19 @@ public class TxnHandler {
     return response;
   }
 
+  /**
+   * the {@link #jumpTable} only deals with LockState/LockType.  In some cases it's not
+   * sufficient.  For example, an EXCLUSIVE lock on partition should prevent SHARED_READ
+   * on the table, but there is no reason for EXCLUSIVE on a table to prevent SHARED_READ
+   * on a database.
+   */
+  private boolean ignoreConflict(LockInfo desiredLock, LockInfo existingLock) {
+    return (desiredLock.isDbLock() && desiredLock.type == LockType.SHARED_READ &&
+      existingLock.isTableLock() && existingLock.type == LockType.EXCLUSIVE) ||
+      (existingLock.isDbLock() && existingLock.type == LockType.SHARED_READ &&
+        desiredLock.isTableLock() && desiredLock.type == LockType.EXCLUSIVE);
+  }
+
   private void wait(Connection dbConn, Savepoint save) throws SQLException {
     // Need to rollback because we did a select that acquired locks but we didn't
     // actually update anything.  Also, we may have locked some locks as
@@ -1654,7 +1758,7 @@ public class TxnHandler {
     try {
       stmt = dbConn.createStatement();
       String s = "select hl_lock_ext_id, hl_lock_int_id, hl_db, hl_table, " +
-        "hl_partition, hl_lock_state, hl_lock_type from HIVE_LOCKS where " +
+        "hl_partition, hl_lock_state, hl_lock_type, hl_txnid from HIVE_LOCKS where " +
         "hl_lock_ext_id = " + extLockId;
       LOG.debug("Going to execute query <" + s + ">");
       ResultSet rs = stmt.executeQuery(s);

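For reference, the comparator chain above sorts locks by state (ACQUIRED before WAITING), then by lock type (least restrictive first), then by external lock id. Below is a standalone sketch of just the type ordering, mirroring the private LockTypeComparator with a local stand-in enum (the real one is the thrift-generated metastore LockType):

    import java.util.Arrays;
    import java.util.Comparator;

    public class LockTypeOrderSketch {
      // Local stand-in for org.apache.hadoop.hive.metastore.api.LockType.
      enum LockType { SHARED_READ, SHARED_WRITE, EXCLUSIVE }

      // Less restrictive lock types sort first, matching LockTypeComparator above.
      static final Comparator<LockType> BY_RESTRICTIVENESS = new Comparator<LockType>() {
        @Override
        public int compare(LockType t1, LockType t2) {
          return Integer.compare(rank(t1), rank(t2));
        }
      };

      static int rank(LockType t) {
        switch (t) {
          case SHARED_READ:  return 0;
          case SHARED_WRITE: return 1;
          case EXCLUSIVE:    return 2;
          default: throw new IllegalArgumentException("Unexpected LockType: " + t);
        }
      }

      public static void main(String[] args) {
        LockType[] locks = { LockType.EXCLUSIVE, LockType.SHARED_READ, LockType.SHARED_WRITE };
        Arrays.sort(locks, BY_RESTRICTIVENESS);
        System.out.println(Arrays.toString(locks)); // [SHARED_READ, SHARED_WRITE, EXCLUSIVE]
      }
    }
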
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Thu Apr 16 18:35:39 2015
@@ -1322,7 +1322,7 @@ public class Driver implements CommandPr
     maxthreads = HiveConf.getIntVar(conf, HiveConf.ConfVars.EXECPARALLETHREADNUMBER);
 
     try {
-      LOG.info("Starting command: " + queryStr);
+      LOG.info("Starting command(queryId=" + queryId + "): " + queryStr);
       // compile and execute can get called from different threads in case of HS2
       // so clear timing in this thread's Hive object before proceeding.
       Hive.get().clearMetaCallTiming();

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Thu Apr 16 18:35:39 2015
@@ -207,9 +207,9 @@ import org.apache.hadoop.hive.shims.Hado
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.tools.HadoopArchives;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hive.common.util.AnnotationUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 import org.stringtemplate.v4.ST;
 
 /**
@@ -3786,7 +3786,7 @@ public class DDLTask extends Task<DDLWor
   private void validateSerDe(String serdeName) throws HiveException {
     try {
 
-      Deserializer d = ReflectionUtils.newInstance(conf.getClassByName(serdeName).
+      Deserializer d = ReflectionUtil.newInstance(conf.getClassByName(serdeName).
           asSubclass(Deserializer.class), conf);
       if (d != null) {
         LOG.debug("Found class for " + serdeName);

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DefaultFetchFormatter.java Thu Apr 16 18:35:39 2015
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.serde2.Del
 import org.apache.hadoop.hive.serde2.SerDe;
 import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 /**
  * serialize row by user specified serde and call toString() to make string type result
@@ -50,12 +50,13 @@ public class DefaultFetchFormatter<T> im
     }
   }
 
+//TODO#: THIS
   private SerDe initializeSerde(Configuration conf, Properties props) throws Exception {
     String serdeName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE);
     Class<? extends SerDe> serdeClass = Class.forName(serdeName, true,
         Utilities.getSessionSpecifiedClassLoader()).asSubclass(SerDe.class);
     // cast only needed for Hadoop 0.17 compatibility
-    SerDe serde = ReflectionUtils.newInstance(serdeClass, null);
+    SerDe serde = ReflectionUtil.newInstance(serdeClass, null);
 
     Properties serdeProps = new Properties();
     if (serde instanceof DelimitedJSONSerDe) {

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java Thu Apr 16 18:35:39 2015
@@ -39,7 +39,7 @@ import org.apache.hadoop.hive.serde2.Des
 import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 /**
  * DemuxOperator is an operator used by MapReduce Jobs optimized by
@@ -134,12 +134,12 @@ public class DemuxOperator extends Opera
         cntrs[newTag] = 0;
         nextCntrs[newTag] = 0;
         TableDesc keyTableDesc = conf.getKeysSerializeInfos().get(newTag);
-        Deserializer inputKeyDeserializer = ReflectionUtils.newInstance(keyTableDesc
+        Deserializer inputKeyDeserializer = ReflectionUtil.newInstance(keyTableDesc
             .getDeserializerClass(), null);
         SerDeUtils.initializeSerDe(inputKeyDeserializer, null, keyTableDesc.getProperties(), null);
 
         TableDesc valueTableDesc = conf.getValuesSerializeInfos().get(newTag);
-        Deserializer inputValueDeserializer = ReflectionUtils.newInstance(valueTableDesc
+        Deserializer inputValueDeserializer = ReflectionUtil.newInstance(valueTableDesc
             .getDeserializerClass(), null);
         SerDeUtils.initializeSerDe(inputValueDeserializer, null, valueTableDesc.getProperties(),
                                    null);

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Thu Apr 16 18:35:39 2015
@@ -69,9 +69,9 @@ import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobConfigurable;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hive.common.util.AnnotationUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 import com.google.common.collect.Iterators;
 
@@ -204,13 +204,12 @@ public class FetchOperator implements Se
        JobConf conf) throws IOException {
     if (Configurable.class.isAssignableFrom(inputFormatClass) ||
         JobConfigurable.class.isAssignableFrom(inputFormatClass)) {
-      return ReflectionUtils
-          .newInstance(inputFormatClass, conf);
+      return ReflectionUtil.newInstance(inputFormatClass, conf);
     }
     InputFormat format = inputFormats.get(inputFormatClass.getName());
     if (format == null) {
       try {
-        format = ReflectionUtils.newInstance(inputFormatClass, conf);
+        format = ReflectionUtil.newInstance(inputFormatClass, conf);
         inputFormats.put(inputFormatClass.getName(), format);
       } catch (Exception e) {
         throw new IOException("Cannot create an instance of InputFormat class "

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java Thu Apr 16 18:35:39 2015
@@ -44,7 +44,7 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 public class JoinUtil {
 
@@ -268,7 +268,7 @@ public class JoinUtil {
     if (desc == null) {
       return null;
     }
-    SerDe sd = (SerDe) ReflectionUtils.newInstance(desc.getDeserializerClass(),
+    SerDe sd = (SerDe) ReflectionUtil.newInstance(desc.getDeserializerClass(),
         null);
     try {
       SerDeUtils.initializeSerDe(sd, null, desc.getProperties(), null);

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java Thu Apr 16 18:35:39 2015
@@ -61,7 +61,7 @@ import org.apache.hadoop.hive.serde2.Ser
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 import static org.apache.hadoop.hive.ql.exec.persistence.HybridHashTableContainer.HashPartition;
 import static org.apache.hadoop.hive.ql.exec.persistence.MapJoinBytesTableContainer.KeyValueHelper;
@@ -229,8 +229,8 @@ public class MapJoinOperator extends Abs
 
     try {
       TableDesc keyTableDesc = conf.getKeyTblDesc();
-      SerDe keySerializer = (SerDe) ReflectionUtils.newInstance(keyTableDesc.getDeserializerClass(),
- null);
+      SerDe keySerializer = (SerDe) ReflectionUtil.newInstance(
+          keyTableDesc.getDeserializerClass(), null);
       SerDeUtils.initializeSerDe(keySerializer, null, keyTableDesc.getProperties(), null);
       MapJoinObjectSerDeContext keyContext = new MapJoinObjectSerDeContext(keySerializer, false);
       for (int pos = 0; pos < order.length; pos++) {
@@ -243,8 +243,8 @@ public class MapJoinOperator extends Abs
         } else {
           valueTableDesc = conf.getValueFilteredTblDescs().get(pos);
         }
-        SerDe valueSerDe =
-            (SerDe) ReflectionUtils.newInstance(valueTableDesc.getDeserializerClass(), null);
+        SerDe valueSerDe = (SerDe) ReflectionUtil.newInstance(
+            valueTableDesc.getDeserializerClass(), null);
         SerDeUtils.initializeSerDe(valueSerDe, null, valueTableDesc.getProperties(), null);
         MapJoinObjectSerDeContext valueContext =
             new MapJoinObjectSerDeContext(valueSerDe, hasFilter(pos));

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java Thu Apr 16 18:35:39 2015
@@ -51,7 +51,7 @@ import org.apache.hadoop.io.WritableComp
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.util.PriorityQueue;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 /**
  * Sorted Merge Map Join Operator.
@@ -525,7 +525,7 @@ public class SMBMapJoinOperator extends
 
     BucketMapJoinContext bucketMatcherCxt = localWork.getBucketMapjoinContext();
     Class<? extends BucketMatcher> bucketMatcherCls = bucketMatcherCxt.getBucketMatcherClass();
-    BucketMatcher bucketMatcher = ReflectionUtils.newInstance(bucketMatcherCls, null);
+    BucketMatcher bucketMatcher = ReflectionUtil.newInstance(bucketMatcherCls, null);
 
     getExecContext().setFileId(bucketMatcherCxt.createFileId(currentInputPath.toString()));
     if (isLogInfoEnabled) {

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java Thu Apr 16 18:35:39 2015
@@ -52,8 +52,8 @@ import org.apache.hadoop.mapred.InputFor
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.MapMaker;
@@ -150,7 +150,7 @@ public class StatsNoJobTask extends Task
         boolean statsAvailable = false;
         for(FileStatus file: fileList) {
           if (!file.isDir()) {
-            InputFormat<?, ?> inputFormat = (InputFormat<?, ?>) ReflectionUtils.newInstance(
+            InputFormat<?, ?> inputFormat = (InputFormat<?, ?>) ReflectionUtil.newInstance(
                 partn.getInputFormatClass(), jc);
             InputSplit dummySplit = new FileSplit(file.getPath(), 0, 0,
                 new String[] { partn.getLocation() });
@@ -248,7 +248,7 @@ public class StatsNoJobTask extends Task
           boolean statsAvailable = false;
           for(FileStatus file: fileList) {
             if (!file.isDir()) {
-              InputFormat<?, ?> inputFormat = (InputFormat<?, ?>) ReflectionUtils.newInstance(
+              InputFormat<?, ?> inputFormat = (InputFormat<?, ?>) ReflectionUtil.newInstance(
                   table.getInputFormatClass(), jc);
               InputSplit dummySplit = new FileSplit(file.getPath(), 0, 0, new String[] { table
                   .getDataLocation().toString() });

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Thu Apr 16 18:35:39 2015
@@ -181,8 +181,8 @@ import org.apache.hadoop.mapred.Reporter
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Shell;
+import org.apache.hive.common.util.ReflectionUtil;
 
 import com.esotericsoftware.kryo.Kryo;
 import com.esotericsoftware.kryo.io.Input;
@@ -1375,7 +1375,7 @@ public final class Utilities {
     if (isCompressed) {
       Class<? extends CompressionCodec> codecClass = FileOutputFormat.getOutputCompressorClass(jc,
           DefaultCodec.class);
-      CompressionCodec codec = ReflectionUtils.newInstance(codecClass, jc);
+      CompressionCodec codec = ReflectionUtil.newInstance(codecClass, jc);
       return codec.createOutputStream(out);
     } else {
       return (out);
@@ -1424,7 +1424,7 @@ public final class Utilities {
     if ((hiveOutputFormat instanceof HiveIgnoreKeyTextOutputFormat) && isCompressed) {
       Class<? extends CompressionCodec> codecClass = FileOutputFormat.getOutputCompressorClass(jc,
           DefaultCodec.class);
-      CompressionCodec codec = ReflectionUtils.newInstance(codecClass, jc);
+      CompressionCodec codec = ReflectionUtil.newInstance(codecClass, jc);
       return codec.getDefaultExtension();
     }
     return "";
@@ -1476,7 +1476,7 @@ public final class Utilities {
     if (isCompressed) {
       compressionType = SequenceFileOutputFormat.getOutputCompressionType(jc);
       codecClass = FileOutputFormat.getOutputCompressorClass(jc, DefaultCodec.class);
-      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, jc);
+      codec = (CompressionCodec) ReflectionUtil.newInstance(codecClass, jc);
     }
     return SequenceFile.createWriter(fs, jc, file, keyClass, valClass, compressionType, codec,
       progressable);
@@ -1500,7 +1500,7 @@ public final class Utilities {
     CompressionCodec codec = null;
     if (isCompressed) {
       Class<?> codecClass = FileOutputFormat.getOutputCompressorClass(jc, DefaultCodec.class);
-      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, jc);
+      codec = (CompressionCodec) ReflectionUtil.newInstance(codecClass, jc);
     }
     return new RCFile.Writer(fs, jc, file, progressable, codec);
   }
@@ -2948,7 +2948,7 @@ public final class Utilities {
 
         if (reworkInputFormats.size() > 0) {
           for (Class<? extends InputFormat> inputFormatCls : reworkInputFormats) {
-            ReworkMapredInputFormat inst = (ReworkMapredInputFormat) ReflectionUtils
+            ReworkMapredInputFormat inst = (ReworkMapredInputFormat) ReflectionUtil
                 .newInstance(inputFormatCls, null);
             inst.rework(conf, mapredWork);
           }
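[All of the codec hunks above share one pattern: resolve the compressor class from the job configuration, then instantiate it reflectively. A hedged sketch of the caller-side setup that makes the compressed branch fire; the key names are the standard Hadoop mapred keys, and choosing GzipCodec is just an example.]

    JobConf jc = new JobConf();
    jc.setBoolean("mapred.output.compress", true);
    jc.setClass("mapred.output.compression.codec", GzipCodec.class, CompressionCodec.class);
    // FileOutputFormat.getOutputCompressorClass(jc, DefaultCodec.class) then resolves
    // GzipCodec, and ReflectionUtil.newInstance(codecClass, jc) instantiates it.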

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java Thu Apr 16 18:35:39 2015
@@ -46,7 +46,7 @@ import org.apache.hadoop.mapred.InputFor
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 /**
  * Simple persistent container for rows.
@@ -213,7 +213,7 @@ public class RowContainer<ROW extends Li
         JobConf localJc = getLocalFSJobConfClone(jc);
         if (inputSplits == null) {
           if (this.inputFormat == null) {
-            inputFormat = ReflectionUtils.newInstance(
+            inputFormat = ReflectionUtil.newInstance(
                 tblDesc.getInputFileFormatClass(), localJc);
           }
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java Thu Apr 16 18:35:39 2015
@@ -280,7 +280,7 @@ public class VectorizationContext {
       throw new HiveException("Null column name");
     }
     if (!projectionColumnMap.containsKey(name)) {
-      throw new HiveException(String.format("The column %s is not in the vectorization context column map %s.", 
+      throw new HiveException(String.format("The column %s is not in the vectorization context column map %s.",
                  name, projectionColumnMap.toString()));
     }
     return projectionColumnMap.get(name);
@@ -1022,7 +1022,7 @@ public class VectorizationContext {
             arguments[i] = colIndex;
         } else if (child instanceof ExprNodeConstantDesc) {
           Object scalarValue = getVectorTypeScalarValue((ExprNodeConstantDesc) child);
-          arguments[i] = scalarValue;
+          arguments[i] = (null == scalarValue)
+              ? getConstantVectorExpression(null, child.getTypeInfo(), childrenMode) : scalarValue;
         } else {
           throw new HiveException("Cannot handle expression type: " + child.getClass().getSimpleName());
         }
@@ -1337,10 +1337,10 @@ public class VectorizationContext {
     HiveDecimal rawDecimal;
     switch (ptinfo.getPrimitiveCategory()) {
     case FLOAT:
-      rawDecimal = HiveDecimal.create(String.valueOf((Float) scalar));
+      rawDecimal = HiveDecimal.create(String.valueOf(scalar));
       break;
     case DOUBLE:
-      rawDecimal = HiveDecimal.create(String.valueOf((Double) scalar));
+      rawDecimal = HiveDecimal.create(String.valueOf(scalar));
       break;
     case BYTE:
       rawDecimal = HiveDecimal.create((Byte) scalar);
@@ -1504,7 +1504,7 @@ public class VectorizationContext {
       return createVectorExpression(CastStringGroupToChar.class, childExpr, Mode.PROJECTION, returnType);
     }
 
-    /* 
+    /*
      * Timestamp, float, and double types are handled by the legacy code path. See isLegacyPathUDF.
      */
 
@@ -1852,7 +1852,7 @@ public class VectorizationContext {
         return 0;
       }
     } else if (decimalTypePattern.matcher(constDesc.getTypeString()).matches()) {
-      return (HiveDecimal) constDesc.getValue();
+      return constDesc.getValue();
     } else {
       return constDesc.getValue();
     }
@@ -1992,7 +1992,7 @@ public class VectorizationContext {
       return "None";
     }
   }
-  
+
   static String getUndecoratedName(String hiveTypeName) {
     VectorExpressionDescriptor.ArgumentType argType = VectorExpressionDescriptor.ArgumentType.fromHiveTypeName(hiveTypeName);
     switch (argType) {
@@ -2021,7 +2021,7 @@ public class VectorizationContext {
   }
 
   // TODO: When we support vectorized STRUCTs and can handle more in the reduce-side (MERGEPARTIAL):
-  // TODO:   Write reduce-side versions of AVG. Currently, only map-side (HASH) versions are in table. 
+  // TODO:   Write reduce-side versions of AVG. Currently, only map-side (HASH) versions are in table.
 // TODO:   And, investigate if different reduce-side versions are needed for var* and std*, or if map-side aggregate can be used.  Right now they are conservatively
   //         marked map-side (HASH).
   static ArrayList<AggregateDefinition> aggregatesDefinition = new ArrayList<AggregateDefinition>() {{
@@ -2135,6 +2135,7 @@ public class VectorizationContext {
     return map;
   }
 
+  @Override
   public String toString() {
     StringBuilder sb = new StringBuilder(32);
     sb.append("Context name ").append(contextName).append(", level " + level + ", ");

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java Thu Apr 16 18:35:39 2015
@@ -18,13 +18,10 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.util.Arrays;
-
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.ql.exec.vector.*;
-import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 
 /**
  * Constant is represented as a vector with repeating values.

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputSplit.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputSplit.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputSplit.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputSplit.java Thu Apr 16 18:35:39 2015
@@ -26,7 +26,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit;
 import org.apache.hadoop.mapred.FileSplit;
 import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 /**
  * HiveInputSplit encapsulates an InputSplit with its corresponding
@@ -137,12 +137,11 @@ public class BucketizedHiveInputSplit ex
   @Override
   public void readFields(DataInput in) throws IOException {
     String inputSplitClassName = in.readUTF();
-
     int numSplits = in.readInt();
     inputSplits = new InputSplit[numSplits];
     for (int i = 0; i < numSplits; i++) {
       try {
-        inputSplits[i] = (InputSplit) ReflectionUtils.newInstance(conf
+        inputSplits[i] = (InputSplit) ReflectionUtil.newInstance(conf
             .getClassByName(inputSplitClassName), conf);
       } catch (Exception e) {
         throw new IOException(

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java Thu Apr 16 18:35:39 2015
@@ -59,7 +59,7 @@ import org.apache.hadoop.mapred.Sequence
 import org.apache.hadoop.mapred.TaskAttemptContext;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 /**
  * An util class for various Hive file format tasks.
@@ -274,7 +274,7 @@ public final class HiveFileFormatUtils {
 
   private static HiveOutputFormat<?, ?> getHiveOutputFormat(
       Configuration conf, Class<? extends OutputFormat> outputClass) throws HiveException {
-    OutputFormat<?, ?> outputFormat = ReflectionUtils.newInstance(outputClass, conf);
+    OutputFormat<?, ?> outputFormat = ReflectionUtil.newInstance(outputClass, conf);
     if (!(outputFormat instanceof HiveOutputFormat)) {
       outputFormat = new HivePassThroughOutputFormat(outputFormat);
     }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java Thu Apr 16 18:35:39 2015
@@ -23,7 +23,6 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -61,7 +60,7 @@ import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobConfigurable;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hive.common.util.ReflectionUtil;
 
 /**
  * HiveInputFormat is a parameterized InputFormat which looks at the path name
@@ -156,11 +155,12 @@ public class HiveInputFormat<K extends W
       return inputSplit.getLocations();
     }
 
+    // TODO#: THIS
     @Override
     public void readFields(DataInput in) throws IOException {
       String inputSplitClassName = in.readUTF();
       try {
-        inputSplit = (InputSplit) ReflectionUtils.newInstance(conf
+        inputSplit = (InputSplit) ReflectionUtil.newInstance(conf
             .getClassByName(inputSplitClassName), conf);
       } catch (Exception e) {
         throw new IOException(
@@ -200,7 +200,7 @@ public class HiveInputFormat<K extends W
     InputFormat<WritableComparable, Writable> instance = inputFormats.get(inputFormatClass);
     if (instance == null) {
       try {
-        instance = (InputFormat<WritableComparable, Writable>) ReflectionUtils
+        instance = (InputFormat<WritableComparable, Writable>) ReflectionUtil
             .newInstance(inputFormatClass, job);
         // HBase input formats are not thread safe today. See HIVE-8808.
         String inputFormatName = inputFormatClass.getName().toLowerCase();

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java Thu Apr 16 18:35:39 2015
@@ -72,12 +72,21 @@ public class DbLockManager implements Hi
    * Send a lock request to the metastore.  This is intended for use by
    * {@link DbTxnManager}.
    * @param lock lock request
+   * @param isBlocking if true, will block until locks have been acquired
    * @throws LockException
+   * @return the result of the lock attempt
    */
-  List<HiveLock> lock(LockRequest lock) throws LockException {
+  LockState lock(LockRequest lock, String queryId, boolean isBlocking, List<HiveLock> acquiredLocks) throws LockException {
     try {
-      LOG.debug("Requesting lock");
+      LOG.debug("Requesting: queryId=" + queryId + " " + lock);
       LockResponse res = client.lock(lock);
+      //link lockId to queryId
+      LOG.debug("Response " + res);
+      if (!isBlocking) {
+        if (res.getState() == LockState.WAITING) {
+          return LockState.WAITING;
+        }
+      }
       while (res.getState() == LockState.WAITING) {
         backoff();
         res = client.checkLock(res.getLockid());
@@ -88,9 +97,8 @@ public class DbLockManager implements Hi
       if (res.getState() != LockState.ACQUIRED) {
         throw new LockException(ErrorMsg.LOCK_CANNOT_BE_ACQUIRED.getMsg());
       }
-      List<HiveLock> locks = new ArrayList<HiveLock>(1);
-      locks.add(hl);
-      return locks;
+      acquiredLocks.add(hl);
+      return res.getState();
     } catch (NoSuchTxnException e) {
       LOG.error("Metastore could not find txnid " + lock.getTxnid());
       throw new LockException(ErrorMsg.TXNMGR_NOT_INSTANTIATED.getMsg(), e);
@@ -102,6 +110,20 @@ public class DbLockManager implements Hi
           e);
     }
   }
+  /**
+   * Used to make another attempt to acquire a lock (in Waiting state)
+   * @param extLockId
+   * @return result of the attempt
+   * @throws LockException
+   */
+  LockState checkLock(long extLockId) throws LockException {
+    try {
+      return client.checkLock(extLockId).getState();
+    } catch (TException e) {
+      throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(),
+        e);
+    }
+  }
 
   @Override
   public void unlock(HiveLock hiveLock) throws LockException {
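[The reworked lock() and the new checkLock() together support a poll-based protocol: a non-blocking request may come back WAITING, and the caller re-checks the same external lock id until it is granted. A caller-side sketch, where the variable names and backoff value are assumptions and interrupt/error handling is omitted:]

    List<HiveLock> acquired = new ArrayList<HiveLock>();
    LockState state = lockMgr.lock(request, queryId, /* isBlocking */ false, acquired);
    while (state == LockState.WAITING) {
      Thread.sleep(100);                     // caller-chosen backoff between attempts
      state = lockMgr.checkLock(extLockId);  // extLockId assumed recoverable from the response
    }
    // here state == LockState.ACQUIRED, or a LockException was thrown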

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java Thu Apr 16 18:35:39 2015
@@ -37,6 +37,7 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.thrift.TException;
 
+import java.util.ArrayList;
 import java.util.List;
 
 /**
@@ -87,6 +88,15 @@ public class DbTxnManager extends HiveTx
 
   @Override
   public void acquireLocks(QueryPlan plan, Context ctx, String username) throws LockException {
+    acquireLocks(plan, ctx, username, true);
+  }
+
+  /**
+   * This is for testing only.  Normally a client should call {@link #acquireLocks(org.apache.hadoop.hive.ql.QueryPlan, org.apache.hadoop.hive.ql.Context, String)}.
+   * @param isBlocking if false, the method will return immediately; thus the locks may be in LockState.WAITING
+   * @return null if no locks were needed
+   */
+  LockState acquireLocks(QueryPlan plan, Context ctx, String username, boolean isBlocking) throws LockException {
     init();
         // Make sure we've built the lock manager
     getLockManager();
@@ -94,7 +104,8 @@ public class DbTxnManager extends HiveTx
     boolean atLeastOneLock = false;
 
     LockRequestBuilder rqstBuilder = new LockRequestBuilder();
-    LOG.debug("Setting lock request transaction to " + txnId);
+    //link queryId to txnId
+    LOG.debug("Setting lock request transaction to " + txnId + " for queryId=" + plan.getQueryId());
     rqstBuilder.setTransactionId(txnId)
         .setUser(username);
 
@@ -206,10 +217,15 @@ public class DbTxnManager extends HiveTx
 
     // Make sure we need locks.  It's possible there's nothing to lock in
     // this operation.
-    if (!atLeastOneLock) return;
+    if (!atLeastOneLock) {
+      LOG.debug("No locks needed for queryId" + plan.getQueryId());
+      return null;
+    }
 
-    List<HiveLock> locks = lockMgr.lock(rqstBuilder.build());
+    List<HiveLock> locks = new ArrayList<HiveLock>(1);
+    LockState lockState = lockMgr.lock(rqstBuilder.build(), plan.getQueryId(), isBlocking, locks);
     ctx.setHiveLocks(locks);
+    return lockState;
   }
 
   @Override
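[The new package-private overload has three outcomes: null when the plan needs no locks, ACQUIRED, or WAITING when isBlocking is false. A test-style sketch of handling all three; fixture objects such as txnMgr, plan, and ctx are assumed:]

    LockState state = txnMgr.acquireLocks(plan, ctx, "testuser", /* isBlocking */ false);
    if (state == null) {
      // nothing in this query required a lock
    } else if (state == LockState.WAITING) {
      // request registered but not granted; ctx already holds the waiting HiveLocks
    }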

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Thu Apr 16 18:35:39 2015
@@ -101,6 +101,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.FunctionTask;
+import org.apache.hadoop.hive.ql.exec.FunctionUtils;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
@@ -174,8 +175,9 @@ public class Hive {
       for (String functionName : db.getFunctions(dbName, "*")) {
         Function function = db.getFunction(dbName, functionName);
         try {
-          FunctionRegistry.registerPermanentFunction(functionName, function.getClassName(), false,
-              FunctionTask.toFunctionResource(function.getResourceUris()));
+          FunctionRegistry.registerPermanentFunction(
+              FunctionUtils.qualifyFunctionName(functionName, dbName), function.getClassName(),
+              false, FunctionTask.toFunctionResource(function.getResourceUris()));
         } catch (Exception e) {
           LOG.warn("Failed to register persistent function " +
               functionName + ":" + function.getClassName() + ". Ignore and continue.");

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java Thu Apr 16 18:35:39 2015
@@ -488,9 +488,8 @@ public final class ConstantPropagateProc
             ObjectInspectorUtils.getConstantObjectInspector(constant.getWritableObjectInspector(),
                 writableValue);
       } else if (desc instanceof ExprNodeNullDesc) {
-
-        // FIXME: add null support.
-        return null;
+        argois[i] = desc.getWritableObjectInspector();
+        arguments[i] = new DeferredJavaObject(((ExprNodeNullDesc) desc).getValue());
       } else if (desc instanceof ExprNodeGenericFuncDesc) {
         ExprNodeDesc evaluatedFn = foldExpr((ExprNodeGenericFuncDesc)desc);
         if (null == evaluatedFn || !(evaluatedFn instanceof ExprNodeConstantDesc)) {
@@ -511,6 +510,10 @@ public final class ConstantPropagateProc
       Object o = udf.evaluate(arguments);
       LOG.debug(udf.getClass().getName() + "(" + exprs + ")=" + o);
       if (o == null) {
+        if (oi instanceof PrimitiveObjectInspector) {
+          return new ExprNodeConstantDesc(((PrimitiveObjectInspector) oi).getTypeInfo(), o);
+        }
         return new ExprNodeNullDesc();
       }
       Class<?> clz = o.getClass();
@@ -604,6 +607,10 @@ public final class ConstantPropagateProc
           LOG.warn("Filter expression " + condn + " holds false!");
         }
       }
+      if (newCondn instanceof ExprNodeNullDesc || (newCondn instanceof ExprNodeConstantDesc
+          && ((ExprNodeConstantDesc) newCondn).getValue() == null)) {
+        // where null is same as where false
+        newCondn = new ExprNodeConstantDesc(Boolean.FALSE);
+      }
       LOG.debug("New filter FIL[" + op.getIdentifier() + "] conditions:" + newCondn.getExprString());
 
       // merge it with the downstream col list
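[The folding rule added above leans on SQL three-valued logic: a filter keeps a row only when its predicate evaluates to TRUE, so a constant NULL condition rejects every row, exactly like constant FALSE. A minimal illustration, not from the patch:]

    Boolean predicate = null;                          // e.g. folded from WHERE 1 = null
    boolean keepRow = Boolean.TRUE.equals(predicate);  // only TRUE keeps the row
    assert !keepRow;                                   // same result as WHERE false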

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java Thu Apr 16 18:35:39 2015
@@ -365,6 +365,17 @@ public class Vectorizer implements Physi
       addMapWorkRules(opRules, vnp);
       Dispatcher disp = new DefaultRuleDispatcher(vnp, opRules, null);
       GraphWalker ogw = new DefaultGraphWalker(disp);
+      if ((mapWork.getAliasToWork() == null) || (mapWork.getAliasToWork().size() == 0)) {
+        return false;
+      } else {
+        for (Operator<?> op : mapWork.getAliasToWork().values()) {
+          if (op == null) {
+            LOG.warn("Map work has invalid aliases to work with. Fail validation!");
+            return false;
+          }
+        }
+      }
+
       // iterator the mapper operator tree
       ArrayList<Node> topNodes = new ArrayList<Node>();
       topNodes.addAll(mapWork.getAliasToWork().values());

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java Thu Apr 16 18:35:39 2015
@@ -352,7 +352,7 @@ public class StatsRulesProcFactory {
 
         // special case for handling false constants
         ExprNodeConstantDesc encd = (ExprNodeConstantDesc) pred;
-        if (encd.getValue().equals(false)) {
+        if (Boolean.FALSE.equals(encd.getValue())) {
           return 0;
         } else {
           return stats.getNumRows();
@@ -383,7 +383,7 @@ public class StatsRulesProcFactory {
             return numRows - newNumRows;
           } else if (leaf instanceof ExprNodeConstantDesc) {
             ExprNodeConstantDesc encd = (ExprNodeConstantDesc) leaf;
-            if (encd.getValue().equals(true)) {
+            if (Boolean.TRUE.equals(encd.getValue())) {
               return 0;
             } else {
               return numRows;
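[Both hunks in this file flip value.equals(constant) into Constant.equals(value), the standard null-safe idiom, which matters now that constant descriptors can carry null literals. Illustration:]

    Object value = null;                            // a constant folded down to a null literal
    // value.equals(false)                          // would throw NullPointerException
    boolean isFalse = Boolean.FALSE.equals(value);  // null-safe: yields false for null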

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java?rev=1674132&r1=1674131&r2=1674132&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java Thu Apr 16 18:35:39 2015
@@ -334,16 +334,8 @@ public class MapWork extends BaseWork {
   public Set<Operator<?>> getAllRootOperators() {
     Set<Operator<?>> opSet = new LinkedHashSet<Operator<?>>();
 
-    Map<String, ArrayList<String>> pa = getPathToAliases();
-    if (pa != null) {
-      for (List<String> ls : pa.values()) {
-        for (String a : ls) {
-          Operator<?> op = getAliasToWork().get(a);
-          if (op != null ) {
-            opSet.add(op);
-          }
-        }
-      }
+    for (Operator<?> op : getAliasToWork().values()) {
+      opSet.add(op);
     }
     return opSet;
   }
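[The rewritten getAllRootOperators() collects the aliasToWork values directly instead of walking pathToAliases. An equivalent, more compact form; an editorial sketch, behavior-preserving:]

    Set<Operator<?>> opSet = new LinkedHashSet<Operator<?>>(getAliasToWork().values());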


