hive-commits mailing list archives

From: ser...@apache.org
Subject: svn commit: r1669718 [2/29] - in /hive/branches/llap: ./ ant/src/org/apache/hadoop/hive/ant/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/common/type/ common/src/java/org/apache/hadoop/hive/conf/ common/src/java...
Date: Sat, 28 Mar 2015 00:22:27 GMT
Modified: hive/branches/llap/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/llap/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Sat Mar 28 00:22:15 2015
@@ -710,6 +710,11 @@ public class HiveConf extends Configurat
     HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
         "Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,\n" +
         "because memory-optimized hashtable cannot be serialized."),
+    HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", false, "Whether to use hybrid " +
+        "grace hash join as the join method for mapjoin."),
+    HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " +
+        "hybrid grace hash join, how often (how many rows apart) we check if memory is full. " +
+        "This number should be power of 2."),
     HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 10 * 1024 * 1024,
         "Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" +
         "store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" +
@@ -2073,8 +2078,11 @@ public class HiveConf extends Configurat
     SPARK_RPC_CHANNEL_LOG_LEVEL("hive.spark.client.channel.log.level", null,
       "Channel logging level for remote Spark driver.  One of {DEBUG, ERROR, INFO, TRACE, WARN}."),
     SPARK_RPC_SASL_MECHANISM("hive.spark.client.rpc.sasl.mechanisms", "DIGEST-MD5",
-	"Name of the SASL mechanism to use for authentication.")
-    ;
+      "Name of the SASL mechanism to use for authentication."),
+    NWAYJOINREORDER("hive.reorder.nway.joins", true,
+      "Runs reordering of tables within single n-way join (i.e.: picks streamtable)"),
+    HIVE_LOG_N_RECORDS("hive.log.every.n.records", 0L, new RangeValidator(0L, null),
+      "If value is greater than 0 logs in fixed intervals of size n rather than exponentially.");
 
     public final String varname;
     private final String defaultExpr;
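
For reference, the new ConfVars entries above are read through the standard HiveConf accessors. A minimal Java sketch (the variable names come from the hunk above; the surrounding code is illustrative only, not part of this commit):

    // Sketch: read the new hybrid grace hash join settings added in this change.
    HiveConf conf = new HiveConf();
    boolean useHybridGrace  = conf.getBoolVar(HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN);
    int memCheckFrequency   = conf.getIntVar(HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ);
    boolean reorderNwayJoin = conf.getBoolVar(HiveConf.ConfVars.NWAYJOINREORDER);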

Modified: hive/branches/llap/common/src/java/org/apache/hive/common/util/DateUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/common/src/java/org/apache/hive/common/util/DateUtils.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/common/src/java/org/apache/hive/common/util/DateUtils.java (original)
+++ hive/branches/llap/common/src/java/org/apache/hive/common/util/DateUtils.java Sat Mar 28 00:22:15 2015
@@ -18,8 +18,11 @@
 
 package org.apache.hive.common.util;
 
+import java.math.BigDecimal;
 import java.text.SimpleDateFormat;
 
+import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
+
 /**
  * DateUtils. Thread-safe class
  *
@@ -36,4 +39,31 @@ public class DateUtils {
   public static SimpleDateFormat getDateFormat() {
     return dateFormatLocal.get();
   }
-}
\ No newline at end of file
+
+  public static final int NANOS_PER_SEC = 1000000000;
+  public static final BigDecimal MAX_INT_BD = new BigDecimal(Integer.MAX_VALUE);
+  public static final BigDecimal NANOS_PER_SEC_BD = new BigDecimal(NANOS_PER_SEC);
+
+  public static int parseNumericValueWithRange(String fieldName,
+      String strVal, int minValue, int maxValue) throws IllegalArgumentException {
+    int result = 0;
+    if (strVal != null) {
+      result = Integer.parseInt(strVal);
+      if (result < minValue || result > maxValue) {
+        throw new IllegalArgumentException(String.format("%s value %d outside range [%d, %d]",
+            fieldName, result, minValue, maxValue));
+      }
+    }
+    return result;
+  }
+
+  public static long getIntervalDayTimeTotalNanos(HiveIntervalDayTime intervalDayTime) {
+    return intervalDayTime.getTotalSeconds() * NANOS_PER_SEC + intervalDayTime.getNanos();
+  }
+
+  public static void setIntervalDayTimeTotalNanos(HiveIntervalDayTime intervalDayTime,
+      long totalNanos) {
+    intervalDayTime.set(totalNanos / NANOS_PER_SEC, (int) (totalNanos % NANOS_PER_SEC));
+  }
+}
+
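
As a quick illustration of the arithmetic in the helpers above (total nanos = getTotalSeconds() * NANOS_PER_SEC + getNanos(), and the inverse split using / and %), a round trip might look like the sketch below; it only uses constructors and methods that appear elsewhere in this commit, but the snippet itself is illustrative rather than part of the change:

    HiveIntervalDayTime interval = HiveIntervalDayTime.valueOf("3 04:05:06.123456");
    long totalNanos = DateUtils.getIntervalDayTimeTotalNanos(interval);   // days+time collapsed into one nanosecond count

    HiveIntervalDayTime copy = new HiveIntervalDayTime(0, 0, 0, 0, 0);
    DateUtils.setIntervalDayTimeTotalNanos(copy, totalNanos);             // copy now equals the original interval

    // Range-checked parsing of a numeric field; out-of-range input throws IllegalArgumentException.
    int hours = DateUtils.parseNumericValueWithRange("hour", "04", 0, 23); // 4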

Added: hive/branches/llap/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalDayTime.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalDayTime.java?rev=1669718&view=auto
==============================================================================
--- hive/branches/llap/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalDayTime.java (added)
+++ hive/branches/llap/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalDayTime.java Sat Mar 28 00:22:15 2015
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.common.type;
+
+import org.junit.*;
+
+import static org.junit.Assert.*;
+import com.google.code.tempusfugit.concurrency.annotations.*;
+import com.google.code.tempusfugit.concurrency.*;
+
+public class TestHiveIntervalDayTime {
+
+  @Rule public ConcurrentRule concurrentRule = new ConcurrentRule();
+  @Rule public RepeatingRule repeatingRule = new RepeatingRule();
+
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
+  public void testGetters() throws Exception {
+    HiveIntervalDayTime i1 = new HiveIntervalDayTime(3, 4, 5, 6, 7);
+
+    assertEquals(3, i1.getDays());
+    assertEquals(4, i1.getHours());
+    assertEquals(5, i1.getMinutes());
+    assertEquals(6, i1.getSeconds());
+    assertEquals(7, i1.getNanos());
+  }
+
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
+  public void testCompare() throws Exception {
+    HiveIntervalDayTime i1 = new HiveIntervalDayTime(3, 4, 5, 6, 7);
+    HiveIntervalDayTime i2 = new HiveIntervalDayTime(3, 4, 5, 6, 7);
+    HiveIntervalDayTime i3 = new HiveIntervalDayTime(3, 4, 8, 9, 10);
+    HiveIntervalDayTime i4 = new HiveIntervalDayTime(3, 4, 8, 9, 5);
+
+    // compareTo()
+    assertEquals(i1 + " compareTo " + i1, 0, i1.compareTo(i1));
+    assertEquals(i1 + " compareTo " + i2, 0, i1.compareTo(i2));
+    assertEquals(i2 + " compareTo " + i1, 0, i2.compareTo(i1));
+    assertEquals(i3 + " compareTo " + i3, 0, i3.compareTo(i3));
+
+    assertTrue(i1 + " compareTo " + i3, 0 > i1.compareTo(i3));
+    assertTrue(i3 + " compareTo " + i1, 0 < i3.compareTo(i1));
+
+    // equals()
+    assertTrue(i1 + " equals " + i1, i1.equals(i1));
+    assertTrue(i1 + " equals " + i2, i1.equals(i2));
+    assertFalse(i1 + " equals " + i3, i1.equals(i3));
+    assertFalse(i3 + " equals " + i1, i3.equals(i1));
+    assertFalse(i3 + " equals " + i4, i3.equals(i4));
+
+    // hashCode()
+    assertEquals(i1 + " hashCode " + i1, i1.hashCode(), i1.hashCode());
+    assertEquals(i1 + " hashCode " + i1, i1.hashCode(), i2.hashCode());
+  }
+
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
+  public void testValueOf() throws Exception {
+    HiveIntervalDayTime i1 = HiveIntervalDayTime.valueOf("3 04:05:06.123456");
+    assertEquals(3, i1.getDays());
+    assertEquals(4, i1.getHours());
+    assertEquals(5, i1.getMinutes());
+    assertEquals(6, i1.getSeconds());
+    assertEquals(123456000, i1.getNanos());
+
+    HiveIntervalDayTime i2 = HiveIntervalDayTime.valueOf("+3 04:05:06");
+    assertEquals(3, i2.getDays());
+    assertEquals(4, i2.getHours());
+    assertEquals(5, i2.getMinutes());
+    assertEquals(6, i2.getSeconds());
+    assertEquals(0, i2.getNanos());
+
+    HiveIntervalDayTime i3 = HiveIntervalDayTime.valueOf("-12 13:14:15.987654321");
+    assertEquals(-12, i3.getDays());
+    assertEquals(-13, i3.getHours());
+    assertEquals(-14, i3.getMinutes());
+    assertEquals(-15, i3.getSeconds());
+    assertEquals(-987654321, i3.getNanos());
+
+    HiveIntervalDayTime i4 = HiveIntervalDayTime.valueOf("-0 0:0:0.000000012");
+    assertEquals(0, i4.getDays());
+    assertEquals(0, i4.getHours());
+    assertEquals(0, i4.getMinutes());
+    assertEquals(0, i4.getSeconds());
+    assertEquals(-12, i4.getNanos());
+
+    // Invalid values
+    String[] invalidValues = {
+      null,
+      "abc",
+      "0-11",
+      "0 60:0:0",
+      "0 0:60:0"
+    };
+    for (String invalidValue : invalidValues) {
+      boolean caughtException = false;
+      try {
+        HiveIntervalDayTime.valueOf(invalidValue);
+        fail("Expected exception");
+      } catch (IllegalArgumentException err) {
+        caughtException = true;
+      }
+      assertTrue("Expected exception", caughtException);
+    }
+  }
+
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
+  public void testToString() throws Exception {
+    assertEquals("0 00:00:00.000000000", HiveIntervalDayTime.valueOf("0 00:00:00").toString());
+    assertEquals("3 04:05:06.123456000", HiveIntervalDayTime.valueOf("3 04:05:06.123456").toString());
+    assertEquals("-3 04:05:06.123456000", HiveIntervalDayTime.valueOf("-3 04:05:06.123456").toString());
+    assertEquals("1 00:00:00.000000000", HiveIntervalDayTime.valueOf("1 00:00:00").toString());
+    assertEquals("-1 00:00:00.000000000", HiveIntervalDayTime.valueOf("-1 00:00:00").toString());
+    assertEquals("0 00:00:00.880000000", HiveIntervalDayTime.valueOf("0 00:00:00.88").toString());
+    assertEquals("-0 00:00:00.880000000", HiveIntervalDayTime.valueOf("-0 00:00:00.88").toString());
+
+    // Mixed sign cases
+    assertEquals("-3 04:05:06.000000007",
+        new HiveIntervalDayTime(-3, -4, -5, -6, -7).toString());
+    assertEquals("3 04:05:06.000000007",
+        new HiveIntervalDayTime(3, 4, 5, 6, 7).toString());
+  }
+
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
+  public void testNormalize() throws Exception {
+    HiveIntervalDayTime i1 = new HiveIntervalDayTime(50, 48, 3, 5400, 2000000123);
+    assertEquals(HiveIntervalDayTime.valueOf("52 1:33:2.000000123"), i1);
+    assertEquals(52, i1.getDays());
+    assertEquals(1, i1.getHours());
+    assertEquals(33, i1.getMinutes());
+    assertEquals(2, i1.getSeconds());
+    assertEquals(123, i1.getNanos());
+
+    assertEquals(HiveIntervalDayTime.valueOf("0 0:0:0"),
+        new HiveIntervalDayTime(0, 0, 0, 0, 0));
+    assertEquals(HiveIntervalDayTime.valueOf("0 0:0:0"),
+        new HiveIntervalDayTime(2, -48, 0, 1, -1000000000));
+    assertEquals(HiveIntervalDayTime.valueOf("0 0:0:0"),
+        new HiveIntervalDayTime(-2, 48, 0, -1, 1000000000));
+    assertEquals(HiveIntervalDayTime.valueOf("1 0:0:0"),
+        new HiveIntervalDayTime(-1, 48, 0, 0, 0));
+    assertEquals(HiveIntervalDayTime.valueOf("-1 0:0:0"),
+        new HiveIntervalDayTime(1, -48, 0, 0, 0));
+    assertEquals(HiveIntervalDayTime.valueOf("0 23:59:59.999999999"),
+        new HiveIntervalDayTime(1, 0, 0, 0, -1));
+    assertEquals(HiveIntervalDayTime.valueOf("-0 23:59:59.999999999"),
+        new HiveIntervalDayTime(-1, 0, 0, 0, 1));
+
+    // -1 day 10 hrs 11 mins 172800 secs = -1 day 10 hrs 11 mins + 2 days = 1 day 10 hrs 11 mins
+    assertEquals(HiveIntervalDayTime.valueOf("1 10:11:0"),
+        new HiveIntervalDayTime(-1, 10, 11, 172800, 0));
+
+    i1 = new HiveIntervalDayTime(480, 480, 0, 5400, 2000000123);
+    assertEquals(500, i1.getDays());
+    assertEquals(1, i1.getHours());
+    assertEquals(30, i1.getMinutes());
+    assertEquals(2, i1.getSeconds());
+    assertEquals(123, i1.getNanos());
+  }
+}

Added: hive/branches/llap/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalYearMonth.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalYearMonth.java?rev=1669718&view=auto
==============================================================================
--- hive/branches/llap/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalYearMonth.java (added)
+++ hive/branches/llap/common/src/test/org/apache/hadoop/hive/common/type/TestHiveIntervalYearMonth.java Sat Mar 28 00:22:15 2015
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.common.type;
+
+import org.junit.*;
+import static org.junit.Assert.*;
+import com.google.code.tempusfugit.concurrency.annotations.*;
+import com.google.code.tempusfugit.concurrency.*;
+
+public class TestHiveIntervalYearMonth {
+
+  @Rule public ConcurrentRule concurrentRule = new ConcurrentRule();
+  @Rule public RepeatingRule repeatingRule = new RepeatingRule();
+
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
+  public void testGetters() throws Exception {
+    HiveIntervalYearMonth i1 = new HiveIntervalYearMonth(1, 2);
+    assertEquals(1, i1.getYears());
+    assertEquals(2, i1.getMonths());
+  }
+
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
+  public void testCompare() throws Exception {
+    HiveIntervalYearMonth i1 = new HiveIntervalYearMonth(1, 2);
+    HiveIntervalYearMonth i2 = new HiveIntervalYearMonth(1, 2);
+    HiveIntervalYearMonth i3 = new HiveIntervalYearMonth(1, 3);
+
+    // compareTo()
+    assertEquals(i1 + " compareTo " + i1, 0, i1.compareTo(i1));
+    assertEquals(i1 + " compareTo " + i2, 0, i1.compareTo(i2));
+    assertEquals(i2 + " compareTo " + i1, 0, i2.compareTo(i1));
+    assertEquals(i3 + " compareTo " + i3, 0, i3.compareTo(i3));
+
+    assertTrue(i1 + " compareTo " + i3, 0 > i1.compareTo(i3));
+    assertTrue(i3 + " compareTo " + i1, 0 < i3.compareTo(i1));
+
+    // equals()
+    assertTrue(i1 + " equals " + i1, i1.equals(i1));
+    assertTrue(i1 + " equals " + i2, i1.equals(i2));
+    assertFalse(i1 + " equals " + i3, i1.equals(i3));
+    assertFalse(i3 + " equals " + i1, i3.equals(i1));
+
+    // hashCode()
+    assertEquals(i1 + " hashCode " + i1, i1.hashCode(), i1.hashCode());
+    assertEquals(i1 + " hashCode " + i1, i1.hashCode(), i2.hashCode());
+  }
+
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
+  public void testValueOf() throws Exception {
+    HiveIntervalYearMonth i1 = HiveIntervalYearMonth.valueOf("1-2");
+    assertEquals(1, i1.getYears());
+    assertEquals(2, i1.getMonths());
+
+    HiveIntervalYearMonth i2 = HiveIntervalYearMonth.valueOf("+8-9");
+    assertEquals(8, i2.getYears());
+    assertEquals(9, i2.getMonths());
+
+    HiveIntervalYearMonth i3 = HiveIntervalYearMonth.valueOf("-10-11");
+    assertEquals(-10, i3.getYears());
+    assertEquals(-11, i3.getMonths());
+
+    HiveIntervalYearMonth i4 = HiveIntervalYearMonth.valueOf("-0-0");
+    assertEquals(0, i4.getYears());
+    assertEquals(0, i4.getMonths());
+
+    // Invalid values
+    String[] invalidValues = {
+      null,
+      "abc",
+      "0-12",
+      "0 1:2:3"
+    };
+    for (String invalidValue : invalidValues) {
+      boolean caughtException = false;
+      try {
+        HiveIntervalYearMonth.valueOf(invalidValue);
+        fail("Expected exception");
+      } catch (IllegalArgumentException err) {
+        caughtException = true;
+      }
+      assertTrue("Expected exception", caughtException);
+    }
+  }
+
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
+  public void testToString() throws Exception {
+    assertEquals("0-0", HiveIntervalYearMonth.valueOf("0-0").toString());
+    assertEquals("1-2", HiveIntervalYearMonth.valueOf("1-2").toString());
+    assertEquals("-1-2", HiveIntervalYearMonth.valueOf("-1-2").toString());
+    assertEquals("1-0", HiveIntervalYearMonth.valueOf("1-0").toString());
+    assertEquals("-1-0", HiveIntervalYearMonth.valueOf("-1-0").toString());
+    assertEquals("0-0", HiveIntervalYearMonth.valueOf("-0-0").toString());
+  }
+
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
+  public void testNormalize() throws Exception {
+    HiveIntervalYearMonth i1 = new HiveIntervalYearMonth(1, -6);
+    assertEquals(HiveIntervalYearMonth.valueOf("0-6"), i1);
+    assertEquals(0, i1.getYears());
+    assertEquals(6, i1.getMonths());
+
+    assertEquals(HiveIntervalYearMonth.valueOf("0-0"), new HiveIntervalYearMonth(0, 0));
+    assertEquals(HiveIntervalYearMonth.valueOf("0-0"), new HiveIntervalYearMonth(-1, 12));
+    assertEquals(HiveIntervalYearMonth.valueOf("0-4"), new HiveIntervalYearMonth(-1, 16));
+    assertEquals(HiveIntervalYearMonth.valueOf("0-11"), new HiveIntervalYearMonth(1, -1));
+    assertEquals(HiveIntervalYearMonth.valueOf("-0-11"), new HiveIntervalYearMonth(-1, 1));
+
+    // -5 years + 121 months = -5 years + 10 years + 1 month = 5 years 1 month
+    assertEquals(HiveIntervalYearMonth.valueOf("5-1"), new HiveIntervalYearMonth(-5, 121));
+  }
+}

Modified: hive/branches/llap/dev-support/jenkins-common.sh
URL: http://svn.apache.org/viewvc/hive/branches/llap/dev-support/jenkins-common.sh?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/dev-support/jenkins-common.sh (original)
+++ hive/branches/llap/dev-support/jenkins-common.sh Sat Mar 28 00:22:15 2015
@@ -91,3 +91,9 @@ process_jira() {
   fi
   export BUILD_OPTS=$build_opts
 }
+
+# Checks whether the patch at the given URL contains HMS upgrade changes.
+# Returns 0 if there are changes; a non-zero value otherwise.
+patch_contains_hms_upgrade() {
+	curl -s "$1" | grep "^diff.*metastore/scripts/upgrade/" >/dev/null
+}
\ No newline at end of file

Modified: hive/branches/llap/dev-support/jenkins-execute-hms-test.sh
URL: http://svn.apache.org/viewvc/hive/branches/llap/dev-support/jenkins-execute-hms-test.sh?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/dev-support/jenkins-execute-hms-test.sh (original)
+++ hive/branches/llap/dev-support/jenkins-execute-hms-test.sh Sat Mar 28 00:22:15 2015
@@ -163,10 +163,6 @@ create_publish_file() {
 	echo $json_file
 }
 
-patch_contains_hms_upgrade() {
-	curl -s "$1" | grep "^diff.*metastore/scripts/upgrade/" >/dev/null
-}
-
 if patch_contains_hms_upgrade "$PATCH_URL"; then
 	ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $SSH_KEY $SSH_HOST "
 		rm -rf metastore/ &&
@@ -184,6 +180,10 @@ if patch_contains_hms_upgrade "$PATCH_UR
 		do
 			if echo $line | grep 'Test failed' > /dev/null; then
 				FAILED_TESTS+=("$line")
+			elif echo $line | grep 'Executing sql test' >/dev/null; then
+				# Remove 'Executing sql test' line from MESSAGES log to avoid a verbose
+				# comment on JIRA
+				continue
 			fi
 
 			MESSAGES+=("$line")

Modified: hive/branches/llap/dev-support/jenkins-submit-build.sh
URL: http://svn.apache.org/viewvc/hive/branches/llap/dev-support/jenkins-submit-build.sh?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/dev-support/jenkins-submit-build.sh (original)
+++ hive/branches/llap/dev-support/jenkins-submit-build.sh Sat Mar 28 00:22:15 2015
@@ -27,23 +27,28 @@ case "$BUILD_PROFILE" in
   trunk-mr1|trunk-mr2)
    test -n "$TRUNK_URL" || fail "TRUNK_URL must be specified"
    url="$TRUNK_URL&ISSUE_NUM=$ISSUE_NUM"
-   curl -v -i "$url"
-   exit 0
   ;;
   spark-mr2)
    test -n "$SPARK_URL" || fail "SPARK_URL must be specified"
    url="$SPARK_URL&ISSUE_NUM=$ISSUE_NUM"
-   curl -v -i "$url"
-   exit 0
   ;;
   encryption-mr2)
    test -n "$ENCRYPTION_URL" || fail "ENCRYPTION_URL must be specified"
    url="$ENCRYPTION_URL&ISSUE_NUM=$ISSUE_NUM"
-   curl -v -i "$url"
-   exit 0
   ;;
   *)
   echo "Unknown profile '$BUILD_PROFILE'"
   exit 1
   ;;
 esac
+
+# Execute jenkins job for HMS upgrade tests if needed
+if patch_contains_hms_upgrade "${JIRA_ROOT_URL}$PATCH_URL"; then
+  test -n "$HMS_UPGRADE_URL" || fail "HMS_UPGRADE_URL must be specified"
+  echo "Calling HMS upgrade testing job..."
+  curl -v -i "${HMS_UPGRADE_URL}&ISSUE_NUM=${ISSUE_NUM}&BRANCH=${BRANCH}"
+fi
+
+# Execute jenkins job for specific profile
+echo "Calling Precommit $BRANCH Build..."
+curl -v -i "$url"

Modified: hive/branches/llap/hbase-handler/src/test/queries/positive/hbase_timestamp.q
URL: http://svn.apache.org/viewvc/hive/branches/llap/hbase-handler/src/test/queries/positive/hbase_timestamp.q?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/hbase-handler/src/test/queries/positive/hbase_timestamp.q (original)
+++ hive/branches/llap/hbase-handler/src/test/queries/positive/hbase_timestamp.q Sat Mar 28 00:22:15 2015
@@ -10,7 +10,7 @@ DROP TABLE hbase_table;
 CREATE TABLE hbase_table (key string, value string, time bigint)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
   WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp");
-FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, value, 1329959754000 WHERE (key % 17) = 0;
+FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, value, 1329959754 WHERE (key % 17) = 0;
 SELECT key, value, cast(time as timestamp) FROM hbase_table;
 
 DROP TABLE hbase_table;
@@ -19,23 +19,23 @@ CREATE TABLE hbase_table (key string, va
   WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp");
 insert overwrite table hbase_table select key,value,ts FROM
 (
-  select key, value, 100000000000 as ts from src WHERE (key % 33) = 0
+  select key, value, 100000000 as ts from src WHERE (key % 33) = 0
   UNION ALL
-  select key, value, 200000000000 as ts from src WHERE (key % 37) = 0
+  select key, value, 200000000 as ts from src WHERE (key % 37) = 0
 ) T;
 
 explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000;
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000;
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000;
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000;
 
 explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000;
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000;
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000;
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000;
 
 explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000;
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000;
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000;
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000;
 
 explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000;
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000;
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000;
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000;

Modified: hive/branches/llap/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
URL: http://svn.apache.org/viewvc/hive/branches/llap/hbase-handler/src/test/results/positive/hbase_timestamp.q.out?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/hbase-handler/src/test/results/positive/hbase_timestamp.q.out (original)
+++ hive/branches/llap/hbase-handler/src/test/results/positive/hbase_timestamp.q.out Sat Mar 28 00:22:15 2015
@@ -81,11 +81,11 @@ POSTHOOK: query: CREATE TABLE hbase_tabl
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, value, 1329959754000 WHERE (key % 17) = 0
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, value, 1329959754 WHERE (key % 17) = 0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@hbase_table
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, value, 1329959754000 WHERE (key % 17) = 0
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, value, 1329959754 WHERE (key % 17) = 0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@hbase_table
@@ -139,27 +139,27 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table
 PREHOOK: query: insert overwrite table hbase_table select key,value,ts FROM
 (
-  select key, value, 100000000000 as ts from src WHERE (key % 33) = 0
+  select key, value, 100000000 as ts from src WHERE (key % 33) = 0
   UNION ALL
-  select key, value, 200000000000 as ts from src WHERE (key % 37) = 0
+  select key, value, 200000000 as ts from src WHERE (key % 37) = 0
 ) T
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@hbase_table
 POSTHOOK: query: insert overwrite table hbase_table select key,value,ts FROM
 (
-  select key, value, 100000000000 as ts from src WHERE (key % 33) = 0
+  select key, value, 100000000 as ts from src WHERE (key % 33) = 0
   UNION ALL
-  select key, value, 200000000000 as ts from src WHERE (key % 37) = 0
+  select key, value, 200000000 as ts from src WHERE (key % 37) = 0
 ) T
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@hbase_table
 PREHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -171,10 +171,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: hbase_table
-            filterExpr: (((key > 100.0) and (key < 400.0)) and (time < 200000000000)) (type: boolean)
+            filterExpr: (((key > 100.0) and (key < 400.0)) and (time < 200000000)) (type: boolean)
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time < 200000000000))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time < 200000000))) (type: boolean)
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)
@@ -194,21 +194,21 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000
+PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000
 PREHOOK: type: QUERY
 PREHOOK: Input: default@hbase_table
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000000
+POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time < 200000000
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@hbase_table
 #### A masked pattern was here ####
 165	val_165	1973-03-03 01:46:40
 396	val_396	1973-03-03 01:46:40
 PREHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -220,10 +220,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: hbase_table
-            filterExpr: (((key > 100.0) and (key < 400.0)) and (time > 100000000000)) (type: boolean)
+            filterExpr: (((key > 100.0) and (key < 400.0)) and (time > 100000000)) (type: boolean)
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time > 100000000000))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time > 100000000))) (type: boolean)
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)
@@ -243,11 +243,11 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000
+PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000
 PREHOOK: type: QUERY
 PREHOOK: Input: default@hbase_table
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000000
+POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time > 100000000
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@hbase_table
 #### A masked pattern was here ####
@@ -256,10 +256,10 @@ POSTHOOK: Input: default@hbase_table
 296	val_296	1976-05-03 12:33:20
 333	val_333	1976-05-03 12:33:20
 PREHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -271,10 +271,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: hbase_table
-            filterExpr: (((key > 100.0) and (key < 400.0)) and (time <= 100000000000)) (type: boolean)
+            filterExpr: (((key > 100.0) and (key < 400.0)) and (time <= 100000000)) (type: boolean)
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time <= 100000000000))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time <= 100000000))) (type: boolean)
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)
@@ -294,21 +294,21 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000
+PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000
 PREHOOK: type: QUERY
 PREHOOK: Input: default@hbase_table
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000000
+POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time <= 100000000
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@hbase_table
 #### A masked pattern was here ####
 165	val_165	1973-03-03 01:46:40
 396	val_396	1973-03-03 01:46:40
 PREHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000
+SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -320,10 +320,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: hbase_table
-            filterExpr: (((key > 100.0) and (key < 400.0)) and (time >= 200000000000)) (type: boolean)
+            filterExpr: (((key > 100.0) and (key < 400.0)) and (time >= 200000000)) (type: boolean)
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time >= 200000000000))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time >= 200000000))) (type: boolean)
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)
@@ -343,11 +343,11 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000
+PREHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000
 PREHOOK: type: QUERY
 PREHOOK: Input: default@hbase_table
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000000
+POSTHOOK: query: SELECT key, value, cast(time as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND time >= 200000000
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@hbase_table
 #### A masked pattern was here ####
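
The test literals above dropped three zeros (e.g. 200000000000 -> 200000000), i.e. they are now epoch seconds rather than milliseconds, which matches the 1973/1976 timestamps in the output rows. A quick stand-alone sanity check of that arithmetic (not part of this commit; it prints UTC, while the q.out rows above reflect the test's local timezone):

    // Sanity check only: the bigint values in the queries are now epoch seconds.
    System.out.println(java.time.Instant.ofEpochSecond(100000000L)); // 1973-03-03T09:46:40Z
    System.out.println(java.time.Instant.ofEpochSecond(200000000L)); // 1976-05-03T19:33:20Z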

Modified: hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java (original)
+++ hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java Sat Mar 28 00:22:15 2015
@@ -10,7 +10,6 @@ import org.apache.hadoop.hive.common.Val
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreThread;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CompactionRequest;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
@@ -55,6 +54,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.SortedSet;
+import java.util.TreeSet;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
@@ -88,6 +89,7 @@ public class TestCompactor {
     hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, "");
     hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, TEST_WAREHOUSE_DIR);
     hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName());
+    hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
     //"org.apache.hadoop.hive.ql.io.HiveInputFormat"
 
     TxnDbUtil.setConfValues(hiveConf);
@@ -281,6 +283,124 @@ public class TestCompactor {
   }
 
   @Test
+  public void dynamicPartitioningInsert() throws Exception {
+    String tblName = "dpct";
+    List<String> colNames = Arrays.asList("a", "b");
+    executeStatementOnDriver("drop table if exists " + tblName, driver);
+    executeStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " +
+      " PARTITIONED BY(ds string)" +
+      " CLUSTERED BY(a) INTO 2 BUCKETS" + //currently ACID requires table to be bucketed
+      " STORED AS ORC TBLPROPERTIES ('transactional'='true')", driver);
+    executeStatementOnDriver("insert into " + tblName + " partition (ds) values (1, 'fred', " +
+        "'today'), (2, 'wilma', 'yesterday')", driver);
+
+    Initiator initiator = new Initiator();
+    initiator.setThreadId((int)initiator.getId());
+    conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 0);
+    initiator.setHiveConf(conf);
+    AtomicBoolean stop = new AtomicBoolean();
+    stop.set(true);
+    initiator.init(stop, new AtomicBoolean());
+    initiator.run();
+
+    CompactionTxnHandler txnHandler = new CompactionTxnHandler(conf);
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(2, compacts.size());
+    SortedSet<String> partNames = new TreeSet<String>();
+    for (int i = 0; i < compacts.size(); i++) {
+      Assert.assertEquals("default", compacts.get(i).getDbname());
+      Assert.assertEquals(tblName, compacts.get(i).getTablename());
+      Assert.assertEquals("initiated", compacts.get(i).getState());
+      partNames.add(compacts.get(i).getPartitionname());
+    }
+    List<String> names = new ArrayList<String>(partNames);
+    Assert.assertEquals("ds=today", names.get(0));
+    Assert.assertEquals("ds=yesterday", names.get(1));
+  }
+
+  @Test
+  public void dynamicPartitioningUpdate() throws Exception {
+    String tblName = "udpct";
+    List<String> colNames = Arrays.asList("a", "b");
+    executeStatementOnDriver("drop table if exists " + tblName, driver);
+    executeStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " +
+      " PARTITIONED BY(ds string)" +
+      " CLUSTERED BY(a) INTO 2 BUCKETS" + //currently ACID requires table to be bucketed
+      " STORED AS ORC TBLPROPERTIES ('transactional'='true')", driver);
+    executeStatementOnDriver("insert into " + tblName + " partition (ds) values (1, 'fred', " +
+        "'today'), (2, 'wilma', 'yesterday')", driver);
+
+    executeStatementOnDriver("update " + tblName + " set b = 'barney'", driver);
+
+    Initiator initiator = new Initiator();
+    initiator.setThreadId((int)initiator.getId());
+    // Set to 1 so insert doesn't set it off but update does
+    conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 1);
+    initiator.setHiveConf(conf);
+    AtomicBoolean stop = new AtomicBoolean();
+    stop.set(true);
+    initiator.init(stop, new AtomicBoolean());
+    initiator.run();
+
+    CompactionTxnHandler txnHandler = new CompactionTxnHandler(conf);
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(2, compacts.size());
+    SortedSet<String> partNames = new TreeSet<String>();
+    for (int i = 0; i < compacts.size(); i++) {
+      Assert.assertEquals("default", compacts.get(i).getDbname());
+      Assert.assertEquals(tblName, compacts.get(i).getTablename());
+      Assert.assertEquals("initiated", compacts.get(i).getState());
+      partNames.add(compacts.get(i).getPartitionname());
+    }
+    List<String> names = new ArrayList<String>(partNames);
+    Assert.assertEquals("ds=today", names.get(0));
+    Assert.assertEquals("ds=yesterday", names.get(1));
+  }
+
+  @Test
+  public void dynamicPartitioningDelete() throws Exception {
+    String tblName = "ddpct";
+    List<String> colNames = Arrays.asList("a", "b");
+    executeStatementOnDriver("drop table if exists " + tblName, driver);
+    executeStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " +
+      " PARTITIONED BY(ds string)" +
+      " CLUSTERED BY(a) INTO 2 BUCKETS" + //currently ACID requires table to be bucketed
+      " STORED AS ORC TBLPROPERTIES ('transactional'='true')", driver);
+    executeStatementOnDriver("insert into " + tblName + " partition (ds) values (1, 'fred', " +
+        "'today'), (2, 'wilma', 'yesterday')", driver);
+
+    executeStatementOnDriver("update " + tblName + " set a = 3", driver);
+
+    executeStatementOnDriver("delete from " + tblName + " where b = 'fred'", driver);
+
+    Initiator initiator = new Initiator();
+    initiator.setThreadId((int)initiator.getId());
+    // Set to 2 so insert and update don't set it off but delete does
+    conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 2);
+    initiator.setHiveConf(conf);
+    AtomicBoolean stop = new AtomicBoolean();
+    stop.set(true);
+    initiator.init(stop, new AtomicBoolean());
+    initiator.run();
+
+    CompactionTxnHandler txnHandler = new CompactionTxnHandler(conf);
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(1, compacts.size());
+    SortedSet<String> partNames = new TreeSet<String>();
+    for (int i = 0; i < compacts.size(); i++) {
+      Assert.assertEquals("default", compacts.get(i).getDbname());
+      Assert.assertEquals(tblName, compacts.get(i).getTablename());
+      Assert.assertEquals("initiated", compacts.get(i).getState());
+      partNames.add(compacts.get(i).getPartitionname());
+    }
+    List<String> names = new ArrayList<String>(partNames);
+    Assert.assertEquals("ds=today", names.get(0));
+  }
+
+  @Test
   public void minorCompactWhileStreaming() throws Exception {
     String dbName = "default";
     String tblName = "cws";

Modified: hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestHiveSessionImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestHiveSessionImpl.java?rev=1669718&r1=1669495&r2=1669718&view=diff
==============================================================================
    (empty)

Modified: hive/branches/llap/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/src/test/resources/testconfiguration.properties?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/llap/itests/src/test/resources/testconfiguration.properties Sat Mar 28 00:22:15 2015
@@ -287,6 +287,7 @@ minitez.query.files=bucket_map_join_tez1
   bucket_map_join_tez2.q,\
   dynamic_partition_pruning.q,\
   dynamic_partition_pruning_2.q,\
+  hybridhashjoin.q,\
   mapjoin_decimal.q,\
   lvj_mapjoin.q,\
   llapdecider.q,\
@@ -315,7 +316,8 @@ encrypted.query.files=encryption_join_un
   encryption_join_with_different_encryption_keys.q,\
   encryption_select_read_only_encrypted_tbl.q,\
   encryption_select_read_only_unencrypted_tbl.q,\
-  encryption_load_data_to_encrypted_tables.q
+  encryption_load_data_to_encrypted_tables.q, \
+  encryption_unencrypted_nonhdfs_external_tables.q
 
 beeline.positive.exclude=add_part_exist.q,\
   alter1.q,\

Added: hive/branches/llap/itests/thirdparty/spark-1.2.0-bin-hadoop2-without-hive.tgz
URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/thirdparty/spark-1.2.0-bin-hadoop2-without-hive.tgz?rev=1669718&view=auto
==============================================================================
Binary file - no diff available.

Propchange: hive/branches/llap/itests/thirdparty/spark-1.2.0-bin-hadoop2-without-hive.tgz
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Modified: hive/branches/llap/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/orc/stream/readers/DecimalStreamReader.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/orc/stream/readers/DecimalStreamReader.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/orc/stream/readers/DecimalStreamReader.java (original)
+++ hive/branches/llap/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/orc/stream/readers/DecimalStreamReader.java Sat Mar 28 00:22:15 2015
@@ -65,7 +65,7 @@ public class DecimalStreamReader extends
       if (_isFileCompressed) {
         index.getNext();
       }
-      value.seek(index);
+      valueStream.seek(index);
     }
 
     if (_scaleStream.available() > 0) {

Modified: hive/branches/llap/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java (original)
+++ hive/branches/llap/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java Sat Mar 28 00:22:15 2015
@@ -450,7 +450,7 @@ public class OrcEncodedDataReader extend
       List<OrcProto.Type> types = fileMetadata.getTypes();
       String[] colNamesForSarg = OrcInputFormat.getSargColumnNames(
           columnNames, types, globalIncludes, fileMetadata.isOriginalFormat());
-      sargApp = new SargApplier(sarg, colNamesForSarg, rowIndexStride, types);
+      sargApp = new SargApplier(sarg, colNamesForSarg, rowIndexStride, types, globalIncludes.length);
     }
     // readState should have been initialized by this time with an empty array.
     for (int stripeIxMod = 0; stripeIxMod < readState.length; ++stripeIxMod) {

Modified: hive/branches/llap/metastore/if/hive_metastore.thrift
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/if/hive_metastore.thrift?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/metastore/if/hive_metastore.thrift (original)
+++ hive/branches/llap/metastore/if/hive_metastore.thrift Sat Mar 28 00:22:15 2015
@@ -651,6 +651,13 @@ struct ShowCompactResponse {
     1: required list<ShowCompactResponseElement> compacts,
 }
 
+struct AddDynamicPartitions {
+    1: required i64 txnid,
+    2: required string dbname,
+    3: required string tablename,
+    4: required list<string> partitionnames,
+}
+
 struct NotificationEventRequest {
     1: required i64 lastEvent,
     2: optional i32 maxEvents,
@@ -1164,6 +1171,7 @@ service ThriftHiveMetastore extends fb30
   HeartbeatTxnRangeResponse heartbeat_txn_range(1:HeartbeatTxnRangeRequest txns)
   void compact(1:CompactionRequest rqst) 
   ShowCompactResponse show_compact(1:ShowCompactRequest rqst)
+  void add_dynamic_partitions(1:AddDynamicPartitions rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
 
   // Notification logging calls
   NotificationEventResponse get_next_notification(1:NotificationEventRequest rqst) 
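
A hypothetical sketch of how a caller might populate the new request; the setter and method names simply follow the usual conventions of Thrift-generated Java beans for the fields and service call declared above, and txnId / client (a connected ThriftHiveMetastore client with an open transaction) are assumed here rather than shown in this diff:

    // Assumed Thrift-generated bean for the AddDynamicPartitions struct above.
    AddDynamicPartitions rqst = new AddDynamicPartitions();
    rqst.setTxnid(txnId);                                    // id of the open transaction
    rqst.setDbname("default");
    rqst.setTablename("dpct");
    rqst.setPartitionnames(Arrays.asList("ds=today", "ds=yesterday"));
    client.add_dynamic_partitions(rqst);                     // may throw NoSuchTxnException or TxnAbortedException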

Modified: hive/branches/llap/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.12.0-to-0.13.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.12.0-to-0.13.0.mssql.sql?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.12.0-to-0.13.0.mssql.sql (original)
+++ hive/branches/llap/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.12.0-to-0.13.0.mssql.sql Sat Mar 28 00:22:15 2015
@@ -19,3 +19,10 @@ CREATE TABLE PART_COL_STATS
     PARTITION_NAME varchar(767) NOT NULL,
     "TABLE_NAME" varchar(128) NOT NULL
 );
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+

Modified: hive/branches/llap/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.13.0-to-0.14.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.13.0-to-0.14.0.mssql.sql?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.13.0-to-0.14.0.mssql.sql (original)
+++ hive/branches/llap/metastore/scripts/upgrade/mssql/pre-0-upgrade-0.13.0-to-0.14.0.mssql.sql Sat Mar 28 00:22:15 2015
@@ -21,3 +21,9 @@ CREATE TABLE PART_COL_STATS
     PARTITION_NAME varchar(767) NOT NULL,
     "TABLE_NAME" varchar(128) NOT NULL
 );
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);

Added: hive/branches/llap/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.12.0-to-0.13.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.12.0-to-0.13.0.mssql.sql?rev=1669718&view=auto
==============================================================================
--- hive/branches/llap/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.12.0-to-0.13.0.mssql.sql (added)
+++ hive/branches/llap/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.12.0-to-0.13.0.mssql.sql Sat Mar 28 00:22:15 2015
@@ -0,0 +1,26 @@
+CREATE TABLE TAB_COL_STATS
+(
+    CS_ID bigint NOT NULL,
+    AVG_COL_LEN float NULL,
+    "COLUMN_NAME" varchar(128) NOT NULL,
+    COLUMN_TYPE varchar(128) NOT NULL,
+    DB_NAME varchar(128) NOT NULL,
+    DOUBLE_HIGH_VALUE float NULL,
+    DOUBLE_LOW_VALUE float NULL,
+    LAST_ANALYZED bigint NOT NULL,
+    LONG_HIGH_VALUE bigint NULL,
+    LONG_LOW_VALUE bigint NULL,
+    MAX_COL_LEN bigint NULL,
+    NUM_DISTINCTS bigint NULL,
+    NUM_FALSES bigint NULL,
+    NUM_NULLS bigint NOT NULL,
+    NUM_TRUES bigint NULL,
+    TBL_ID bigint NULL,
+    "TABLE_NAME" varchar(128) NOT NULL
+);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID);

Added: hive/branches/llap/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.13.0-to-0.14.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.13.0-to-0.14.0.mssql.sql?rev=1669718&view=auto
==============================================================================
--- hive/branches/llap/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.13.0-to-0.14.0.mssql.sql (added)
+++ hive/branches/llap/metastore/scripts/upgrade/mssql/pre-1-upgrade-0.13.0-to-0.14.0.mssql.sql Sat Mar 28 00:22:15 2015
@@ -0,0 +1,28 @@
+CREATE TABLE TAB_COL_STATS
+(
+    CS_ID bigint NOT NULL,
+    AVG_COL_LEN float NULL,
+    "COLUMN_NAME" varchar(128) NOT NULL,
+    COLUMN_TYPE varchar(128) NOT NULL,
+    DB_NAME varchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE varchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE varchar(255) NULL,
+    DOUBLE_HIGH_VALUE float NULL,
+    DOUBLE_LOW_VALUE float NULL,
+    LAST_ANALYZED bigint NOT NULL,
+    LONG_HIGH_VALUE bigint NULL,
+    LONG_LOW_VALUE bigint NULL,
+    MAX_COL_LEN bigint NULL,
+    NUM_DISTINCTS bigint NULL,
+    NUM_FALSES bigint NULL,
+    NUM_NULLS bigint NOT NULL,
+    NUM_TRUES bigint NULL,
+    TBL_ID bigint NULL,
+    "TABLE_NAME" varchar(128) NOT NULL
+);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID);

Added: hive/branches/llap/metastore/scripts/upgrade/mysql/021-HIVE-7018.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/scripts/upgrade/mysql/021-HIVE-7018.mysql.sql?rev=1669718&view=auto
==============================================================================
--- hive/branches/llap/metastore/scripts/upgrade/mysql/021-HIVE-7018.mysql.sql (added)
+++ hive/branches/llap/metastore/scripts/upgrade/mysql/021-HIVE-7018.mysql.sql Sat Mar 28 00:22:15 2015
@@ -0,0 +1,53 @@
+SELECT '< HIVE-7018 Remove Table and Partition tables column LINK_TARGET_ID from MySQL since other DBs do not have it >' AS ' ';
+
+DELIMITER $$
+DROP PROCEDURE IF EXISTS RM_TLBS_LINKID $$
+DROP PROCEDURE IF EXISTS RM_PARTITIONS_LINKID $$
+DROP PROCEDURE IF EXISTS RM_LINKID $$
+
+/* Call this procedure to drop column LINK_TARGET_ID for TBLS */
+CREATE PROCEDURE RM_TLBS_LINKID()
+  BEGIN
+    IF EXISTS (SELECT * FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_NAME` = 'TBLS' AND `COLUMN_NAME` = 'LINK_TARGET_ID') THEN
+      ALTER TABLE `TBLS`
+        DROP FOREIGN KEY `TBLS_FK3`
+      ;
+      ALTER TABLE `TBLS`
+        DROP KEY `TBLS_N51`
+      ;
+      ALTER TABLE `TBLS`
+        DROP COLUMN `LINK_TARGET_ID`
+      ;
+    END IF;
+  END $$
+
+/* Call this procedure to drop column LINK_TARGET_ID for PARTITIONS */
+CREATE PROCEDURE RM_PARTITIONS_LINKID()
+  BEGIN
+    IF EXISTS (SELECT * FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_NAME` = 'PARTITIONS' AND `COLUMN_NAME` = 'LINK_TARGET_ID') THEN
+      ALTER TABLE `PARTITIONS`
+        DROP FOREIGN KEY `PARTITIONS_FK3`
+      ;
+      ALTER TABLE `PARTITIONS`
+        DROP KEY `PARTITIONS_N51`
+      ;
+      ALTER TABLE `PARTITIONS`
+        DROP COLUMN `LINK_TARGET_ID`
+      ;
+    END IF;
+  END $$
+
+/*
+ * Check and drop column LINK_TARGET_ID
+ */
+CREATE PROCEDURE RM_LINKID()
+  BEGIN
+    call RM_PARTITIONS_LINKID();
+    call RM_TLBS_LINKID();
+    SELECT 'Completed remove LINK_TARGET_ID';
+  END $$
+
+
+DELIMITER ;
+
+CALL RM_LINKID();
\ No newline at end of file

Modified: hive/branches/llap/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql (original)
+++ hive/branches/llap/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql Sat Mar 28 00:22:15 2015
@@ -211,15 +211,12 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS`
   `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `SD_ID` bigint(20) DEFAULT NULL,
   `TBL_ID` bigint(20) DEFAULT NULL,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
   PRIMARY KEY (`PART_ID`),
   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
   KEY `PARTITIONS_N49` (`TBL_ID`),
   KEY `PARTITIONS_N50` (`SD_ID`),
-  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 /*!40101 SET character_set_client = @saved_cs_client */;
 
@@ -590,15 +587,12 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
   `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `VIEW_EXPANDED_TEXT` mediumtext,
   `VIEW_ORIGINAL_TEXT` mediumtext,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
   PRIMARY KEY (`TBL_ID`),
   UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
   KEY `TBLS_N50` (`SD_ID`),
   KEY `TBLS_N49` (`DB_ID`),
-  KEY `TBLS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
-  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 /*!40101 SET character_set_client = @saved_cs_client */;
 

Modified: hive/branches/llap/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql (original)
+++ hive/branches/llap/metastore/scripts/upgrade/mysql/upgrade-1.1.0-to-1.2.0.mysql.sql Sat Mar 28 00:22:15 2015
@@ -1,5 +1,5 @@
 SELECT 'Upgrading MetaStore schema from 1.1.0 to 1.2.0' AS ' ';
-
+SOURCE 021-HIVE-7018.mysql.sql;
 UPDATE VERSION SET SCHEMA_VERSION='1.2.0', VERSION_COMMENT='Hive release version 1.2.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 1.1.0 to 1.2.0' AS ' ';
 

Modified: hive/branches/llap/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql (original)
+++ hive/branches/llap/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql Sat Mar 28 00:22:15 2015
@@ -21,3 +21,9 @@ MAX_COL_LEN NUMBER,
 NUM_TRUES NUMBER,
 NUM_FALSES NUMBER,
 LAST_ANALYZED NUMBER NOT NULL);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);

Modified: hive/branches/llap/metastore/scripts/upgrade/postgres/pre-0-upgrade-0.13.0-to-0.14.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/scripts/upgrade/postgres/pre-0-upgrade-0.13.0-to-0.14.0.postgres.sql?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/metastore/scripts/upgrade/postgres/pre-0-upgrade-0.13.0-to-0.14.0.postgres.sql (original)
+++ hive/branches/llap/metastore/scripts/upgrade/postgres/pre-0-upgrade-0.13.0-to-0.14.0.postgres.sql Sat Mar 28 00:22:15 2015
@@ -21,3 +21,9 @@ CREATE TABLE "PART_COL_STATS" (
  "NUM_FALSES" bigint,
  "LAST_ANALYZED" bigint NOT NULL
 );
+
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;


