hive-commits mailing list archives

From: ser...@apache.org
Subject: svn commit: r1669718 [12/29] - in /hive/branches/llap: ./ ant/src/org/apache/hadoop/hive/ant/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/common/type/ common/src/java/org/apache/hadoop/hive/conf/ common/src/jav...
Date: Sat, 28 Mar 2015 00:22:27 GMT
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java Sat Mar 28 00:22:15 2015
@@ -26,12 +26,15 @@ import java.math.BigInteger;
 import java.nio.ByteBuffer;
 import java.sql.Date;
 import java.sql.Timestamp;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.TimeZone;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang3.exception.ExceptionUtils;
@@ -80,8 +83,8 @@ import org.apache.hadoop.io.LongWritable
 import org.apache.hadoop.io.Text;
 
 public class RecordReaderImpl implements RecordReader {
-
   static final Log LOG = LogFactory.getLog(RecordReaderImpl.class);
+
   private static final boolean isLogTraceEnabled = LOG.isTraceEnabled();
   private static final boolean isLogDebugEnabled = LOG.isDebugEnabled();
   private final Path path;
@@ -109,8 +112,6 @@ public class RecordReaderImpl implements
   private final OrcProto.RowIndex[] indexes;
   private final OrcProto.BloomFilterIndex[] bloomFilterIndices;
   private final SargApplier sargApp;
-  // same as the above array, but indices are set to true
-  private final boolean[] sargColumns;
   // an array about which row groups aren't skipped
   private boolean[] includedRowGroups = null;
   private final Configuration conf;
@@ -189,7 +190,7 @@ public class RecordReaderImpl implements
                    int bufferSize,
                    long strideRate,
                    Configuration conf
-                  ) throws IOException {
+                   ) throws IOException {
     this.fileSystem = fileSystem;
     this.path = path;
     this.file = fileSystem.open(path);
@@ -202,18 +203,10 @@ public class RecordReaderImpl implements
     this.metadata = new MetadataReaderImpl(fileSystem, path, codec, bufferSize, types.size());
     SearchArgument sarg = options.getSearchArgument();
     if (sarg != null && strideRate != 0) {
-      sargApp = new SargApplier(sarg, options.getColumnNames(), strideRate, types);
-      // included will not be null, row options will fill the array with trues if null
-      sargColumns = new boolean[included.length];
-      for (int i : sargApp.filterColumns) {
-        // filter columns may have -1 as index which could be partition column in SARG.
-        if (i > 0) {
-          sargColumns[i] = true;
-        }
-      }
+      sargApp = new SargApplier(
+          sarg, options.getColumnNames(), strideRate, types, included.length);
     } else {
       sargApp = null;
-      sargColumns = null;
     }
     long rows = 0;
     long skippedRows = 0;
@@ -295,24 +288,23 @@ public class RecordReaderImpl implements
 
     IntegerReader createIntegerReader(OrcProto.ColumnEncoding.Kind kind,
         InStream in,
-        boolean signed,
-        boolean skipCorrupt) throws IOException {
+        boolean signed, boolean skipCorrupt) throws IOException {
       switch (kind) {
-        case DIRECT_V2:
-        case DICTIONARY_V2:
-          return new RunLengthIntegerReaderV2(in, signed, skipCorrupt);
-        case DIRECT:
-        case DICTIONARY:
-          return new RunLengthIntegerReader(in, signed);
-        default:
-          throw new IllegalArgumentException("Unknown encoding " + kind);
+      case DIRECT_V2:
+      case DICTIONARY_V2:
+        return new RunLengthIntegerReaderV2(in, signed, skipCorrupt);
+      case DIRECT:
+      case DICTIONARY:
+        return new RunLengthIntegerReader(in, signed);
+      default:
+        throw new IllegalArgumentException("Unknown encoding " + kind);
       }
     }
 
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encoding
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      checkEncoding(encoding.get(columnId));
+      checkEncoding(stripeFooter.getColumnsList().get(columnId));
       InStream in = streams.get(new StreamName(columnId,
           OrcProto.Stream.Kind.PRESENT));
       if (in == null) {
@@ -409,9 +401,9 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                      ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       reader = new BitFieldReader(streams.get(new StreamName(columnId,
           OrcProto.Stream.Kind.DATA)), 1);
     }
@@ -479,9 +471,9 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       reader = new RunLengthByteReader(streams.get(new StreamName(columnId,
           OrcProto.Stream.Kind.DATA)));
     }
@@ -563,13 +555,13 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       StreamName name = new StreamName(columnId,
           OrcProto.Stream.Kind.DATA);
-      reader = createIntegerReader(encodings.get(columnId).getKind(), streams.get(name), true,
-          false);
+      reader = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+        streams.get(name), true, false);
     }
 
     @Override
@@ -655,13 +647,14 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       StreamName name = new StreamName(columnId,
           OrcProto.Stream.Kind.DATA);
-      reader = createIntegerReader(encodings.get(columnId).getKind(), streams.get(name), true,
-          false);
+      // TODO: stripeFooter.getColumnsList()?
+      reader = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+        streams.get(name), true, false);
     }
 
     @Override
@@ -742,13 +735,13 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       StreamName name = new StreamName(columnId,
           OrcProto.Stream.Kind.DATA);
-      reader = createIntegerReader(encodings.get(columnId).getKind(), streams.get(name), true,
-          false);
+      reader = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+        streams.get(name), true, false);
     }
 
     @Override
@@ -816,9 +809,9 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       StreamName name = new StreamName(columnId,
           OrcProto.Stream.Kind.DATA);
       stream = streams.get(name);
@@ -908,9 +901,9 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       StreamName name =
         new StreamName(columnId,
           OrcProto.Stream.Kind.DATA);
@@ -1014,14 +1007,14 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       StreamName name = new StreamName(columnId,
           OrcProto.Stream.Kind.DATA);
       stream = streams.get(name);
-      lengths = createIntegerReader(encodings.get(columnId).getKind(), streams.get(new
-          StreamName(columnId, OrcProto.Stream.Kind.LENGTH)), false, false);
+      lengths = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+        streams.get(new StreamName(columnId, OrcProto.Stream.Kind.LENGTH)), false, false);
     }
 
     @Override
@@ -1088,10 +1081,15 @@ public class RecordReaderImpl implements
     }
   }
 
-  public static class TimestampTreeReader extends TreeReader {
+  public static class TimestampTreeReader extends TreeReader{
     protected IntegerReader data = null;
     protected IntegerReader nanos = null;
-    private final boolean skipCorrupt;
+    protected final boolean skipCorrupt;
+    protected Map<String, Long> baseTimestampMap;
+    protected long base_timestamp;
+    protected final TimeZone readerTimeZone;
+    protected TimeZone writerTimeZone;
+    protected boolean hasSameTZRules;
 
     TimestampTreeReader(int columnId, boolean skipCorrupt) throws IOException {
       this(columnId, null, null, null, null, skipCorrupt);
@@ -1102,6 +1100,11 @@ public class RecordReaderImpl implements
         throws IOException {
       super(columnId, presentStream);
       this.skipCorrupt = skipCorrupt;
+      this.baseTimestampMap = new HashMap<>();
+      this.readerTimeZone = TimeZone.getDefault();
+      this.writerTimeZone = readerTimeZone;
+      this.hasSameTZRules = writerTimeZone.hasSameRules(readerTimeZone);
+      this.base_timestamp = getBaseTimestamp(readerTimeZone.getID());
       if (encoding != null) {
         checkEncoding(encoding);
 
@@ -1126,15 +1129,42 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
-      data = createIntegerReader(encodings.get(columnId).getKind(),
+      super.startStripe(streams, stripeFooter);
+      data = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
           streams.get(new StreamName(columnId,
               OrcProto.Stream.Kind.DATA)), true, skipCorrupt);
-      nanos = createIntegerReader(encodings.get(columnId).getKind(),
+      nanos = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
           streams.get(new StreamName(columnId,
               OrcProto.Stream.Kind.SECONDARY)), false, skipCorrupt);
+      base_timestamp = getBaseTimestamp(stripeFooter.getWriterTimezone());
+    }
+
+    private long getBaseTimestamp(String timeZoneId) throws IOException {
+      // to make sure new readers read old files in the same way
+      if (timeZoneId == null || timeZoneId.isEmpty()) {
+        timeZoneId = readerTimeZone.getID();
+      }
+
+      if (!baseTimestampMap.containsKey(timeZoneId)) {
+        writerTimeZone = TimeZone.getTimeZone(timeZoneId);
+        hasSameTZRules = writerTimeZone.hasSameRules(readerTimeZone);
+        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+        sdf.setTimeZone(writerTimeZone);
+        try {
+          long epoch =
+              sdf.parse(WriterImpl.BASE_TIMESTAMP_STRING).getTime() / WriterImpl.MILLIS_PER_SECOND;
+          baseTimestampMap.put(timeZoneId, epoch);
+          return epoch;
+        } catch (ParseException e) {
+          throw new IOException("Unable to create base timestamp", e);
+        } finally {
+          sdf.setTimeZone(readerTimeZone);
+        }
+      }
+
+      return baseTimestampMap.get(timeZoneId);
     }
 
     @Override
@@ -1159,9 +1189,7 @@ public class RecordReaderImpl implements
         } else {
           result = (TimestampWritable) previous;
         }
-        Timestamp ts = new Timestamp(0);
-        long millis = (data.next() + WriterImpl.BASE_TIMESTAMP) *
-            WriterImpl.MILLIS_PER_SECOND;
+        long millis = (data.next() + base_timestamp) * WriterImpl.MILLIS_PER_SECOND;
         int newNanos = parseNanos(nanos.next());
         // fix the rounding when we divided by 1000.
         if (millis >= 0) {
@@ -1169,7 +1197,24 @@ public class RecordReaderImpl implements
         } else {
           millis -= newNanos / 1000000;
         }
-        ts.setTime(millis);
+        long offset = 0;
+        // If reader and writer time zones have different rules, adjust the timezone difference
+        // between reader and writer taking day light savings into account.
+        if (!hasSameTZRules) {
+          offset = writerTimeZone.getOffset(millis) - readerTimeZone.getOffset(millis);
+        }
+        long adjustedMillis = millis + offset;
+        Timestamp ts = new Timestamp(adjustedMillis);
+        // Sometimes the reader timezone might have changed after adding the adjustedMillis.
+        // To account for that change, check for any difference in reader timezone after
+        // adding adjustedMillis. If so use the new offset (offset at adjustedMillis point of time).
+        if (!hasSameTZRules &&
+            (readerTimeZone.getOffset(millis) != readerTimeZone.getOffset(adjustedMillis))) {
+          long newOffset =
+              writerTimeZone.getOffset(millis) - readerTimeZone.getOffset(adjustedMillis);
+            adjustedMillis = millis + newOffset;
+            ts.setTime(adjustedMillis);
+        }
         ts.setNanos(newNanos);
         result.set(ts);
       }
@@ -1248,12 +1293,13 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       StreamName name = new StreamName(columnId,
           OrcProto.Stream.Kind.DATA);
-      reader = createIntegerReader(encodings.get(columnId).getKind(), streams.get(name), true, false);
+      reader = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+          streams.get(name), true, false);
     }
 
     @Override
@@ -1306,7 +1352,7 @@ public class RecordReaderImpl implements
   }
 
   public static class DecimalTreeReader extends TreeReader{
-    protected InStream value;
+    protected InStream valueStream;
     protected IntegerReader scaleReader = null;
     private LongColumnVector scratchScaleVector;
 
@@ -1324,7 +1370,7 @@ public class RecordReaderImpl implements
       this.precision = precision;
       this.scale = scale;
       this.scratchScaleVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
-      this.value = valueStream;
+      this.valueStream = valueStream;
       if (scaleStream != null && encoding != null) {
         checkEncoding(encoding);
         this.scaleReader = createIntegerReader(encoding.getKind(), scaleStream, true, false);
@@ -1342,13 +1388,13 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
     ) throws IOException {
-      super.startStripe(streams, encodings);
-      value = streams.get(new StreamName(columnId,
+      super.startStripe(streams, stripeFooter);
+      valueStream = streams.get(new StreamName(columnId,
           OrcProto.Stream.Kind.DATA));
-      scaleReader = createIntegerReader(encodings.get(columnId).getKind(), streams.get(
-          new StreamName(columnId, OrcProto.Stream.Kind.SECONDARY)), true, false);
+      scaleReader = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
+          streams.get(new StreamName(columnId, OrcProto.Stream.Kind.SECONDARY)), true, false);
     }
 
     @Override
@@ -1359,7 +1405,7 @@ public class RecordReaderImpl implements
     @Override
     public void seek(PositionProvider index) throws IOException {
       super.seek(index);
-      value.seek(index);
+      valueStream.seek(index);
       scaleReader.seek(index);
     }
 
@@ -1373,7 +1419,7 @@ public class RecordReaderImpl implements
         } else {
           result = (HiveDecimalWritable) previous;
         }
-        result.set(HiveDecimal.create(SerializationUtils.readBigInteger(value),
+        result.set(HiveDecimal.create(SerializationUtils.readBigInteger(valueStream),
             (int) scaleReader.next()));
         return HiveDecimalUtils.enforcePrecisionScale(result, precision, scale);
       }
@@ -1398,7 +1444,7 @@ public class RecordReaderImpl implements
       // Read value entries based on isNull entries
       if (result.isRepeating) {
         if (!result.isNull[0]) {
-          BigInteger bInt = SerializationUtils.readBigInteger(value);
+          BigInteger bInt = SerializationUtils.readBigInteger(valueStream);
           short scaleInData = (short) scaleReader.next();
           HiveDecimal dec = HiveDecimal.create(bInt, scaleInData);
           dec = HiveDecimalUtils.enforcePrecisionScale(dec, precision, scale);
@@ -1410,7 +1456,7 @@ public class RecordReaderImpl implements
         scaleReader.nextVector(scratchScaleVector, batchSize);
         for (int i = 0; i < batchSize; i++) {
           if (!result.isNull[i]) {
-            BigInteger bInt = SerializationUtils.readBigInteger(value);
+            BigInteger bInt = SerializationUtils.readBigInteger(valueStream);
             short scaleInData = (short) scratchScaleVector.vector[i];
             HiveDecimal dec = HiveDecimal.create(bInt, scaleInData);
             dec = HiveDecimalUtils.enforcePrecisionScale(dec, precision, scale);
@@ -1427,7 +1473,7 @@ public class RecordReaderImpl implements
     void skipRows(long items) throws IOException {
       items = countNonNulls(items);
       for(int i=0; i < items; i++) {
-        SerializationUtils.readBigInteger(value);
+        SerializationUtils.readBigInteger(valueStream);
       }
       scaleReader.skip(items);
     }
@@ -1474,11 +1520,11 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
       // For each stripe, checks the encoding and initializes the appropriate
       // reader
-      switch (encodings.get(columnId).getKind()) {
+      switch (stripeFooter.getColumnsList().get(columnId).getKind()) {
         case DIRECT:
         case DIRECT_V2:
           reader = new StringDirectTreeReader(columnId);
@@ -1489,9 +1535,9 @@ public class RecordReaderImpl implements
           break;
         default:
           throw new IllegalArgumentException("Unsupported encoding " +
-              encodings.get(columnId).getKind());
+              stripeFooter.getColumnsList().get(columnId).getKind());
       }
-      reader.startStripe(streams, encodings);
+      reader.startStripe(streams, stripeFooter);
     }
 
     @Override
@@ -1625,13 +1671,13 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       StreamName name = new StreamName(columnId,
           OrcProto.Stream.Kind.DATA);
       stream = streams.get(name);
-      lengths = createIntegerReader(encodings.get(columnId).getKind(),
+      lengths = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
           streams.get(new StreamName(columnId, OrcProto.Stream.Kind.LENGTH)),
           false, false);
     }
@@ -1746,9 +1792,9 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
 
       // read the dictionary blob
       StreamName name = new StreamName(columnId,
@@ -1759,11 +1805,11 @@ public class RecordReaderImpl implements
       // read the lengths
       name = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH);
       in = streams.get(name);
-      readDictionaryLengthStream(in, encodings.get(columnId));
+      readDictionaryLengthStream(in, stripeFooter.getColumnsList().get(columnId));
 
       // set up the row reader
       name = new StreamName(columnId, OrcProto.Stream.Kind.DATA);
-      reader = createIntegerReader(encodings.get(columnId).getKind(),
+      reader = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
           streams.get(name), false, false);
     }
 
@@ -2149,12 +2195,12 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       for(TreeReader field: fields) {
         if (field != null) {
-          field.startStripe(streams, encodings);
+          field.startStripe(streams, stripeFooter);
         }
       }
     }
@@ -2225,14 +2271,14 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                      ) throws IOException {
-      super.startStripe(streams, encodings);
+      super.startStripe(streams, stripeFooter);
       tags = new RunLengthByteReader(streams.get(new StreamName(columnId,
           OrcProto.Stream.Kind.DATA)));
       for(TreeReader field: fields) {
         if (field != null) {
-          field.startStripe(streams, encodings);
+          field.startStripe(streams, stripeFooter);
         }
       }
     }
@@ -2317,14 +2363,14 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
-      lengths = createIntegerReader(encodings.get(columnId).getKind(),
+      super.startStripe(streams, stripeFooter);
+      lengths = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
           streams.get(new StreamName(columnId,
               OrcProto.Stream.Kind.LENGTH)), false, false);
       if (elementReader != null) {
-        elementReader.startStripe(streams, encodings);
+        elementReader.startStripe(streams, stripeFooter);
       }
     }
 
@@ -2411,17 +2457,17 @@ public class RecordReaderImpl implements
 
     @Override
     void startStripe(Map<StreamName, InStream> streams,
-                     List<OrcProto.ColumnEncoding> encodings
+                     OrcProto.StripeFooter stripeFooter
                     ) throws IOException {
-      super.startStripe(streams, encodings);
-      lengths = createIntegerReader(encodings.get(columnId).getKind(),
+      super.startStripe(streams, stripeFooter);
+      lengths = createIntegerReader(stripeFooter.getColumnsList().get(columnId).getKind(),
           streams.get(new StreamName(columnId,
               OrcProto.Stream.Kind.LENGTH)), false, false);
       if (keyReader != null) {
-        keyReader.startStripe(streams, encodings);
+        keyReader.startStripe(streams, stripeFooter);
       }
       if (valueReader != null) {
-        valueReader.startStripe(streams, encodings);
+        valueReader.startStripe(streams, stripeFooter);
       }
     }
 
@@ -2891,14 +2937,24 @@ public class RecordReaderImpl implements
     private final int[] filterColumns;
     private final long rowIndexStride;
     private final OrcProto.BloomFilterIndex[] bloomFilterIndices;
+    // same as the above array, but indices are set to true
+    private final boolean[] sargColumns;
 
     public SargApplier(SearchArgument sarg, String[] columnNames, long rowIndexStride,
-        List<OrcProto.Type> types) {
+        List<OrcProto.Type> types, int includedCount) {
       this.sarg = sarg;
       sargLeaves = sarg.getLeaves();
       filterColumns = mapSargColumns(sargLeaves, columnNames, 0);
       bloomFilterIndices = new OrcProto.BloomFilterIndex[types.size()];
       this.rowIndexStride = rowIndexStride;
+      // included will not be null, row options will fill the array with trues if null
+      sargColumns = new boolean[includedCount];
+      for (int i : filterColumns) {
+        // filter columns may have -1 as index which could be partition column in SARG.
+        if (i > 0) {
+          sargColumns[i] = true;
+        }
+      }
     }
 
     /**
@@ -2963,7 +3019,7 @@ public class RecordReaderImpl implements
     if (sargApp == null) {
       return null;
     }
-    readRowIndex(currentStripe, included);
+    readRowIndex(currentStripe, included, sargApp.sargColumns);
     return sargApp.pickRowGroups(stripes.get(currentStripe), indexes);
   }
 
@@ -3008,7 +3064,7 @@ public class RecordReaderImpl implements
       } else {
         readPartialDataStreams(stripe);
       }
-      reader.startStripe(streams, stripeFooter.getColumnsList());
+      reader.startStripe(streams, stripeFooter);
       // if we skipped the first row group, move the pointers forward
       if (rowInStripe != 0) {
         seekToRowEntry(reader, (int) (rowInStripe / rowIndexStride));
@@ -3319,7 +3375,7 @@ public class RecordReaderImpl implements
         result = new VectorizedRowBatch(cols.length);
         result.cols = cols;
       } else {
-        result = previous;
+        result = (VectorizedRowBatch) previous;
         result.selectedInUse = false;
         reader.nextVector(result.cols, (int) batchSize);
       }
@@ -3353,11 +3409,12 @@ public class RecordReaderImpl implements
         endRowGroup += 1;
       }
 
-      final long markerPosition = (endRowGroup * rowIndexStride) < rowCountInStripe ? (endRowGroup * rowIndexStride)
-          : rowCountInStripe;
+      final long markerPosition =
+          (endRowGroup * rowIndexStride) < rowCountInStripe ? (endRowGroup * rowIndexStride)
+              : rowCountInStripe;
       batchSize = Math.min(targetBatchSize, (markerPosition - rowInStripe));
 
-      if (LOG.isDebugEnabled() && batchSize < targetBatchSize) {
+      if (isLogDebugEnabled && batchSize < targetBatchSize) {
         LOG.debug("markerPosition: " + markerPosition + " batchSize: " + batchSize);
       }
     } else {
@@ -3399,8 +3456,9 @@ public class RecordReaderImpl implements
     throw new IllegalArgumentException("Seek after the end of reader range");
   }
 
-  Index readRowIndex(int stripeIndex, boolean[] included) throws IOException {
-    return readRowIndex(stripeIndex, included, null, null, null);
+  Index readRowIndex(
+      int stripeIndex, boolean[] included, boolean[] sargColumns) throws IOException {
+    return readRowIndex(stripeIndex, included, null, null, sargColumns);
   }
 
   Index readRowIndex(int stripeIndex, boolean[] included, OrcProto.RowIndex[] indexes,
@@ -3412,6 +3470,8 @@ public class RecordReaderImpl implements
       stripeFooter = this.stripeFooter;
       indexes = indexes == null ? this.indexes : indexes;
       bloomFilterIndex = bloomFilterIndex == null ? this.bloomFilterIndices : bloomFilterIndex;
+      sargColumns = sargColumns == null ?
+          (sargApp == null ? null : sargApp.sargColumns) : sargColumns;
     }
     return metadata.readRowIndex(stripe, stripeFooter, included, indexes, sargColumns,
         bloomFilterIndex);
@@ -3445,7 +3505,7 @@ public class RecordReaderImpl implements
       currentStripe = rightStripe;
       readStripe();
     }
-    readRowIndex(currentStripe, included);
+    readRowIndex(currentStripe, included, sargApp == null ? null : sargApp.sargColumns);
 
     // if we aren't to the right row yet, advance in the stripe.
     advanceToNextRow(reader, rowNumber, true);

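The TimestampTreeReader hunks above add per-stripe time zone handling: the writer's zone ID comes from the stripe footer, the base timestamp is re-evaluated in that zone (and cached per zone ID), and the decoded milliseconds are shifted by the writer/reader offset difference when the two zones have different rules. A minimal, self-contained sketch of that decode path, assuming the same constants as WriterImpl (the class and method names below are hypothetical, not part of the commit):

import java.sql.Timestamp;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.TimeZone;

// Hedged sketch of the reader-side adjustment: decode seconds relative to a
// base evaluated in the writer's zone, then shift by the offset difference.
public class TimestampZoneSketch {
  static final String BASE_TIMESTAMP_STRING = "2015-01-01 00:00:00";
  static final int MILLIS_PER_SECOND = 1000;

  // Seconds from the epoch to the base timestamp, interpreted in the given zone.
  static long baseSeconds(TimeZone zone) throws ParseException {
    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    sdf.setTimeZone(zone);
    return sdf.parse(BASE_TIMESTAMP_STRING).getTime() / MILLIS_PER_SECOND;
  }

  static Timestamp decode(long encodedSeconds, TimeZone writerZone, TimeZone readerZone)
      throws ParseException {
    long millis = (encodedSeconds + baseSeconds(writerZone)) * MILLIS_PER_SECOND;
    long offset = 0;
    if (!writerZone.hasSameRules(readerZone)) {
      // Account for daylight savings by using the offsets at this instant.
      offset = writerZone.getOffset(millis) - readerZone.getOffset(millis);
    }
    return new Timestamp(millis + offset);
  }

  public static void main(String[] args) throws ParseException {
    TimeZone writer = TimeZone.getTimeZone("America/Los_Angeles");
    TimeZone reader = TimeZone.getTimeZone("Europe/Berlin");
    System.out.println(decode(1000L, writer, reader));
  }
}

The second adjustment in the hunk (re-checking the reader offset at adjustedMillis) only matters when the shift itself crosses a DST boundary; the sketch omits it for brevity.
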
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java Sat Mar 28 00:22:15 2015
@@ -287,19 +287,14 @@ public class RecordReaderUtils {
       while (pos < endPos) {
         int count = file.read(directBuf);
         if (count < 0) throw new EOFException();
-        if (count == 0) {
-          throw new IOException(
-              "0-length read: " + (endPos - pos) + "@" + (pos - startPos) + " and " + pos);
-        }
+        assert count != 0 : "0-length read: " + (endPos - pos) + "@" + (pos - startPos);
         pos += count;
-        if (pos > endPos) {
-          throw new AssertionError(
-              "Position " + pos + " > " + endPos + "(" + len + ") after reading " + count);
-        }
+        assert pos <= endPos : "Position " + pos + " > " + endPos + " after reading " + count;
         directBuf.position(pos);
       }
     } catch (UnsupportedOperationException ex) {
       assert pos == startPos;
+      // Happens in q files and such.
       RecordReaderImpl.LOG.error("Stream does not support direct read; we will copy.");
       byte[] buffer = new byte[len];
       file.readFully(buffer, 0, buffer.length);

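The RecordReaderUtils hunk above downgrades two hard checks to asserts in the direct-read loop and documents the readFully fallback. A minimal sketch of the pattern, assuming the UnsupportedOperationException is raised before any bytes are read (which is what the assert on the position verifies):

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.fs.FSDataInputStream;

// Hedged sketch: fill a ByteBuffer straight from the stream, falling back to a
// heap copy when the stream does not support ByteBuffer reads.
public class DirectReadSketch {
  static void readFully(FSDataInputStream file, ByteBuffer directBuf, int len)
      throws IOException {
    int startPos = directBuf.position();
    int endPos = startPos + len;
    try {
      int pos = startPos;
      while (pos < endPos) {
        int count = file.read(directBuf);  // advances the buffer position
        if (count < 0) {
          throw new EOFException();
        }
        assert count != 0 : "0-length read: " + (endPos - pos) + "@" + (pos - startPos);
        pos += count;
      }
    } catch (UnsupportedOperationException ex) {
      // The stream cannot read into a ByteBuffer; copy through a byte[] instead.
      assert directBuf.position() == startPos;
      byte[] buffer = new byte[len];
      file.readFully(buffer, 0, buffer.length);
      directBuf.put(buffer);
    }
  }
}
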
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java Sat Mar 28 00:22:15 2015
@@ -26,10 +26,10 @@ import java.lang.management.ManagementFa
 import java.nio.ByteBuffer;
 import java.sql.Timestamp;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
+import java.util.TimeZone;
 import java.util.TreeMap;
 
 import org.apache.commons.logging.Log;
@@ -796,6 +796,7 @@ public class WriterImpl implements Write
       foundNulls = false;
 
       builder.addColumns(getEncoding());
+      builder.setWriterTimezone(TimeZone.getDefault().getID());
       if (rowIndexStream != null) {
         if (rowIndex.getEntryCount() != requiredIndexEntries) {
           throw new IllegalArgumentException("Column has wrong number of " +
@@ -1511,13 +1512,13 @@ public class WriterImpl implements Write
   }
 
   static final int MILLIS_PER_SECOND = 1000;
-  static final long BASE_TIMESTAMP =
-      Timestamp.valueOf("2015-01-01 00:00:00").getTime() / MILLIS_PER_SECOND;
+  static final String BASE_TIMESTAMP_STRING = "2015-01-01 00:00:00";
 
   private static class TimestampTreeWriter extends TreeWriter {
     private final IntegerWriter seconds;
     private final IntegerWriter nanos;
     private final boolean isDirectV2;
+    private final long base_timestamp;
 
     TimestampTreeWriter(int columnId,
                      ObjectInspector inspector,
@@ -1530,6 +1531,8 @@ public class WriterImpl implements Write
       this.nanos = createIntegerWriter(writer.createStream(id,
           OrcProto.Stream.Kind.SECONDARY), false, isDirectV2, writer);
       recordPosition(rowIndexPosition);
+      // for unit tests to set different time zones
+      this.base_timestamp = Timestamp.valueOf(BASE_TIMESTAMP_STRING).getTime() / MILLIS_PER_SECOND;
     }
 
     @Override
@@ -1550,7 +1553,7 @@ public class WriterImpl implements Write
             ((TimestampObjectInspector) inspector).
                 getPrimitiveJavaObject(obj);
         indexStatistics.updateTimestamp(val);
-        seconds.write((val.getTime() / MILLIS_PER_SECOND) - BASE_TIMESTAMP);
+        seconds.write((val.getTime() / MILLIS_PER_SECOND) - base_timestamp);
         nanos.write(formatNanos(val.getNanos()));
         if (createBloomFilter) {
           bloomFilter.addLong(val.getTime());

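On the write side, the hunks above record the writer's default time zone ID in each stripe footer and encode every timestamp as whole seconds relative to the fixed 2015-01-01 base (nanos go to the SECONDARY stream). A small sketch of the encoding, reusing the constants from the hunk (the class name is hypothetical):

import java.sql.Timestamp;
import java.util.TimeZone;

// Hedged sketch of the write-side encoding: seconds relative to the base,
// with the writer zone ID recorded so readers can re-evaluate the base.
public class TimestampEncodeSketch {
  static final int MILLIS_PER_SECOND = 1000;
  static final String BASE_TIMESTAMP_STRING = "2015-01-01 00:00:00";

  public static void main(String[] args) {
    long baseTimestamp = Timestamp.valueOf(BASE_TIMESTAMP_STRING).getTime() / MILLIS_PER_SECOND;
    Timestamp val = Timestamp.valueOf("2015-03-28 00:22:15");
    long encodedSeconds = (val.getTime() / MILLIS_PER_SECOND) - baseTimestamp;
    String writerZone = TimeZone.getDefault().getID();  // what setWriterTimezone stores
    System.out.println(writerZone + ": " + encodedSeconds + " seconds from base");
  }
}
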
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Sat Mar 28 00:22:15 2015
@@ -1521,12 +1521,13 @@ private void constructOneLBLocationMap(F
    * @param holdDDLTime
    * @param listBucketingEnabled
    * @param isAcid true if this is an ACID operation
+   * @param txnId txnId, can be 0 unless isAcid == true
    * @return partition map details (PartitionSpec and Partition)
    * @throws HiveException
    */
   public Map<Map<String, String>, Partition> loadDynamicPartitions(Path loadPath,
       String tableName, Map<String, String> partSpec, boolean replace,
-      int numDP, boolean holdDDLTime, boolean listBucketingEnabled, boolean isAcid)
+      int numDP, boolean holdDDLTime, boolean listBucketingEnabled, boolean isAcid, long txnId)
       throws HiveException {
 
     Set<Path> validPartitions = new HashSet<Path>();
@@ -1584,9 +1585,18 @@ private void constructOneLBLocationMap(F
         partitionsMap.put(fullPartSpec, newPartition);
         LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);
       }
+      if (isAcid) {
+        List<String> partNames = new ArrayList<>(partitionsMap.size());
+        for (Partition p : partitionsMap.values()) {
+          partNames.add(p.getName());
+        }
+        metaStoreClient.addDynamicPartitions(txnId, tbl.getDbName(), tbl.getTableName(), partNames);
+      }
       return partitionsMap;
     } catch (IOException e) {
       throw new HiveException(e);
+    } catch (TException te) {
+      throw new HiveException(te);
     }
   }
 

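For ACID loads, the Hive.java hunk above collects the names of all newly created partitions and registers them with the metastore under the load's transaction id, so the transaction's commit or abort covers them. A minimal sketch of the same pattern, with hypothetical interfaces standing in for the real metastore client and Partition:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

// Hedged sketch of the ACID registration step; the real call is
// metaStoreClient.addDynamicPartitions as shown in the hunk.
public class AcidPartitionRegistration {
  interface PartitionLike { String getName(); }
  interface MetaStoreClientLike {
    void addDynamicPartitions(long txnId, String dbName, String tableName,
        List<String> partNames) throws Exception;
  }

  static void registerIfAcid(boolean isAcid, long txnId, String dbName, String tableName,
      Collection<? extends PartitionLike> newPartitions, MetaStoreClientLike client)
      throws Exception {
    if (!isAcid) {
      return;  // txnId may be 0 unless isAcid == true, per the javadoc above
    }
    List<String> partNames = new ArrayList<>(newPartitions.size());
    for (PartitionLike p : newPartitions) {
      partNames.add(p.getName());
    }
    client.addDynamicPartitions(txnId, dbName, tableName, partNames);
  }
}
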
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java Sat Mar 28 00:22:15 2015
@@ -19,6 +19,8 @@
 package org.apache.hadoop.hive.ql.optimizer;
 
 import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
@@ -36,6 +38,8 @@ import org.apache.hadoop.hive.ql.plan.Op
  * implemented, this transformation can also be done based on costs.
  */
 public class JoinReorder implements Transform {
+
+  private final Map<Operator<?>, Integer> cache = new IdentityHashMap<Operator<?>, Integer>();
   /**
    * Estimate the size of the output based on the STREAMTABLE hints. To do so
    * the whole tree is traversed. Possible sizes: 0: the operator and its
@@ -49,8 +53,25 @@ public class JoinReorder implements Tran
    * @return The estimated size - 0 (no streamed tables), 1 (streamed tables in
    *         subtree) or 2 (a streamed table)
    */
+
   private int getOutputSize(Operator<? extends OperatorDesc> operator,
       Set<String> bigTables) {
+
+    // memoize decorator for getOutputSizeInternal
+    if (cache.containsKey(operator)) {
+      return cache.get(operator);
+    }
+
+    int result = getOutputSizeInternal(operator, bigTables);
+
+    cache.put(operator, result);
+
+    return result;
+  }
+
+  private int getOutputSizeInternal(Operator<? extends OperatorDesc> operator,
+      Set<String> bigTables) {
+
     // If a join operator contains a big subtree, there is a chance that its
     // output is also big, so the output size is 1 (medium)
     if (operator instanceof JoinOperator) {
@@ -74,6 +95,7 @@ public class JoinReorder implements Tran
     int maxSize = 0;
     if (operator.getParentOperators() != null) {
       for (Operator<? extends OperatorDesc> o : operator.getParentOperators()) {
+        // recurse into memoized decorator
         int current = getOutputSize(o, bigTables);
         if (current > maxSize) {
           maxSize = current;
@@ -151,8 +173,10 @@ public class JoinReorder implements Tran
    * @param pactx
    *          current parse context
    */
+  @Override
   public ParseContext transform(ParseContext pactx) throws SemanticException {
     Set<String> bigTables = getBigTables(pactx);
+    cache.clear();
 
     for (JoinOperator joinOp : pactx.getJoinOps()) {
       reorder(joinOp, bigTables);

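The JoinReorder hunks above wrap getOutputSize in a memoizing decorator keyed by operator identity, so the size estimate visits each operator in the DAG once instead of re-traversing shared parents; the cache is cleared at the start of every transform call. A generic sketch of the pattern (Node is a hypothetical stand-in for the Hive operator tree):

import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;

// Hedged sketch of memoizing a recursive estimate over a DAG by node identity.
public class MemoizedSizeEstimator {
  interface Node {
    List<Node> parents();
    int localSize();
  }

  private final Map<Node, Integer> cache = new IdentityHashMap<>();

  int outputSize(Node node) {
    Integer cached = cache.get(node);  // memoize decorator around the real computation
    if (cached != null) {
      return cached;
    }
    int result = computeOutputSize(node);
    cache.put(node, result);
    return result;
  }

  private int computeOutputSize(Node node) {
    int maxSize = node.localSize();
    if (node.parents() != null) {
      for (Node parent : node.parents()) {
        maxSize = Math.max(maxSize, outputSize(parent));  // recurse through the cache
      }
    }
    return maxSize;
  }
}

As in the patch, the cache must be reset between runs if the same instance is reused on a different plan.
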
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java Sat Mar 28 00:22:15 2015
@@ -128,7 +128,11 @@ public class Optimizer {
     }
 
     transformations.add(new UnionProcessor());
-    transformations.add(new JoinReorder());
+
+    if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.NWAYJOINREORDER)) {
+      transformations.add(new JoinReorder());
+    }
+
     if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.DYNAMICPARTITIONING) &&
         HiveConf.getVar(hiveConf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE).equals("nonstrict") &&
         HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITION) &&

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java Sat Mar 28 00:22:15 2015
@@ -124,12 +124,14 @@ public class ReduceSinkMapJoinProc imple
     }
     MapJoinDesc joinConf = mapJoinOp.getConf();
     long keyCount = Long.MAX_VALUE, rowCount = Long.MAX_VALUE, bucketCount = 1;
+    long tableSize = Long.MAX_VALUE;
     Statistics stats = parentRS.getStatistics();
     if (stats != null) {
       keyCount = rowCount = stats.getNumRows();
       if (keyCount <= 0) {
         keyCount = rowCount = Long.MAX_VALUE;
       }
+      tableSize = stats.getDataSize();
       ArrayList<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
       if (keyCols != null && !keyCols.isEmpty()) {
         // See if we can arrive at a smaller number using distinct stats from key columns.
@@ -157,6 +159,7 @@ public class ReduceSinkMapJoinProc imple
           // We cannot obtain a better estimate without CustomPartitionVertex providing it
           // to us somehow; in which case using statistics would be completely unnecessary.
           keyCount /= bucketCount;
+          tableSize /= bucketCount;
         }
       }
     }
@@ -166,6 +169,7 @@ public class ReduceSinkMapJoinProc imple
     if (keyCount != Long.MAX_VALUE) {
       joinConf.getParentKeyCounts().put(pos, keyCount);
     }
+    joinConf.getParentDataSizes().put(pos, tableSize);
 
     int numBuckets = -1;
     EdgeType edgeType = EdgeType.BROADCAST_EDGE;

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/TraitsUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/TraitsUtil.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/TraitsUtil.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/TraitsUtil.java Sat Mar 28 00:22:15 2015
@@ -22,7 +22,7 @@ package org.apache.hadoop.hive.ql.optimi
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelCollation;
-import org.apache.calcite.rel.RelCollationImpl;
+import org.apache.calcite.rel.RelCollations;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode;
 
 public class TraitsUtil {
@@ -32,6 +32,6 @@ public class TraitsUtil {
   }
 
   public static RelTraitSet getDefaultTraitSet(RelOptCluster cluster) {
-    return cluster.traitSetOf(HiveRelNode.CONVENTION, RelCollationImpl.EMPTY);
+    return cluster.traitSetOf(HiveRelNode.CONVENTION, RelCollations.EMPTY);
   }
 }

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveGroupingID.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveGroupingID.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveGroupingID.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveGroupingID.java Sat Mar 28 00:22:15 2015
@@ -1,25 +1,25 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
 
-import org.apache.calcite.sql.SqlInternalOperator;
+import org.apache.calcite.sql.SqlAggFunction;
+import org.apache.calcite.sql.SqlFunctionCategory;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.type.InferTypes;
 import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.calcite.sql.type.ReturnTypes;
+import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 
-public class HiveGroupingID extends SqlInternalOperator {
+public class HiveGroupingID extends SqlAggFunction {
 
-  public static final SqlInternalOperator GROUPING__ID =
+  public static final SqlAggFunction INSTANCE =
           new HiveGroupingID();
 
   private HiveGroupingID() {
-    super("$GROUPING__ID",
+    super(VirtualColumn.GROUPINGID.getName(),
             SqlKind.OTHER,
-            0,
-            false,
-            ReturnTypes.BIGINT,
+            ReturnTypes.INTEGER,
             InferTypes.BOOLEAN,
-            OperandTypes.ONE_OR_MORE);
+            OperandTypes.NILADIC,
+            SqlFunctionCategory.USER_DEFINED_FUNCTION);
   }
 
 }
-

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java Sat Mar 28 00:22:15 2015
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
 
+import java.math.BigDecimal;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.Calendar;
@@ -26,6 +27,8 @@ import org.apache.calcite.rel.core.JoinR
 import org.apache.calcite.rel.core.TableScan;
 import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
+import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
@@ -218,6 +221,23 @@ class ASTBuilder {
       val = "'" + val + "'";
     }
       break;
+    case INTERVAL_YEAR_MONTH: {
+      type = HiveParser.TOK_INTERVAL_YEAR_MONTH_LITERAL;
+      BigDecimal monthsBd = (BigDecimal) literal.getValue();
+      HiveIntervalYearMonth intervalYearMonth = new HiveIntervalYearMonth(monthsBd.intValue());
+      val = "'" + intervalYearMonth.toString() + "'";
+      break;
+    }
+    case INTERVAL_DAY_TIME: {
+      type = HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL;
+      BigDecimal millisBd = (BigDecimal) literal.getValue();
+
+      // Calcite literal is in millis, convert to seconds
+      BigDecimal secsBd = millisBd.divide(BigDecimal.valueOf(1000));
+      HiveIntervalDayTime intervalDayTime = new HiveIntervalDayTime(secsBd);
+      val = "'" + intervalDayTime.toString() + "'";
+      break;
+    }
     case NULL:
       type = HiveParser.TOK_NULL;
       break;

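The new interval cases above turn a Calcite literal back into Hive AST text: year-month literals carry total months, and day-time literals carry milliseconds, which are divided by 1000 before constructing a HiveIntervalDayTime from seconds. A small sketch of the day-time step, using only the constructor shown in the hunk:

import java.math.BigDecimal;

import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;

// Hedged sketch: convert a Calcite day-time literal (milliseconds) into the
// quoted interval string used as the AST literal.
public class IntervalLiteralSketch {
  public static void main(String[] args) {
    BigDecimal millisBd = new BigDecimal("93603500");              // 1 day, 2 hours, 3.5 seconds
    BigDecimal secsBd = millisBd.divide(BigDecimal.valueOf(1000)); // 93603.5 seconds
    HiveIntervalDayTime intervalDayTime = new HiveIntervalDayTime(secsBd);
    String val = "'" + intervalDayTime.toString() + "'";
    System.out.println(val);
  }
}
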
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java Sat Mar 28 00:22:15 2015
@@ -189,31 +189,7 @@ public class ASTConverter {
       int i = 0;
 
       for (RexNode r : select.getChildExps()) {
-        // If it is a GroupBy with grouping sets and grouping__id column
-        // is selected, we reformulate to project that column from
-        // the output of the GroupBy operator
-        boolean reformulate = false;
-        if (groupBy != null && groupBy.indicator) {
-          RexNode expr = select.getChildExps().get(i);
-          if (expr instanceof RexCall) {
-            if ( ((RexCall) expr).getOperator().
-                    equals(HiveGroupingID.GROUPING__ID)) {
-              reformulate = true;
-            }
-          }
-        }
-        ASTNode expr;
-        if(reformulate) {
-          RexInputRef iRef = new RexInputRef(
-                  groupBy.getGroupCount() * 2 + groupBy.getAggCallList().size(),
-                  TypeConverter.convert(
-                          VirtualColumn.GROUPINGID.getTypeInfo(),
-                          groupBy.getCluster().getTypeFactory()));
-          expr = iRef.accept(new RexVisitor(schema));
-        }
-        else {
-          expr = r.accept(new RexVisitor(schema, r instanceof RexLiteral));
-        }
+        ASTNode expr = r.accept(new RexVisitor(schema, r instanceof RexLiteral));
         String alias = select.getRowType().getFieldNames().get(i++);
         ASTNode selectExpr = ASTBuilder.selectExpr(expr, alias);
         b.add(selectExpr);
@@ -631,6 +607,10 @@ public class ASTConverter {
       }
       List<AggregateCall> aggs = gBy.getAggCallList();
       for (AggregateCall agg : aggs) {
+        if (agg.getAggregation() == HiveGroupingID.INSTANCE) {
+          add(new ColumnInfo(null,VirtualColumn.GROUPINGID.getName()));
+          continue;
+        }
         int argCount = agg.getArgList().size();
         ASTBuilder b = agg.isDistinct() ? ASTBuilder.construct(HiveParser.TOK_FUNCTIONDI,
             "TOK_FUNCTIONDI") : argCount == 0 ? ASTBuilder.construct(HiveParser.TOK_FUNCTIONSTAR,
@@ -643,9 +623,6 @@ public class ASTConverter {
         }
         add(new ColumnInfo(null, b.node()));
       }
-      if(gBy.indicator) {
-        add(new ColumnInfo(null,VirtualColumn.GROUPINGID.getName()));
-      }
     }
 
     /**

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java Sat Mar 28 00:22:15 2015
@@ -29,6 +29,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.calcite.avatica.util.TimeUnit;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.type.RelDataType;
@@ -37,14 +38,18 @@ import org.apache.calcite.rex.RexBuilder
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.sql.SqlIntervalQualifier;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.fun.SqlCastFunction;
+import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.common.type.Decimal128;
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
+import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
@@ -371,13 +376,15 @@ public class RexNodeConverter {
       calciteLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Double) value), calciteDataType);
       break;
     case CHAR:
-      if (value instanceof HiveChar)
+      if (value instanceof HiveChar) {
         value = ((HiveChar) value).getValue();
+      }
       calciteLiteral = rexBuilder.makeLiteral((String) value);
       break;
     case VARCHAR:
-      if (value instanceof HiveVarchar)
+      if (value instanceof HiveVarchar) {
         value = ((HiveVarchar) value).getValue();
+      }
       calciteLiteral = rexBuilder.makeLiteral((String) value);
       break;
     case STRING:
@@ -398,6 +405,21 @@ public class RexNodeConverter {
       }
       calciteLiteral = rexBuilder.makeTimestampLiteral(c, RelDataType.PRECISION_NOT_SPECIFIED);
       break;
+    case INTERVAL_YEAR_MONTH:
+      // Calcite year-month literal value is months as BigDecimal
+      BigDecimal totalMonths = BigDecimal.valueOf(((HiveIntervalYearMonth) value).getTotalMonths());
+      calciteLiteral = rexBuilder.makeIntervalLiteral(totalMonths,
+          new SqlIntervalQualifier(TimeUnit.YEAR, TimeUnit.MONTH, new SqlParserPos(1,1)));
+      break;
+    case INTERVAL_DAY_TIME:
+      // Calcite day-time interval is millis value as BigDecimal
+      // Seconds converted to millis
+      BigDecimal secsValueBd = BigDecimal.valueOf(((HiveIntervalDayTime) value).getTotalSeconds() * 1000);
+      // Nanos converted to millis
+      BigDecimal nanosValueBd = BigDecimal.valueOf(((HiveIntervalDayTime) value).getNanos(), 6);
+      calciteLiteral = rexBuilder.makeIntervalLiteral(secsValueBd.add(nanosValueBd),
+          new SqlIntervalQualifier(TimeUnit.DAY, TimeUnit.SECOND, new SqlParserPos(1,1)));
+      break;
     case VOID:
       calciteLiteral = cluster.getRexBuilder().makeLiteral(null,
           cluster.getTypeFactory().createSqlType(SqlTypeName.NULL), true);

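RexNodeConverter goes the other way: Calcite represents a day-time interval as a single BigDecimal of milliseconds, so whole seconds are scaled up by 1000 and the nanosecond component is scaled down by 10^6 before the two are added. A worked sketch of that arithmetic, using the same accessors as the hunk:

import java.math.BigDecimal;

import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;

// Hedged sketch: 93603.5 seconds -> 93603000 ms from getTotalSeconds(),
// plus 500000000 ns scaled by 10^-6 -> 500 ms, giving 93603500 ms total.
public class IntervalToCalciteMillis {
  public static void main(String[] args) {
    HiveIntervalDayTime value = new HiveIntervalDayTime(new BigDecimal("93603.5"));
    BigDecimal secsValueBd = BigDecimal.valueOf(value.getTotalSeconds() * 1000);
    BigDecimal nanosValueBd = BigDecimal.valueOf(value.getNanos(), 6);
    System.out.println(secsValueBd.add(nanosValueBd)); // 93603500.000000
  }
}
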
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java Sat Mar 28 00:22:15 2015
@@ -22,11 +22,14 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.calcite.avatica.util.TimeUnit;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.SqlIntervalQualifier;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
@@ -65,6 +68,8 @@ public class TypeConverter {
     b.put(SqlTypeName.DOUBLE.getName(), new HiveToken(HiveParser.TOK_DOUBLE, "TOK_DOUBLE"));
     b.put(SqlTypeName.DATE.getName(), new HiveToken(HiveParser.TOK_DATE, "TOK_DATE"));
     b.put(SqlTypeName.TIMESTAMP.getName(), new HiveToken(HiveParser.TOK_TIMESTAMP, "TOK_TIMESTAMP"));
+    b.put(SqlTypeName.INTERVAL_YEAR_MONTH.getName(), new HiveToken(HiveParser.TOK_INTERVAL_YEAR_MONTH, "TOK_INTERVAL_YEAR_MONTH"));
+    b.put(SqlTypeName.INTERVAL_DAY_TIME.getName(), new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME, "TOK_INTERVAL_DAY_TIME"));
     b.put(SqlTypeName.BINARY.getName(), new HiveToken(HiveParser.TOK_BINARY, "TOK_BINARY"));
     calciteToHiveTypeNameMap = b.build();
   };
@@ -162,6 +167,14 @@ public class TypeConverter {
     case TIMESTAMP:
       convertedType = dtFactory.createSqlType(SqlTypeName.TIMESTAMP);
       break;
+    case INTERVAL_YEAR_MONTH:
+      convertedType = dtFactory.createSqlIntervalType(
+          new SqlIntervalQualifier(TimeUnit.YEAR, TimeUnit.MONTH, new SqlParserPos(1,1)));
+      break;
+    case INTERVAL_DAY_TIME:
+      convertedType = dtFactory.createSqlIntervalType(
+          new SqlIntervalQualifier(TimeUnit.DAY, TimeUnit.SECOND, new SqlParserPos(1,1)));
+      break;
     case BINARY:
       convertedType = dtFactory.createSqlType(SqlTypeName.BINARY);
       break;
@@ -277,6 +290,10 @@ public class TypeConverter {
       return TypeInfoFactory.dateTypeInfo;
     case TIMESTAMP:
       return TypeInfoFactory.timestampTypeInfo;
+    case INTERVAL_YEAR_MONTH:
+      return TypeInfoFactory.intervalYearMonthTypeInfo;
+    case INTERVAL_DAY_TIME:
+      return TypeInfoFactory.intervalDayTimeTypeInfo;
     case BINARY:
       return TypeInfoFactory.binaryTypeInfo;
     case DECIMAL:

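For reference, the two new TypeConverter cases reduce to building a Calcite interval type from a SqlIntervalQualifier. A sketch using only the Calcite calls visible in the hunk; the helper class and method names are illustrative.

    import org.apache.calcite.avatica.util.TimeUnit;
    import org.apache.calcite.rel.type.RelDataType;
    import org.apache.calcite.rel.type.RelDataTypeFactory;
    import org.apache.calcite.sql.SqlIntervalQualifier;
    import org.apache.calcite.sql.parser.SqlParserPos;

    public class IntervalTypeSketch {
      static RelDataType yearMonthType(RelDataTypeFactory dtFactory) {
        return dtFactory.createSqlIntervalType(
            new SqlIntervalQualifier(TimeUnit.YEAR, TimeUnit.MONTH, new SqlParserPos(1, 1)));
      }

      static RelDataType dayTimeType(RelDataTypeFactory dtFactory) {
        return dtFactory.createSqlIntervalType(
            new SqlIntervalQualifier(TimeUnit.DAY, TimeUnit.SECOND, new SqlParserPos(1, 1)));
      }
    }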
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java Sat Mar 28 00:22:15 2015
@@ -446,14 +446,15 @@ public final class CorrelationUtilities
     }
   }
 
-  /** throw a exception if the input operator is null
+  /**
+   * Throws an exception if the input operator is null
+   *
    * @param operator
-   * @throws HiveException
+   * @throws SemanticException if the input operator is null
    */
   protected static void isNullOperator(Operator<?> operator) throws SemanticException {
     if (operator == null) {
-      throw new SemanticException("Operator " + operator.getName() + " (ID: " +
-          operator.getIdentifier() + ") is null.");
+      throw new SemanticException("Operator is null.");
     }
   }
 

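The rewritten message also fixes a latent NullPointerException: the old code built the exception text by calling operator.getName() on the very reference it had just found to be null. A tiny illustration of the fixed guard, with IllegalStateException standing in for SemanticException to keep the sketch self-contained.

    public class NullGuardSketch {
      static void isNullOperator(Object operator) {
        if (operator == null) {
          // old (broken): "Operator " + operator.getName() + " ..." would itself NPE here
          throw new IllegalStateException("Operator is null.");
        }
      }
    }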
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java Sat Mar 28 00:22:15 2015
@@ -118,6 +118,7 @@ import org.apache.hadoop.hive.ql.udf.UDF
 import org.apache.hadoop.hive.ql.udf.UDFWeekOfYear;
 import org.apache.hadoop.hive.ql.udf.UDFYear;
 import org.apache.hadoop.hive.ql.udf.generic.*;
+import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
@@ -148,6 +149,8 @@ public class Vectorizer implements Physi
     patternBuilder.append("|long");
     patternBuilder.append("|short");
     patternBuilder.append("|timestamp");
+    patternBuilder.append("|" + serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME);
+    patternBuilder.append("|" + serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME);
     patternBuilder.append("|boolean");
     patternBuilder.append("|binary");
     patternBuilder.append("|string");
@@ -261,6 +264,8 @@ public class Vectorizer implements Physi
     supportedGenericUDFs.add(GenericUDFToDate.class);
     supportedGenericUDFs.add(GenericUDFToChar.class);
     supportedGenericUDFs.add(GenericUDFToVarchar.class);
+    supportedGenericUDFs.add(GenericUDFToIntervalYearMonth.class);
+    supportedGenericUDFs.add(GenericUDFToIntervalDayTime.class);
 
     // For conditional expressions
     supportedGenericUDFs.add(GenericUDFIf.class);

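The Vectorizer change appends the two interval type names to the alternation of vectorizable column types and registers the corresponding cast UDFs. A rough sketch of how such a pattern screens column types; the literal names below are assumed to match serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME and INTERVAL_DAY_TIME_TYPE_NAME, and the alternation is trimmed to a few entries for brevity.

    import java.util.regex.Pattern;

    public class SupportedTypeSketch {
      // Trimmed, illustrative version of the Vectorizer type pattern.
      private static final Pattern SUPPORTED = Pattern.compile(
          "int|long|short|timestamp|interval_year_month|interval_day_time"
              + "|boolean|binary|string",
          Pattern.CASE_INSENSITIVE);

      static boolean isSupported(String typeName) {
        return SUPPORTED.matcher(typeName).matches();
      }

      public static void main(String[] args) {
        System.out.println(isSupported("interval_day_time")); // true
        System.out.println(isSupported("map<string,int>"));   // false
      }
    }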
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java Sat Mar 28 00:22:15 2015
@@ -49,6 +49,7 @@ import org.apache.calcite.plan.hep.HepPr
 import org.apache.calcite.plan.hep.HepProgramBuilder;
 import org.apache.calcite.rel.InvalidRelException;
 import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelCollations;
 import org.apache.calcite.rel.RelCollationImpl;
 import org.apache.calcite.rel.RelFieldCollation;
 import org.apache.calcite.rel.RelNode;
@@ -707,7 +708,7 @@ public class CalcitePlanner extends Sema
       List<RelMetadataProvider> list = Lists.newArrayList();
       list.add(HiveDefaultRelMetadataProvider.INSTANCE);
       RelTraitSet desiredTraits = cluster
-          .traitSetOf(HiveRelNode.CONVENTION, RelCollationImpl.EMPTY);
+          .traitSetOf(HiveRelNode.CONVENTION, RelCollations.EMPTY);
 
       HepProgram hepPgm = null;
       HepProgramBuilder hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP)
@@ -1519,6 +1520,7 @@ public class CalcitePlanner extends Sema
       RexNodeConverter converter = new RexNodeConverter(this.cluster, srcRel.getRowType(), posMap,
           0, false);
 
+      final boolean hasGroupSets = groupSets != null && !groupSets.isEmpty();
       final List<RexNode> gbChildProjLst = Lists.newArrayList();
       final HashMap<String, Integer> rexNodeToPosMap = new HashMap<String, Integer>();
       final List<Integer> groupSetPositions = Lists.newArrayList();
@@ -1533,23 +1535,10 @@ public class CalcitePlanner extends Sema
       }
       final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
 
-      List<AggregateCall> aggregateCalls = Lists.newArrayList();
-      for (AggInfo agg : aggInfoLst) {
-        aggregateCalls.add(convertGBAgg(agg, srcRel, gbChildProjLst, converter, rexNodeToPosMap,
-            gbChildProjLst.size()));
-      }
-
-      if (gbChildProjLst.isEmpty()) {
-        // This will happen for count(*), in such cases we arbitarily pick
-        // first element from srcRel
-        gbChildProjLst.add(this.cluster.getRexBuilder().makeInputRef(srcRel, 0));
-      }
-      RelNode gbInputRel = HiveProject.create(srcRel, gbChildProjLst, null);
-
       // Grouping sets: we need to transform them into ImmutableBitSet
       // objects for Calcite
       List<ImmutableBitSet> transformedGroupSets = null;
-      if(groupSets != null && !groupSets.isEmpty()) {
+      if(hasGroupSets) {
         Set<ImmutableBitSet> setTransformedGroupSets =
                 new HashSet<ImmutableBitSet>(groupSets.size());
         for(int val: groupSets) {
@@ -1560,6 +1549,27 @@ public class CalcitePlanner extends Sema
         Collections.sort(transformedGroupSets, ImmutableBitSet.COMPARATOR);
       }
 
+      List<AggregateCall> aggregateCalls = Lists.newArrayList();
+      for (AggInfo agg : aggInfoLst) {
+        aggregateCalls.add(convertGBAgg(agg, srcRel, gbChildProjLst, converter, rexNodeToPosMap,
+            gbChildProjLst.size()));
+      }
+      if (hasGroupSets) {
+        // Create GroupingID column
+        AggregateCall aggCall = new AggregateCall(HiveGroupingID.INSTANCE,
+                false, new ImmutableList.Builder<Integer>().build(),
+                this.cluster.getTypeFactory().createSqlType(SqlTypeName.INTEGER),
+                HiveGroupingID.INSTANCE.getName());
+        aggregateCalls.add(aggCall);
+      }
+
+      if (gbChildProjLst.isEmpty()) {
+        // This will happen for count(*); in such cases we arbitrarily pick
+        // the first element from srcRel
+        gbChildProjLst.add(this.cluster.getRexBuilder().makeInputRef(srcRel, 0));
+      }
+      RelNode gbInputRel = HiveProject.create(srcRel, gbChildProjLst, null);
+
       HiveRelNode aggregateRel = null;
       try {
         aggregateRel = new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
@@ -1782,7 +1792,19 @@ public class CalcitePlanner extends Sema
           } else if (qbp.getDestGroupingSets().contains(detsClauseName)) {
             groupingSets = getGroupingSets(grpByAstExprs, qbp, detsClauseName);
           }
-          groupingColsSize = groupingColsSize * 2;
+          
+          final int limit = groupingColsSize * 2;
+          while (groupingColsSize < limit) {
+            String field = getColumnInternalName(groupingColsSize);
+            outputColumnNames.add(field);
+            groupByOutputRowResolver.put(null, field,
+                    new ColumnInfo(
+                            field,
+                            TypeInfoFactory.booleanTypeInfo,
+                            null,
+                            false));
+            groupingColsSize++;
+          }
         }
 
         // 5. Construct aggregation function Info
@@ -1820,55 +1842,23 @@ public class CalcitePlanner extends Sema
           }
         }
 
-        gbRel = genGBRelNode(gbExprNDescLst, aggregations, groupingSets, srcRel);
-        relToHiveColNameCalcitePosMap.put(gbRel,
-            buildHiveToCalciteColumnMap(groupByOutputRowResolver, gbRel));
-        this.relToHiveRR.put(gbRel, groupByOutputRowResolver);
-
-        // 6. If GroupingSets, Cube, Rollup were used, we account grouping__id.
-        // Further, we insert a project operator on top to remove the grouping
-        // boolean associated to each column in Calcite; this will avoid
-        // recalculating all column positions when we go back from Calcite to Hive
+        // 6. If GroupingSets, Cube, Rollup were used, we account grouping__id
         if(groupingSets != null && !groupingSets.isEmpty()) {
-          RowResolver selectOutputRowResolver = new RowResolver();
-          selectOutputRowResolver.setIsExprResolver(true);
-          RowResolver.add(selectOutputRowResolver, groupByOutputRowResolver);
-          outputColumnNames = new ArrayList<String>(outputColumnNames);
-
-          // 6.1 List of columns to keep from groupBy operator
-          List<RelDataTypeField> gbOutput = gbRel.getRowType().getFieldList();
-          List<RexNode> calciteColLst = new ArrayList<RexNode>();
-          for(RelDataTypeField gbOut: gbOutput) {
-            if(gbOut.getIndex() < gbExprNDescLst.size() ||
-                    gbOut.getIndex() >= gbExprNDescLst.size() * 2) {
-              calciteColLst.add(new RexInputRef(gbOut.getIndex(), gbOut.getType()));
-            }
-          }
-
-          // 6.2 Add column for grouping_id function
           String field = getColumnInternalName(groupingColsSize + aggregations.size());
           outputColumnNames.add(field);
-          selectOutputRowResolver.put(null, VirtualColumn.GROUPINGID.getName(),
+          groupByOutputRowResolver.put(null, VirtualColumn.GROUPINGID.getName(),
                   new ColumnInfo(
                           field,
-                          TypeInfoFactory.stringTypeInfo,
+                          TypeInfoFactory.intTypeInfo,
                           null,
                           true));
-
-          // 6.3 Compute column for grouping_id function in Calcite
-          List<RexNode> identifierCols = new ArrayList<RexNode>();
-          for(int i = gbExprNDescLst.size(); i < gbExprNDescLst.size() * 2; i++) {
-            identifierCols.add(new RexInputRef(
-                    i, gbOutput.get(i).getType()));
-          }
-          final RexBuilder rexBuilder = cluster.getRexBuilder();
-          RexNode groupingID = rexBuilder.makeCall(HiveGroupingID.GROUPING__ID,
-                  identifierCols);
-          calciteColLst.add(groupingID);
-
-          // Create select
-          gbRel = this.genSelectRelNode(calciteColLst, selectOutputRowResolver, gbRel);
         }
+
+        // 7. We create the group_by operator
+        gbRel = genGBRelNode(gbExprNDescLst, aggregations, groupingSets, srcRel);
+        relToHiveColNameCalcitePosMap.put(gbRel,
+            buildHiveToCalciteColumnMap(groupByOutputRowResolver, gbRel));
+        this.relToHiveRR.put(gbRel, groupByOutputRowResolver);
       }
 
       return gbRel;
@@ -2030,7 +2020,7 @@ public class CalcitePlanner extends Sema
       if (limit != null) {
         RexNode fetch = cluster.getRexBuilder().makeExactLiteral(BigDecimal.valueOf(limit));
         RelTraitSet traitSet = cluster.traitSetOf(HiveRelNode.CONVENTION);
-        RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.EMPTY);
+        RelCollation canonizedCollation = traitSet.canonize(RelCollations.EMPTY);
         sortRel = new HiveSort(cluster, traitSet, srcRel, canonizedCollation, null, fetch);
 
         RowResolver outputRR = new RowResolver();

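In the reordered grouping-sets path, the GROUPING__ID column is now produced directly as an extra AggregateCall (typed INTEGER rather than the previous string-typed projection), and each grouping set must first be turned into a Calcite ImmutableBitSet. A sketch of the decoding step, assuming each grouping set arrives as an integer bitmask over the group-by keys; the exact bit convention is Hive-internal, so treat this as illustrative.

    import java.util.ArrayList;
    import java.util.List;

    public class GroupingSetSketch {
      // Recover the group-key positions encoded in one grouping-set bitmask.
      static List<Integer> positions(int bitmask, int numKeys) {
        List<Integer> pos = new ArrayList<Integer>();
        for (int i = 0; i < numKeys; i++) {
          if ((bitmask & (1 << i)) != 0) {
            pos.add(i);
          }
        }
        return pos;
      }

      public static void main(String[] args) {
        System.out.println(positions(3, 2)); // [0, 1] -> both keys grouped
        System.out.println(positions(1, 2)); // [0]    -> only the first key
      }
    }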
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java Sat Mar 28 00:22:15 2015
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.ql.exec.Ut
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.session.SessionState;
 
 /**
  * ColumnStatsSemanticAnalyzer.
@@ -94,13 +95,9 @@ public class ColumnStatsSemanticAnalyzer
 
   private Table getTable(ASTNode tree) throws SemanticException {
     String tableName = getUnescapedName((ASTNode) tree.getChild(0).getChild(0));
-    try {
-      return db.getTable(tableName);
-    } catch (InvalidTableException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e);
-    } catch (HiveException e) {
-      throw new SemanticException(e.getMessage(), e);
-    }
+    String currentDb = SessionState.get().getCurrentDatabase();
+    String [] names = Utilities.getDbTableName(currentDb, tableName);
+    return getTable(names[0], names[1], true);
   }
 
   private Map<String,String> getPartKeyValuePairsFromAST(ASTNode tree) {
@@ -315,6 +312,8 @@ public class ColumnStatsSemanticAnalyzer
       }
     }
     rewrittenQueryBuilder.append(" from ");
+    rewrittenQueryBuilder.append(tbl.getDbName());
+    rewrittenQueryBuilder.append(".");
     rewrittenQueryBuilder.append(tbl.getTableName());
     isRewritten = true;
 
@@ -428,7 +427,7 @@ public class ColumnStatsSemanticAnalyzer
       qb.setAnalyzeRewrite(true);
       qbp = qb.getParseInfo();
       analyzeRewrite = new AnalyzeRewriteContext();
-      analyzeRewrite.setTableName(tbl.getTableName());
+      analyzeRewrite.setTableName(tbl.getDbName() + "." + tbl.getTableName());
       analyzeRewrite.setTblLvl(isTableLevel);
       analyzeRewrite.setColName(colNames);
       analyzeRewrite.setColType(colType);

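Both here and in DDLSemanticAnalyzer below, the table is now carried as a db-qualified name so the rewritten statistics query resolves against the correct database. Illustrative only, with a hypothetical database/table pair; the real code reads these from the Table object via getDbName() and getTableName().

    public class QualifiedNameSketch {
      static String fromClause(String dbName, String tableName) {
        return " from " + dbName + "." + tableName;
      }

      public static void main(String[] args) {
        System.out.println(fromClause("sales_db", "orders")); // " from sales_db.orders"
      }
    }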
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Sat Mar 28 00:22:15 2015
@@ -509,7 +509,7 @@ public class DDLSemanticAnalyzer extends
     if (colType == null)
       throw new SemanticException("column type not found");
 
-    ColumnStatsDesc cStatsDesc = new ColumnStatsDesc(tbl.getTableName(),
+    ColumnStatsDesc cStatsDesc = new ColumnStatsDesc(tbl.getDbName() + "." + tbl.getTableName(),
         Arrays.asList(colName), Arrays.asList(colType), partSpec == null);
     ColumnStatsUpdateTask cStatsUpdateTask = (ColumnStatsUpdateTask) TaskFactory
         .get(new ColumnStatsUpdateWork(cStatsDesc, partName, mapProp), conf);

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g Sat Mar 28 00:22:15 2015
@@ -122,6 +122,7 @@ KW_DOUBLE: 'DOUBLE';
 KW_DATE: 'DATE';
 KW_DATETIME: 'DATETIME';
 KW_TIMESTAMP: 'TIMESTAMP';
+KW_INTERVAL: 'INTERVAL';
 KW_DECIMAL: 'DECIMAL';
 KW_STRING: 'STRING';
 KW_CHAR: 'CHAR';
@@ -297,6 +298,12 @@ KW_AUTHORIZATION: 'AUTHORIZATION';
 KW_CONF: 'CONF';
 KW_VALUES: 'VALUES';
 KW_RELOAD: 'RELOAD';
+KW_YEAR: 'YEAR';
+KW_MONTH: 'MONTH';
+KW_DAY: 'DAY';
+KW_HOUR: 'HOUR';
+KW_MINUTE: 'MINUTE';
+KW_SECOND: 'SECOND';
 
 // Operators
 // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work.

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Sat Mar 28 00:22:15 2015
@@ -111,6 +111,16 @@ TOK_DATELITERAL;
 TOK_DATETIME;
 TOK_TIMESTAMP;
 TOK_TIMESTAMPLITERAL;
+TOK_INTERVAL_YEAR_MONTH;
+TOK_INTERVAL_YEAR_MONTH_LITERAL;
+TOK_INTERVAL_DAY_TIME;
+TOK_INTERVAL_DAY_TIME_LITERAL;
+TOK_INTERVAL_YEAR_LITERAL;
+TOK_INTERVAL_MONTH_LITERAL;
+TOK_INTERVAL_DAY_LITERAL;
+TOK_INTERVAL_HOUR_LITERAL;
+TOK_INTERVAL_MINUTE_LITERAL;
+TOK_INTERVAL_SECOND_LITERAL;
 TOK_STRING;
 TOK_CHAR;
 TOK_VARCHAR;
@@ -2034,6 +2044,9 @@ primitiveType
     | KW_DATE          ->    TOK_DATE
     | KW_DATETIME      ->    TOK_DATETIME
     | KW_TIMESTAMP     ->    TOK_TIMESTAMP
+    // Uncomment to allow intervals as table column types
+    //| KW_INTERVAL KW_YEAR KW_TO KW_MONTH -> TOK_INTERVAL_YEAR_MONTH
+    //| KW_INTERVAL KW_DAY KW_TO KW_SECOND -> TOK_INTERVAL_DAY_TIME
     | KW_STRING        ->    TOK_STRING
     | KW_BINARY        ->    TOK_BINARY
     | KW_DECIMAL (LPAREN prec=Number (COMMA scale=Number)? RPAREN)? -> ^(TOK_DECIMAL $prec? $scale?)

Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g Sat Mar 28 00:22:15 2015
@@ -248,6 +248,7 @@ constant
     Number
     | dateLiteral
     | timestampLiteral
+    | intervalLiteral
     | StringLiteral
     | stringLiteralSequence
     | BigintLiteral
@@ -292,6 +293,26 @@ timestampLiteral
     KW_CURRENT_TIMESTAMP -> ^(TOK_FUNCTION KW_CURRENT_TIMESTAMP)
     ;
 
+intervalLiteral
+    :
+    KW_INTERVAL StringLiteral qualifiers=intervalQualifiers ->
+    {
+      adaptor.create(qualifiers.tree.token.getType(), $StringLiteral.text)
+    }
+    ;
+
+intervalQualifiers
+    :
+    KW_YEAR KW_TO KW_MONTH -> TOK_INTERVAL_YEAR_MONTH_LITERAL
+    | KW_DAY KW_TO KW_SECOND -> TOK_INTERVAL_DAY_TIME_LITERAL
+    | KW_YEAR -> TOK_INTERVAL_YEAR_LITERAL
+    | KW_MONTH -> TOK_INTERVAL_MONTH_LITERAL
+    | KW_DAY -> TOK_INTERVAL_DAY_LITERAL
+    | KW_HOUR -> TOK_INTERVAL_HOUR_LITERAL
+    | KW_MINUTE -> TOK_INTERVAL_MINUTE_LITERAL
+    | KW_SECOND -> TOK_INTERVAL_SECOND_LITERAL
+    ;
+
 expression
 @init { gParent.pushMsg("expression specification", state); }
 @after { gParent.popMsg(state); }
@@ -592,22 +613,22 @@ nonReserved
     :
     KW_ADD | KW_ADMIN | KW_AFTER | KW_ANALYZE | KW_ARCHIVE | KW_ASC | KW_BEFORE | KW_BUCKET | KW_BUCKETS
     | KW_CASCADE | KW_CHANGE | KW_CLUSTER | KW_CLUSTERED | KW_CLUSTERSTATUS | KW_COLLECTION | KW_COLUMNS
-    | KW_COMMENT | KW_COMPACT | KW_COMPACTIONS | KW_COMPUTE | KW_CONCATENATE | KW_CONTINUE | KW_DATA 
+    | KW_COMMENT | KW_COMPACT | KW_COMPACTIONS | KW_COMPUTE | KW_CONCATENATE | KW_CONTINUE | KW_DATA | KW_DAY
     | KW_DATABASES | KW_DATETIME | KW_DBPROPERTIES | KW_DEFERRED | KW_DEFINED | KW_DELIMITED | KW_DEPENDENCY 
     | KW_DESC | KW_DIRECTORIES | KW_DIRECTORY | KW_DISABLE | KW_DISTRIBUTE | KW_ELEM_TYPE 
     | KW_ENABLE | KW_ESCAPED | KW_EXCLUSIVE | KW_EXPLAIN | KW_EXPORT | KW_FIELDS | KW_FILE | KW_FILEFORMAT
-    | KW_FIRST | KW_FORMAT | KW_FORMATTED | KW_FUNCTIONS | KW_HOLD_DDLTIME | KW_IDXPROPERTIES | KW_IGNORE
+    | KW_FIRST | KW_FORMAT | KW_FORMATTED | KW_FUNCTIONS | KW_HOLD_DDLTIME | KW_HOUR | KW_IDXPROPERTIES | KW_IGNORE
     | KW_INDEX | KW_INDEXES | KW_INPATH | KW_INPUTDRIVER | KW_INPUTFORMAT | KW_ITEMS | KW_JAR
     | KW_KEYS | KW_KEY_TYPE | KW_LIMIT | KW_LINES | KW_LOAD | KW_LOCATION | KW_LOCK | KW_LOCKS | KW_LOGICAL | KW_LONG
-    | KW_MAPJOIN | KW_MATERIALIZED | KW_MINUS | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_OFFLINE | KW_OPTION
+    | KW_MAPJOIN | KW_MATERIALIZED | KW_MINUS | KW_MINUTE | KW_MONTH | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_OFFLINE | KW_OPTION
     | KW_OUTPUTDRIVER | KW_OUTPUTFORMAT | KW_OVERWRITE | KW_OWNER | KW_PARTITIONED | KW_PARTITIONS | KW_PLUS | KW_PRETTY | KW_PRINCIPALS
     | KW_PROTECTION | KW_PURGE | KW_READ | KW_READONLY | KW_REBUILD | KW_RECORDREADER | KW_RECORDWRITER
     | KW_REGEXP | KW_RELOAD | KW_RENAME | KW_REPAIR | KW_REPLACE | KW_RESTRICT | KW_REWRITE | KW_RLIKE
-    | KW_ROLE | KW_ROLES | KW_SCHEMA | KW_SCHEMAS | KW_SEMI | KW_SERDE | KW_SERDEPROPERTIES | KW_SERVER | KW_SETS | KW_SHARED
+    | KW_ROLE | KW_ROLES | KW_SCHEMA | KW_SCHEMAS | KW_SECOND | KW_SEMI | KW_SERDE | KW_SERDEPROPERTIES | KW_SERVER | KW_SETS | KW_SHARED
     | KW_SHOW | KW_SHOW_DATABASE | KW_SKEWED | KW_SORT | KW_SORTED | KW_SSL | KW_STATISTICS | KW_STORED
     | KW_STREAMTABLE | KW_STRING | KW_STRUCT | KW_TABLES | KW_TBLPROPERTIES | KW_TEMPORARY | KW_TERMINATED
     | KW_TINYINT | KW_TOUCH | KW_TRANSACTIONS | KW_UNARCHIVE | KW_UNDO | KW_UNIONTYPE | KW_UNLOCK | KW_UNSET
-    | KW_UNSIGNED | KW_URI | KW_USE | KW_UTC | KW_UTCTIMESTAMP | KW_VALUE_TYPE | KW_VIEW | KW_WHILE
+    | KW_UNSIGNED | KW_URI | KW_USE | KW_UTC | KW_UTCTIMESTAMP | KW_VALUE_TYPE | KW_VIEW | KW_WHILE | KW_YEAR
     ;
 
 //The following SQL2011 reserved keywords are used as cast function name only, it is a subset of the sql11ReservedKeywordsUsedAsIdentifier.

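With the lexer keywords and the intervalLiteral/intervalQualifiers rules above, INTERVAL constants become parseable inside expressions. A rough sketch of exercising the new syntax through Hive's ParseDriver, assuming its parse(String) entry point as used in the parser tests; the table name and interval string formats are examples, not taken from the patch.

    import org.apache.hadoop.hive.ql.parse.ASTNode;
    import org.apache.hadoop.hive.ql.parse.ParseDriver;

    public class IntervalParseSketch {
      public static void main(String[] args) throws Exception {
        ParseDriver pd = new ParseDriver();
        ASTNode yearMonth = pd.parse(
            "select ts + interval '1-2' year to month from src");
        ASTNode dayTime = pd.parse(
            "select ts - interval '1 2:3:4.5' day to second from src");
        // The literals should appear as TOK_INTERVAL_*_LITERAL nodes in the AST.
        System.out.println(yearMonth.dump());
        System.out.println(dayTime.dump());
      }
    }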
Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1669718&r1=1669717&r2=1669718&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Sat Mar 28 00:22:15 2015
@@ -1856,7 +1856,7 @@ public class SemanticAnalyzer extends Ba
           return true;
         }
       } catch (Exception e) {
-        throw new HiveException("Unable to determine if " + path + "is encrypted: " + e, e);
+        throw new HiveException("Unable to determine if " + path + " is encrypted: " + e, e);
       }
     }
 
@@ -1919,7 +1919,7 @@ public class SemanticAnalyzer extends Ba
    * Gets the strongest encrypted table path.
    *
    * @param qb The QB object that contains a list of all table locations.
-   * @return The strongest encrypted path
+   * @return The strongest encrypted path. It may return NULL if no tables are encrypted, or if none of them are HDFS tables.
    * @throws HiveException if an error occurred attempting to compare the encryption strength
    */
   private Path getStrongestEncryptedTablePath(QB qb) throws HiveException {
@@ -1932,17 +1932,14 @@ public class SemanticAnalyzer extends Ba
       if (tab != null) {
         Path tablePath = tab.getDataLocation();
         if (tablePath != null) {
-          try {
-            if (strongestPath == null) {
-              strongestPath = tablePath;
-            } else if ("hdfs".equals(tablePath.toUri().getScheme())
-                && isPathEncrypted(tablePath)
-                && comparePathKeyStrength(tablePath, strongestPath) > 0)
-            {
-              strongestPath = tablePath;
+          if ("hdfs".equalsIgnoreCase(tablePath.toUri().getScheme())) {
+            if (isPathEncrypted(tablePath)) {
+              if (strongestPath == null) {
+                strongestPath = tablePath;
+              } else if (comparePathKeyStrength(tablePath, strongestPath) > 0) {
+                strongestPath = tablePath;
+              }
             }
-          } catch (HiveException e) {
-            throw new HiveException("Unable to find the most secure table path: " + e, e);
           }
         }
       }
@@ -1966,22 +1963,19 @@ public class SemanticAnalyzer extends Ba
   private Path getStagingDirectoryPathname(QB qb) throws HiveException {
     Path stagingPath = null, tablePath;
 
-    // Looks for the most encrypted table location (if there is one)
+    // Looks for the most strongly encrypted table location (if there is one).
+    // It may return null if no tables are encrypted, or none of them are on HDFS.
     tablePath = getStrongestEncryptedTablePath(qb);
-    if (tablePath != null && isPathEncrypted(tablePath)) {
-      // Only HDFS paths can be checked for encryption
-      if ("hdfs".equals(tablePath.toUri().getScheme())) {
-        if (isPathReadOnly(tablePath)) {
-          Path tmpPath = ctx.getMRTmpPath();
-          if (comparePathKeyStrength(tablePath, tmpPath) < 0) {
-            throw new HiveException("Read-only encrypted tables cannot be read " +
-                "if the scratch directory is not encrypted (or encryption is weak)");
-          } else {
-            stagingPath = tmpPath;
-          }
+    if (tablePath != null) {
+      // At this point, tablePath is part of HDFS and it is encrypted
+      if (isPathReadOnly(tablePath)) {
+        Path tmpPath = ctx.getMRTmpPath();
+        if (comparePathKeyStrength(tablePath, tmpPath) < 0) {
+          throw new HiveException("Read-only encrypted tables cannot be read " +
+              "if the scratch directory is not encrypted (or encryption is weak)");
+        } else {
+          stagingPath = tmpPath;
         }
-      } else {
-        LOG.debug("Encryption is not applicable to table path " + tablePath.toString());
       }
 
       if (stagingPath == null) {

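The net effect of the two SemanticAnalyzer hunks is that getStrongestEncryptedTablePath now returns only an encrypted HDFS path (or null), so the caller no longer re-checks the scheme or encryption. A condensed sketch of the selection logic, with plain predicates standing in for isPathEncrypted and comparePathKeyStrength.

    import java.net.URI;
    import java.util.Comparator;
    import java.util.List;
    import java.util.function.Predicate;

    public class StrongestPathSketch {
      static URI strongest(List<URI> tablePaths,
                           Predicate<URI> isEncrypted,
                           Comparator<URI> keyStrength) {
        URI best = null;
        for (URI p : tablePaths) {
          if (!"hdfs".equalsIgnoreCase(p.getScheme()) || !isEncrypted.test(p)) {
            continue; // non-HDFS and unencrypted tables are no longer candidates
          }
          if (best == null || keyStrength.compare(p, best) > 0) {
            best = p;
          }
        }
        return best; // null when the query touches no encrypted HDFS tables
      }
    }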

