hive-commits mailing list archives

From: gunt...@apache.org
Subject: svn commit: r1508202 [5/48] - in /hive/branches/tez: ./ beeline/src/java/org/apache/hive/beeline/ cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/common/metrics/ common/src/java/org/apache/hadoop/hive/conf/ common/src/te...
Date: Mon, 29 Jul 2013 21:08:19 GMT
Modified: hive/branches/tez/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java (original)
+++ hive/branches/tez/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java Mon Jul 29 21:08:03 2013
@@ -2415,6 +2415,401 @@ public final class OrcProto {
     // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.DecimalStatistics)
   }
   
+  public interface DateStatisticsOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+    
+    // optional sint32 minimum = 1;
+    boolean hasMinimum();
+    int getMinimum();
+    
+    // optional sint32 maximum = 2;
+    boolean hasMaximum();
+    int getMaximum();
+  }
+  public static final class DateStatistics extends
+      com.google.protobuf.GeneratedMessage
+      implements DateStatisticsOrBuilder {
+    // Use DateStatistics.newBuilder() to construct.
+    private DateStatistics(Builder builder) {
+      super(builder);
+    }
+    private DateStatistics(boolean noInit) {}
+    
+    private static final DateStatistics defaultInstance;
+    public static DateStatistics getDefaultInstance() {
+      return defaultInstance;
+    }
+    
+    public DateStatistics getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+    
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor;
+    }
+    
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable;
+    }
+    
+    private int bitField0_;
+    // optional sint32 minimum = 1;
+    public static final int MINIMUM_FIELD_NUMBER = 1;
+    private int minimum_;
+    public boolean hasMinimum() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public int getMinimum() {
+      return minimum_;
+    }
+    
+    // optional sint32 maximum = 2;
+    public static final int MAXIMUM_FIELD_NUMBER = 2;
+    private int maximum_;
+    public boolean hasMaximum() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public int getMaximum() {
+      return maximum_;
+    }
+    
+    private void initFields() {
+      minimum_ = 0;
+      maximum_ = 0;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+      
+      memoizedIsInitialized = 1;
+      return true;
+    }
+    
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeSInt32(1, minimum_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeSInt32(2, maximum_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+    
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+    
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeSInt32Size(1, minimum_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeSInt32Size(2, maximum_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+    
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+    
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+    
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor;
+      }
+      
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable;
+      }
+      
+      // Construct using org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+      
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+      
+      public Builder clear() {
+        super.clear();
+        minimum_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        maximum_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+      
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDescriptor();
+      }
+      
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics getDefaultInstanceForType() {
+        return org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
+      }
+      
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics build() {
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+      
+      private org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+      
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics buildPartial() {
+        org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics result = new org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.minimum_ = minimum_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.maximum_ = maximum_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+      
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics) {
+          return mergeFrom((org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+      
+      public Builder mergeFrom(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics other) {
+        if (other == org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance()) return this;
+        if (other.hasMinimum()) {
+          setMinimum(other.getMinimum());
+        }
+        if (other.hasMaximum()) {
+          setMaximum(other.getMaximum());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+      
+      public final boolean isInitialized() {
+        return true;
+      }
+      
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              minimum_ = input.readSInt32();
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              maximum_ = input.readSInt32();
+              break;
+            }
+          }
+        }
+      }
+      
+      private int bitField0_;
+      
+      // optional sint32 minimum = 1;
+      private int minimum_ ;
+      public boolean hasMinimum() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public int getMinimum() {
+        return minimum_;
+      }
+      public Builder setMinimum(int value) {
+        bitField0_ |= 0x00000001;
+        minimum_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearMinimum() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        minimum_ = 0;
+        onChanged();
+        return this;
+      }
+      
+      // optional sint32 maximum = 2;
+      private int maximum_ ;
+      public boolean hasMaximum() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public int getMaximum() {
+        return maximum_;
+      }
+      public Builder setMaximum(int value) {
+        bitField0_ |= 0x00000002;
+        maximum_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearMaximum() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        maximum_ = 0;
+        onChanged();
+        return this;
+      }
+      
+      // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics)
+    }
+    
+    static {
+      defaultInstance = new DateStatistics(true);
+      defaultInstance.initFields();
+    }
+    
+    // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics)
+  }
+  
   public interface ColumnStatisticsOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
@@ -2446,6 +2841,11 @@ public final class OrcProto {
     boolean hasDecimalStatistics();
     org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics getDecimalStatistics();
     org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatisticsOrBuilder getDecimalStatisticsOrBuilder();
+    
+    // optional .org.apache.hadoop.hive.ql.io.orc.DateStatistics dateStatistics = 7;
+    boolean hasDateStatistics();
+    org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics getDateStatistics();
+    org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder getDateStatisticsOrBuilder();
   }
   public static final class ColumnStatistics extends
       com.google.protobuf.GeneratedMessage
@@ -2551,6 +2951,19 @@ public final class OrcProto {
       return decimalStatistics_;
     }
     
+    // optional .org.apache.hadoop.hive.ql.io.orc.DateStatistics dateStatistics = 7;
+    public static final int DATESTATISTICS_FIELD_NUMBER = 7;
+    private org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics dateStatistics_;
+    public boolean hasDateStatistics() {
+      return ((bitField0_ & 0x00000040) == 0x00000040);
+    }
+    public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics getDateStatistics() {
+      return dateStatistics_;
+    }
+    public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder getDateStatisticsOrBuilder() {
+      return dateStatistics_;
+    }
+    
     private void initFields() {
       numberOfValues_ = 0L;
       intStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.IntegerStatistics.getDefaultInstance();
@@ -2558,6 +2971,7 @@ public final class OrcProto {
       stringStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.StringStatistics.getDefaultInstance();
       bucketStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.BucketStatistics.getDefaultInstance();
       decimalStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics.getDefaultInstance();
+      dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -2589,6 +3003,9 @@ public final class OrcProto {
       if (((bitField0_ & 0x00000020) == 0x00000020)) {
         output.writeMessage(6, decimalStatistics_);
       }
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+        output.writeMessage(7, dateStatistics_);
+      }
       getUnknownFields().writeTo(output);
     }
     
@@ -2622,6 +3039,10 @@ public final class OrcProto {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(6, decimalStatistics_);
       }
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(7, dateStatistics_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -2743,6 +3164,7 @@ public final class OrcProto {
           getStringStatisticsFieldBuilder();
           getBucketStatisticsFieldBuilder();
           getDecimalStatisticsFieldBuilder();
+          getDateStatisticsFieldBuilder();
         }
       }
       private static Builder create() {
@@ -2783,6 +3205,12 @@ public final class OrcProto {
           decimalStatisticsBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000020);
+        if (dateStatisticsBuilder_ == null) {
+          dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
+        } else {
+          dateStatisticsBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000040);
         return this;
       }
       
@@ -2865,6 +3293,14 @@ public final class OrcProto {
         } else {
           result.decimalStatistics_ = decimalStatisticsBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+          to_bitField0_ |= 0x00000040;
+        }
+        if (dateStatisticsBuilder_ == null) {
+          result.dateStatistics_ = dateStatistics_;
+        } else {
+          result.dateStatistics_ = dateStatisticsBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -2899,6 +3335,9 @@ public final class OrcProto {
         if (other.hasDecimalStatistics()) {
           mergeDecimalStatistics(other.getDecimalStatistics());
         }
+        if (other.hasDateStatistics()) {
+          mergeDateStatistics(other.getDateStatistics());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -2980,6 +3419,15 @@ public final class OrcProto {
               setDecimalStatistics(subBuilder.buildPartial());
               break;
             }
+            case 58: {
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder subBuilder = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.newBuilder();
+              if (hasDateStatistics()) {
+                subBuilder.mergeFrom(getDateStatistics());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setDateStatistics(subBuilder.buildPartial());
+              break;
+            }
           }
         }
       }
@@ -3457,6 +3905,96 @@ public final class OrcProto {
         return decimalStatisticsBuilder_;
       }
       
+      // optional .org.apache.hadoop.hive.ql.io.orc.DateStatistics dateStatistics = 7;
+      private org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder> dateStatisticsBuilder_;
+      public boolean hasDateStatistics() {
+        return ((bitField0_ & 0x00000040) == 0x00000040);
+      }
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics getDateStatistics() {
+        if (dateStatisticsBuilder_ == null) {
+          return dateStatistics_;
+        } else {
+          return dateStatisticsBuilder_.getMessage();
+        }
+      }
+      public Builder setDateStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics value) {
+        if (dateStatisticsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          dateStatistics_ = value;
+          onChanged();
+        } else {
+          dateStatisticsBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000040;
+        return this;
+      }
+      public Builder setDateStatistics(
+          org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder builderForValue) {
+        if (dateStatisticsBuilder_ == null) {
+          dateStatistics_ = builderForValue.build();
+          onChanged();
+        } else {
+          dateStatisticsBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000040;
+        return this;
+      }
+      public Builder mergeDateStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics value) {
+        if (dateStatisticsBuilder_ == null) {
+          if (((bitField0_ & 0x00000040) == 0x00000040) &&
+              dateStatistics_ != org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance()) {
+            dateStatistics_ =
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.newBuilder(dateStatistics_).mergeFrom(value).buildPartial();
+          } else {
+            dateStatistics_ = value;
+          }
+          onChanged();
+        } else {
+          dateStatisticsBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000040;
+        return this;
+      }
+      public Builder clearDateStatistics() {
+        if (dateStatisticsBuilder_ == null) {
+          dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
+          onChanged();
+        } else {
+          dateStatisticsBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000040);
+        return this;
+      }
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder getDateStatisticsBuilder() {
+        bitField0_ |= 0x00000040;
+        onChanged();
+        return getDateStatisticsFieldBuilder().getBuilder();
+      }
+      public org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder getDateStatisticsOrBuilder() {
+        if (dateStatisticsBuilder_ != null) {
+          return dateStatisticsBuilder_.getMessageOrBuilder();
+        } else {
+          return dateStatistics_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder> 
+          getDateStatisticsFieldBuilder() {
+        if (dateStatisticsBuilder_ == null) {
+          dateStatisticsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatisticsOrBuilder>(
+                  dateStatistics_,
+                  getParentForChildren(),
+                  isClean());
+          dateStatistics_ = null;
+        }
+        return dateStatisticsBuilder_;
+      }
+      
       // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.ColumnStatistics)
     }
     
@@ -6505,6 +7043,7 @@ public final class OrcProto {
       STRUCT(12, 12),
       UNION(13, 13),
       DECIMAL(14, 14),
+      DATE(15, 15),
       ;
       
       public static final int BOOLEAN_VALUE = 0;
@@ -6522,6 +7061,7 @@ public final class OrcProto {
       public static final int STRUCT_VALUE = 12;
       public static final int UNION_VALUE = 13;
       public static final int DECIMAL_VALUE = 14;
+      public static final int DATE_VALUE = 15;
       
       
       public final int getNumber() { return value; }
@@ -6543,6 +7083,7 @@ public final class OrcProto {
           case 12: return STRUCT;
           case 13: return UNION;
           case 14: return DECIMAL;
+          case 15: return DATE;
           default: return null;
         }
       }
@@ -6573,7 +7114,7 @@ public final class OrcProto {
       }
       
       private static final Kind[] VALUES = {
-        BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, STRING, BINARY, TIMESTAMP, LIST, MAP, STRUCT, UNION, DECIMAL, 
+        BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, STRING, BINARY, TIMESTAMP, LIST, MAP, STRUCT, UNION, DECIMAL, DATE, 
       };
       
       public static Kind valueOf(
@@ -9791,6 +10332,15 @@ public final class OrcProto {
     // optional uint64 compressionBlockSize = 3;
     boolean hasCompressionBlockSize();
     long getCompressionBlockSize();
+    
+    // repeated uint32 version = 4 [packed = true];
+    java.util.List<java.lang.Integer> getVersionList();
+    int getVersionCount();
+    int getVersion(int index);
+    
+    // optional string magic = 8000;
+    boolean hasMagic();
+    String getMagic();
   }
   public static final class PostScript extends
       com.google.protobuf.GeneratedMessage
@@ -9851,10 +10401,59 @@ public final class OrcProto {
       return compressionBlockSize_;
     }
     
+    // repeated uint32 version = 4 [packed = true];
+    public static final int VERSION_FIELD_NUMBER = 4;
+    private java.util.List<java.lang.Integer> version_;
+    public java.util.List<java.lang.Integer>
+        getVersionList() {
+      return version_;
+    }
+    public int getVersionCount() {
+      return version_.size();
+    }
+    public int getVersion(int index) {
+      return version_.get(index);
+    }
+    private int versionMemoizedSerializedSize = -1;
+    
+    // optional string magic = 8000;
+    public static final int MAGIC_FIELD_NUMBER = 8000;
+    private java.lang.Object magic_;
+    public boolean hasMagic() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    public String getMagic() {
+      java.lang.Object ref = magic_;
+      if (ref instanceof String) {
+        return (String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        String s = bs.toStringUtf8();
+        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+          magic_ = s;
+        }
+        return s;
+      }
+    }
+    private com.google.protobuf.ByteString getMagicBytes() {
+      java.lang.Object ref = magic_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+        magic_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+    
     private void initFields() {
       footerLength_ = 0L;
       compression_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.CompressionKind.NONE;
       compressionBlockSize_ = 0L;
+      version_ = java.util.Collections.emptyList();;
+      magic_ = "";
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -9877,6 +10476,16 @@ public final class OrcProto {
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         output.writeUInt64(3, compressionBlockSize_);
       }
+      if (getVersionList().size() > 0) {
+        output.writeRawVarint32(34);
+        output.writeRawVarint32(versionMemoizedSerializedSize);
+      }
+      for (int i = 0; i < version_.size(); i++) {
+        output.writeUInt32NoTag(version_.get(i));
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeBytes(8000, getMagicBytes());
+      }
       getUnknownFields().writeTo(output);
     }
     
@@ -9898,6 +10507,24 @@ public final class OrcProto {
         size += com.google.protobuf.CodedOutputStream
           .computeUInt64Size(3, compressionBlockSize_);
       }
+      {
+        int dataSize = 0;
+        for (int i = 0; i < version_.size(); i++) {
+          dataSize += com.google.protobuf.CodedOutputStream
+            .computeUInt32SizeNoTag(version_.get(i));
+        }
+        size += dataSize;
+        if (!getVersionList().isEmpty()) {
+          size += 1;
+          size += com.google.protobuf.CodedOutputStream
+              .computeInt32SizeNoTag(dataSize);
+        }
+        versionMemoizedSerializedSize = dataSize;
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(8000, getMagicBytes());
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -10028,6 +10655,10 @@ public final class OrcProto {
         bitField0_ = (bitField0_ & ~0x00000002);
         compressionBlockSize_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000004);
+        version_ = java.util.Collections.emptyList();;
+        bitField0_ = (bitField0_ & ~0x00000008);
+        magic_ = "";
+        bitField0_ = (bitField0_ & ~0x00000010);
         return this;
       }
       
@@ -10078,6 +10709,15 @@ public final class OrcProto {
           to_bitField0_ |= 0x00000004;
         }
         result.compressionBlockSize_ = compressionBlockSize_;
+        if (((bitField0_ & 0x00000008) == 0x00000008)) {
+          version_ = java.util.Collections.unmodifiableList(version_);
+          bitField0_ = (bitField0_ & ~0x00000008);
+        }
+        result.version_ = version_;
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        result.magic_ = magic_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -10103,6 +10743,19 @@ public final class OrcProto {
         if (other.hasCompressionBlockSize()) {
           setCompressionBlockSize(other.getCompressionBlockSize());
         }
+        if (!other.version_.isEmpty()) {
+          if (version_.isEmpty()) {
+            version_ = other.version_;
+            bitField0_ = (bitField0_ & ~0x00000008);
+          } else {
+            ensureVersionIsMutable();
+            version_.addAll(other.version_);
+          }
+          onChanged();
+        }
+        if (other.hasMagic()) {
+          setMagic(other.getMagic());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -10155,6 +10808,25 @@ public final class OrcProto {
               compressionBlockSize_ = input.readUInt64();
               break;
             }
+            case 32: {
+              ensureVersionIsMutable();
+              version_.add(input.readUInt32());
+              break;
+            }
+            case 34: {
+              int length = input.readRawVarint32();
+              int limit = input.pushLimit(length);
+              while (input.getBytesUntilLimit() > 0) {
+                addVersion(input.readUInt32());
+              }
+              input.popLimit(limit);
+              break;
+            }
+            case 64002: {
+              bitField0_ |= 0x00000010;
+              magic_ = input.readBytes();
+              break;
+            }
           }
         }
       }
@@ -10227,6 +10899,87 @@ public final class OrcProto {
         return this;
       }
       
+      // repeated uint32 version = 4 [packed = true];
+      private java.util.List<java.lang.Integer> version_ = java.util.Collections.emptyList();;
+      private void ensureVersionIsMutable() {
+        if (!((bitField0_ & 0x00000008) == 0x00000008)) {
+          version_ = new java.util.ArrayList<java.lang.Integer>(version_);
+          bitField0_ |= 0x00000008;
+         }
+      }
+      public java.util.List<java.lang.Integer>
+          getVersionList() {
+        return java.util.Collections.unmodifiableList(version_);
+      }
+      public int getVersionCount() {
+        return version_.size();
+      }
+      public int getVersion(int index) {
+        return version_.get(index);
+      }
+      public Builder setVersion(
+          int index, int value) {
+        ensureVersionIsMutable();
+        version_.set(index, value);
+        onChanged();
+        return this;
+      }
+      public Builder addVersion(int value) {
+        ensureVersionIsMutable();
+        version_.add(value);
+        onChanged();
+        return this;
+      }
+      public Builder addAllVersion(
+          java.lang.Iterable<? extends java.lang.Integer> values) {
+        ensureVersionIsMutable();
+        super.addAll(values, version_);
+        onChanged();
+        return this;
+      }
+      public Builder clearVersion() {
+        version_ = java.util.Collections.emptyList();;
+        bitField0_ = (bitField0_ & ~0x00000008);
+        onChanged();
+        return this;
+      }
+      
+      // optional string magic = 8000;
+      private java.lang.Object magic_ = "";
+      public boolean hasMagic() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      public String getMagic() {
+        java.lang.Object ref = magic_;
+        if (!(ref instanceof String)) {
+          String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+          magic_ = s;
+          return s;
+        } else {
+          return (String) ref;
+        }
+      }
+      public Builder setMagic(String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000010;
+        magic_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearMagic() {
+        bitField0_ = (bitField0_ & ~0x00000010);
+        magic_ = getDefaultInstance().getMagic();
+        onChanged();
+        return this;
+      }
+      void setMagic(com.google.protobuf.ByteString value) {
+        bitField0_ |= 0x00000010;
+        magic_ = value;
+        onChanged();
+      }
+      
       // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.PostScript)
     }
     
@@ -10264,6 +11017,11 @@ public final class OrcProto {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_org_apache_hadoop_hive_ql_io_orc_DecimalStatistics_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -10335,61 +11093,66 @@ public final class OrcProto {
       "istics\022\017\n\007minimum\030\001 \001(\t\022\017\n\007maximum\030\002 \001(\t" +
       "\"%\n\020BucketStatistics\022\021\n\005count\030\001 \003(\004B\002\020\001\"" +
       "B\n\021DecimalStatistics\022\017\n\007minimum\030\001 \001(\t\022\017\n" +
-      "\007maximum\030\002 \001(\t\022\013\n\003sum\030\003 \001(\t\"\260\003\n\020ColumnSt" +
-      "atistics\022\026\n\016numberOfValues\030\001 \001(\004\022J\n\rintS",
-      "tatistics\030\002 \001(\01323.org.apache.hadoop.hive" +
-      ".ql.io.orc.IntegerStatistics\022L\n\020doubleSt" +
-      "atistics\030\003 \001(\01322.org.apache.hadoop.hive." +
-      "ql.io.orc.DoubleStatistics\022L\n\020stringStat" +
-      "istics\030\004 \001(\01322.org.apache.hadoop.hive.ql" +
-      ".io.orc.StringStatistics\022L\n\020bucketStatis" +
-      "tics\030\005 \001(\01322.org.apache.hadoop.hive.ql.i" +
-      "o.orc.BucketStatistics\022N\n\021decimalStatist" +
-      "ics\030\006 \001(\01323.org.apache.hadoop.hive.ql.io" +
-      ".orc.DecimalStatistics\"n\n\rRowIndexEntry\022",
-      "\025\n\tpositions\030\001 \003(\004B\002\020\001\022F\n\nstatistics\030\002 \001" +
-      "(\01322.org.apache.hadoop.hive.ql.io.orc.Co" +
-      "lumnStatistics\"J\n\010RowIndex\022>\n\005entry\030\001 \003(" +
-      "\0132/.org.apache.hadoop.hive.ql.io.orc.Row" +
-      "IndexEntry\"\331\001\n\006Stream\022;\n\004kind\030\001 \002(\0162-.or" +
-      "g.apache.hadoop.hive.ql.io.orc.Stream.Ki" +
-      "nd\022\016\n\006column\030\002 \001(\r\022\016\n\006length\030\003 \001(\004\"r\n\004Ki" +
-      "nd\022\013\n\007PRESENT\020\000\022\010\n\004DATA\020\001\022\n\n\006LENGTH\020\002\022\023\n" +
-      "\017DICTIONARY_DATA\020\003\022\024\n\020DICTIONARY_COUNT\020\004" +
-      "\022\r\n\tSECONDARY\020\005\022\r\n\tROW_INDEX\020\006\"\221\001\n\016Colum",
-      "nEncoding\022C\n\004kind\030\001 \002(\01625.org.apache.had" +
-      "oop.hive.ql.io.orc.ColumnEncoding.Kind\022\026" +
-      "\n\016dictionarySize\030\002 \001(\r\"\"\n\004Kind\022\n\n\006DIRECT" +
-      "\020\000\022\016\n\nDICTIONARY\020\001\"\214\001\n\014StripeFooter\0229\n\007s" +
-      "treams\030\001 \003(\0132(.org.apache.hadoop.hive.ql" +
-      ".io.orc.Stream\022A\n\007columns\030\002 \003(\01320.org.ap" +
-      "ache.hadoop.hive.ql.io.orc.ColumnEncodin" +
-      "g\"\236\002\n\004Type\0229\n\004kind\030\001 \002(\0162+.org.apache.ha" +
-      "doop.hive.ql.io.orc.Type.Kind\022\024\n\010subtype" +
-      "s\030\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030\003 \003(\t\"\260\001\n\004Kind",
-      "\022\013\n\007BOOLEAN\020\000\022\010\n\004BYTE\020\001\022\t\n\005SHORT\020\002\022\007\n\003IN" +
-      "T\020\003\022\010\n\004LONG\020\004\022\t\n\005FLOAT\020\005\022\n\n\006DOUBLE\020\006\022\n\n\006" +
-      "STRING\020\007\022\n\n\006BINARY\020\010\022\r\n\tTIMESTAMP\020\t\022\010\n\004L" +
-      "IST\020\n\022\007\n\003MAP\020\013\022\n\n\006STRUCT\020\014\022\t\n\005UNION\020\r\022\013\n" +
-      "\007DECIMAL\020\016\"x\n\021StripeInformation\022\016\n\006offse" +
-      "t\030\001 \001(\004\022\023\n\013indexLength\030\002 \001(\004\022\022\n\ndataLeng" +
-      "th\030\003 \001(\004\022\024\n\014footerLength\030\004 \001(\004\022\024\n\014number" +
-      "OfRows\030\005 \001(\004\"/\n\020UserMetadataItem\022\014\n\004name" +
-      "\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"\356\002\n\006Footer\022\024\n\014head" +
-      "erLength\030\001 \001(\004\022\025\n\rcontentLength\030\002 \001(\004\022D\n",
-      "\007stripes\030\003 \003(\01323.org.apache.hadoop.hive." +
-      "ql.io.orc.StripeInformation\0225\n\005types\030\004 \003" +
-      "(\0132&.org.apache.hadoop.hive.ql.io.orc.Ty" +
-      "pe\022D\n\010metadata\030\005 \003(\01322.org.apache.hadoop" +
-      ".hive.ql.io.orc.UserMetadataItem\022\024\n\014numb" +
-      "erOfRows\030\006 \001(\004\022F\n\nstatistics\030\007 \003(\01322.org" +
-      ".apache.hadoop.hive.ql.io.orc.ColumnStat" +
-      "istics\022\026\n\016rowIndexStride\030\010 \001(\r\"\210\001\n\nPostS" +
-      "cript\022\024\n\014footerLength\030\001 \001(\004\022F\n\013compressi" +
-      "on\030\002 \001(\01621.org.apache.hadoop.hive.ql.io.",
-      "orc.CompressionKind\022\034\n\024compressionBlockS" +
-      "ize\030\003 \001(\004*:\n\017CompressionKind\022\010\n\004NONE\020\000\022\010" +
-      "\n\004ZLIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003"
+      "\007maximum\030\002 \001(\t\022\013\n\003sum\030\003 \001(\t\"2\n\016DateStati" +
+      "stics\022\017\n\007minimum\030\001 \001(\021\022\017\n\007maximum\030\002 \001(\021\"",
+      "\372\003\n\020ColumnStatistics\022\026\n\016numberOfValues\030\001" +
+      " \001(\004\022J\n\rintStatistics\030\002 \001(\01323.org.apache" +
+      ".hadoop.hive.ql.io.orc.IntegerStatistics" +
+      "\022L\n\020doubleStatistics\030\003 \001(\01322.org.apache." +
+      "hadoop.hive.ql.io.orc.DoubleStatistics\022L" +
+      "\n\020stringStatistics\030\004 \001(\01322.org.apache.ha" +
+      "doop.hive.ql.io.orc.StringStatistics\022L\n\020" +
+      "bucketStatistics\030\005 \001(\01322.org.apache.hado" +
+      "op.hive.ql.io.orc.BucketStatistics\022N\n\021de" +
+      "cimalStatistics\030\006 \001(\01323.org.apache.hadoo",
+      "p.hive.ql.io.orc.DecimalStatistics\022H\n\016da" +
+      "teStatistics\030\007 \001(\01320.org.apache.hadoop.h" +
+      "ive.ql.io.orc.DateStatistics\"n\n\rRowIndex" +
+      "Entry\022\025\n\tpositions\030\001 \003(\004B\002\020\001\022F\n\nstatisti" +
+      "cs\030\002 \001(\01322.org.apache.hadoop.hive.ql.io." +
+      "orc.ColumnStatistics\"J\n\010RowIndex\022>\n\005entr" +
+      "y\030\001 \003(\0132/.org.apache.hadoop.hive.ql.io.o" +
+      "rc.RowIndexEntry\"\331\001\n\006Stream\022;\n\004kind\030\001 \002(" +
+      "\0162-.org.apache.hadoop.hive.ql.io.orc.Str" +
+      "eam.Kind\022\016\n\006column\030\002 \001(\r\022\016\n\006length\030\003 \001(\004",
+      "\"r\n\004Kind\022\013\n\007PRESENT\020\000\022\010\n\004DATA\020\001\022\n\n\006LENGT" +
+      "H\020\002\022\023\n\017DICTIONARY_DATA\020\003\022\024\n\020DICTIONARY_C" +
+      "OUNT\020\004\022\r\n\tSECONDARY\020\005\022\r\n\tROW_INDEX\020\006\"\221\001\n" +
+      "\016ColumnEncoding\022C\n\004kind\030\001 \002(\01625.org.apac" +
+      "he.hadoop.hive.ql.io.orc.ColumnEncoding." +
+      "Kind\022\026\n\016dictionarySize\030\002 \001(\r\"\"\n\004Kind\022\n\n\006" +
+      "DIRECT\020\000\022\016\n\nDICTIONARY\020\001\"\214\001\n\014StripeFoote" +
+      "r\0229\n\007streams\030\001 \003(\0132(.org.apache.hadoop.h" +
+      "ive.ql.io.orc.Stream\022A\n\007columns\030\002 \003(\01320." +
+      "org.apache.hadoop.hive.ql.io.orc.ColumnE",
+      "ncoding\"\250\002\n\004Type\0229\n\004kind\030\001 \002(\0162+.org.apa" +
+      "che.hadoop.hive.ql.io.orc.Type.Kind\022\024\n\010s" +
+      "ubtypes\030\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030\003 \003(\t\"\272\001" +
+      "\n\004Kind\022\013\n\007BOOLEAN\020\000\022\010\n\004BYTE\020\001\022\t\n\005SHORT\020\002" +
+      "\022\007\n\003INT\020\003\022\010\n\004LONG\020\004\022\t\n\005FLOAT\020\005\022\n\n\006DOUBLE" +
+      "\020\006\022\n\n\006STRING\020\007\022\n\n\006BINARY\020\010\022\r\n\tTIMESTAMP\020" +
+      "\t\022\010\n\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n\006STRUCT\020\014\022\t\n\005UNIO" +
+      "N\020\r\022\013\n\007DECIMAL\020\016\022\010\n\004DATE\020\017\"x\n\021StripeInfo" +
+      "rmation\022\016\n\006offset\030\001 \001(\004\022\023\n\013indexLength\030\002" +
+      " \001(\004\022\022\n\ndataLength\030\003 \001(\004\022\024\n\014footerLength",
+      "\030\004 \001(\004\022\024\n\014numberOfRows\030\005 \001(\004\"/\n\020UserMeta" +
+      "dataItem\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"\356\002" +
+      "\n\006Footer\022\024\n\014headerLength\030\001 \001(\004\022\025\n\rconten" +
+      "tLength\030\002 \001(\004\022D\n\007stripes\030\003 \003(\01323.org.apa" +
+      "che.hadoop.hive.ql.io.orc.StripeInformat" +
+      "ion\0225\n\005types\030\004 \003(\0132&.org.apache.hadoop.h" +
+      "ive.ql.io.orc.Type\022D\n\010metadata\030\005 \003(\01322.o" +
+      "rg.apache.hadoop.hive.ql.io.orc.UserMeta" +
+      "dataItem\022\024\n\014numberOfRows\030\006 \001(\004\022F\n\nstatis" +
+      "tics\030\007 \003(\01322.org.apache.hadoop.hive.ql.i",
+      "o.orc.ColumnStatistics\022\026\n\016rowIndexStride" +
+      "\030\010 \001(\r\"\255\001\n\nPostScript\022\024\n\014footerLength\030\001 " +
+      "\001(\004\022F\n\013compression\030\002 \001(\01621.org.apache.ha" +
+      "doop.hive.ql.io.orc.CompressionKind\022\034\n\024c" +
+      "ompressionBlockSize\030\003 \001(\004\022\023\n\007version\030\004 \003" +
+      "(\rB\002\020\001\022\016\n\005magic\030\300> \001(\t*:\n\017CompressionKin" +
+      "d\022\010\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZO" +
+      "\020\003"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -10436,16 +11199,24 @@ public final class OrcProto {
               new java.lang.String[] { "Minimum", "Maximum", "Sum", },
               org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics.Builder.class);
-          internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor =
+          internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor =
             getDescriptor().getMessageTypes().get(5);
+          internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor,
+              new java.lang.String[] { "Minimum", "Maximum", },
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.class,
+              org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.Builder.class);
+          internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor =
+            getDescriptor().getMessageTypes().get(6);
           internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor,
-              new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", },
+              new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", },
               org.apache.hadoop.hive.ql.io.orc.OrcProto.ColumnStatistics.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.ColumnStatistics.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor =
-            getDescriptor().getMessageTypes().get(6);
+            getDescriptor().getMessageTypes().get(7);
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor,
@@ -10453,7 +11224,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.RowIndexEntry.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.RowIndexEntry.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor =
-            getDescriptor().getMessageTypes().get(7);
+            getDescriptor().getMessageTypes().get(8);
           internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor,
@@ -10461,7 +11232,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.RowIndex.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.RowIndex.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor =
-            getDescriptor().getMessageTypes().get(8);
+            getDescriptor().getMessageTypes().get(9);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor,
@@ -10469,7 +11240,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Stream.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Stream.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor =
-            getDescriptor().getMessageTypes().get(9);
+            getDescriptor().getMessageTypes().get(10);
           internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor,
@@ -10477,7 +11248,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.ColumnEncoding.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.ColumnEncoding.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor =
-            getDescriptor().getMessageTypes().get(10);
+            getDescriptor().getMessageTypes().get(11);
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor,
@@ -10485,7 +11256,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeFooter.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeFooter.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor =
-            getDescriptor().getMessageTypes().get(11);
+            getDescriptor().getMessageTypes().get(12);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Type_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor,
@@ -10493,7 +11264,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Type.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Type.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor =
-            getDescriptor().getMessageTypes().get(12);
+            getDescriptor().getMessageTypes().get(13);
           internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor,
@@ -10501,7 +11272,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeInformation.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeInformation.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor =
-            getDescriptor().getMessageTypes().get(13);
+            getDescriptor().getMessageTypes().get(14);
           internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor,
@@ -10509,7 +11280,7 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.UserMetadataItem.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.UserMetadataItem.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor =
-            getDescriptor().getMessageTypes().get(14);
+            getDescriptor().getMessageTypes().get(15);
           internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor,
@@ -10517,11 +11288,11 @@ public final class OrcProto {
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Footer.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.Footer.Builder.class);
           internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor =
-            getDescriptor().getMessageTypes().get(15);
+            getDescriptor().getMessageTypes().get(16);
           internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor,
-              new java.lang.String[] { "FooterLength", "Compression", "CompressionBlockSize", },
+              new java.lang.String[] { "FooterLength", "Compression", "CompressionBlockSize", "Version", "Magic", },
               org.apache.hadoop.hive.ql.io.orc.OrcProto.PostScript.class,
               org.apache.hadoop.hive.ql.io.orc.OrcProto.PostScript.Builder.class);
           return null;

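A minimal usage sketch (not part of this commit) of the newly generated DateStatistics message and the new PostScript "version"/"magic" fields, exercised through the protobuf 2.x builder API added above; all field values are illustrative only, and the real day encoding and version/magic contents are defined by the ORC writer, not by this sketch.

  import org.apache.hadoop.hive.ql.io.orc.OrcProto;

  public class OrcProtoDateStatsSketch {
    public static void main(String[] args) throws Exception {
      // minimum/maximum are sint32 values; the numbers here are made up.
      OrcProto.DateStatistics dateStats = OrcProto.DateStatistics.newBuilder()
          .setMinimum(14975)
          .setMaximum(15340)
          .build();

      // DateStatistics plugs into ColumnStatistics as new optional field 7.
      OrcProto.ColumnStatistics colStats = OrcProto.ColumnStatistics.newBuilder()
          .setNumberOfValues(1000L)
          .setDateStatistics(dateStats)
          .build();

      // Round-trip through bytes to confirm the new field is preserved.
      OrcProto.ColumnStatistics parsed =
          OrcProto.ColumnStatistics.parseFrom(colStats.toByteArray());
      System.out.println(parsed.hasDateStatistics() + " "
          + parsed.getDateStatistics().getMinimum());

      // PostScript gains a packed repeated uint32 "version" and a "magic" string.
      OrcProto.PostScript ps = OrcProto.PostScript.newBuilder()
          .setFooterLength(123L)
          .addVersion(0).addVersion(11)   // illustrative version components
          .setMagic("ORC")                // illustrative magic value
          .build();
      System.out.println(ps.getVersionList() + " " + ps.getMagic());
    }
  }
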
Modified: hive/branches/tez/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java (original)
+++ hive/branches/tez/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java Mon Jul 29 21:08:03 2013
@@ -30,7 +30,9 @@ public enum OperatorType implements org.
   LATERALVIEWFORWARD(15),
   HASHTABLESINK(16),
   HASHTABLEDUMMY(17),
-  PTF(18);
+  PTF(18),
+  MUX(19),
+  DEMUX(20);
 
   private final int value;
 
@@ -49,7 +51,7 @@ public enum OperatorType implements org.
    * Find a the enum type by its integer value, as defined in the Thrift IDL.
    * @return null if the value is not found.
    */
-  public static OperatorType findByValue(int value) { 
+  public static OperatorType findByValue(int value) {
     switch (value) {
       case 0:
         return JOIN;
@@ -89,6 +91,10 @@ public enum OperatorType implements org.
         return HASHTABLEDUMMY;
       case 18:
         return PTF;
+      case 19:
+        return MUX;
+      case 20:
+        return DEMUX;
       default:
         return null;
     }

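A minimal sketch (not part of this commit) resolving the newly added Thrift enum values; as the generated code above shows, findByValue returns null for ordinals that are not mapped.

  import org.apache.hadoop.hive.ql.plan.api.OperatorType;

  public class OperatorTypeSketch {
    public static void main(String[] args) {
      System.out.println(OperatorType.findByValue(19));  // MUX
      System.out.println(OperatorType.findByValue(20));  // DEMUX
      System.out.println(OperatorType.MUX.getValue());   // 19
      System.out.println(OperatorType.findByValue(99));  // null: unknown value
    }
  }
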
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/Context.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/Context.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/Context.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/Context.java Mon Jul 29 21:08:03 2013
@@ -79,6 +79,7 @@ public class Context {
   private final Configuration conf;
   protected int pathid = 10000;
   protected boolean explain = false;
+  protected boolean explainLogical = false;
   protected String cmd = "";
   // number of previous attempts
   protected int tryCount = 0;
@@ -140,11 +141,26 @@ public class Context {
    * Find whether the current query is an explain query
    * @return true if the query is an explain query, false if not
    */
-  public boolean getExplain () {
+  public boolean getExplain() {
     return explain;
   }
 
   /**
+   * Find whether the current query is a logical explain query
+   */
+  public boolean getExplainLogical() {
+    return explainLogical;
+  }
+
+  /**
+   * Set the context on whether the current query is a logical
+   * explain query.
+   */
+  public void setExplainLogical(boolean explainLogical) {
+    this.explainLogical = explainLogical;
+  }
+
+  /**
    * Set the original query command.
    * @param cmd the original query command string
    */
@@ -166,7 +182,7 @@ public class Context {
    * @param scheme Scheme of the target FS
    * @param authority Authority of the target FS
    * @param mkdir create the directory if true
-   * @param scratchdir path of tmp directory
+   * @param scratchDir path of tmp directory
    */
   private String getScratchDir(String scheme, String authority,
                                boolean mkdir, String scratchDir) {

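For illustration only (not part of this commit): a minimal sketch of branching on the new flag added to Context. The helper class is made up, and the assumption that the flag corresponds to an EXPLAIN LOGICAL statement is mine, not the diff's.

    import org.apache.hadoop.hive.ql.Context;

    public class ExplainFlagSketch {
      // Sketch only: decide how much of the plan to print based on the Context flags.
      static String explainMode(Context ctx) {
        if (ctx.getExplainLogical()) {
          return "logical";   // stop after the logical plan (assumed semantics)
        }
        if (ctx.getExplain()) {
          return "physical";
        }
        return "none";
      }
    }
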
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Mon Jul 29 21:08:03 2013
@@ -46,7 +46,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Schema;
 import org.apache.hadoop.hive.ql.exec.ConditionalTask;
-import org.apache.hadoop.hive.ql.exec.ExecDriver;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
@@ -78,6 +77,9 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.metadata.formatting.JsonMetaDataFormatter;
+import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils;
+import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
@@ -126,6 +128,7 @@ public class Driver implements CommandPr
 
   private String errorMessage;
   private String SQLState;
+  private Throwable downstreamError;
 
   // A limit on the number of threads that can be launched
   private int maxthreads;
@@ -143,6 +146,7 @@ public class Driver implements CommandPr
       } catch (SemanticException e) {
         errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage();
         SQLState = ErrorMsg.findSQLState(e.getMessage());
+        downstreamError = e;
         console.printError(errorMessage, "\n"
             + org.apache.hadoop.util.StringUtils.stringifyException(e));
         return false;
@@ -193,7 +197,7 @@ public class Driver implements CommandPr
   public ClusterStatus getClusterStatus() throws Exception {
     ClusterStatus cs;
     try {
-      JobConf job = new JobConf(conf, ExecDriver.class);
+      JobConf job = new JobConf(conf);
       JobClient jc = new JobClient(job);
       cs = jc.getClusterStatus();
     } catch (Exception e) {
@@ -456,12 +460,12 @@ public class Driver implements CommandPr
 
         // serialize the queryPlan
         FileOutputStream fos = new FileOutputStream(queryPlanFileName);
-        Utilities.serializeQueryPlan(plan, fos);
+        Utilities.serializeObject(plan, fos);
         fos.close();
 
         // deserialize the queryPlan
         FileInputStream fis = new FileInputStream(queryPlanFileName);
-        QueryPlan newPlan = Utilities.deserializeQueryPlan(fis, conf);
+        QueryPlan newPlan = Utilities.deserializeObject(fis);
         fis.close();
 
         // Use the deserialized plan
@@ -483,8 +487,9 @@ public class Driver implements CommandPr
           perfLogger.PerfLogBegin(LOG, PerfLogger.DO_AUTHORIZATION);
           doAuthorization(sem);
         } catch (AuthorizationException authExp) {
-          console.printError("Authorization failed:" + authExp.getMessage()
-              + ". Use show grant to get more details.");
+          errorMessage = "Authorization failed:" + authExp.getMessage()
+                  + ". Use show grant to get more details.";
+          console.printError(errorMessage);
           return 403;
         } finally {
           perfLogger.PerfLogEnd(LOG, PerfLogger.DO_AUTHORIZATION);
@@ -502,6 +507,7 @@ public class Driver implements CommandPr
       }
       errorMessage += " " + e.getMessage();
       SQLState = error.getSQLState();
+      downstreamError = e;
       console.printError(errorMessage, "\n"
           + org.apache.hadoop.util.StringUtils.stringifyException(e));
       return error.getErrorCode();
@@ -837,12 +843,14 @@ public class Driver implements CommandPr
     } catch (SemanticException e) {
       errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage();
       SQLState = ErrorMsg.findSQLState(e.getMessage());
+      downstreamError = e;
       console.printError(errorMessage, "\n"
           + org.apache.hadoop.util.StringUtils.stringifyException(e));
       return (10);
     } catch (LockException e) {
       errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage();
       SQLState = ErrorMsg.findSQLState(e.getMessage());
+      downstreamError = e;
       console.printError(errorMessage, "\n"
           + org.apache.hadoop.util.StringUtils.stringifyException(e));
       return (10);
@@ -869,8 +877,65 @@ public class Driver implements CommandPr
   }
 
   public CommandProcessorResponse run(String command) throws CommandNeedRetryException {
+    CommandProcessorResponse cpr = runInternal(command);
+    if(cpr.getResponseCode() == 0) {
+      return cpr;
+    }
+    SessionState ss = SessionState.get();
+    if(ss == null) {
+      return cpr;
+    }
+    MetaDataFormatter mdf = MetaDataFormatUtils.getFormatter(ss.getConf());
+    if(!(mdf instanceof JsonMetaDataFormatter)) {
+      return cpr;
+    }
+    /* Here we want to encode the error in a machine-readable way (e.g. JSON).
+     * Ideally, errorCode would always be set to a canonical error defined in ErrorMsg.
+     * In practice that is rarely the case, so the messy logic below tries to tease
+     * out the canonical error code if it can.  The stack trace is excluded from the
+     * output when the error is a specific/expected one.
+     * It's written to stdout for backward compatibility (WebHCat consumes it). */
+    try {
+      if(downstreamError == null) {
+        mdf.error(ss.out, errorMessage, cpr.getResponseCode(), SQLState);
+        return cpr;
+      }
+      ErrorMsg canonicalErr = ErrorMsg.getErrorMsg(cpr.getResponseCode());
+      if(canonicalErr != null && canonicalErr != ErrorMsg.GENERIC_ERROR) {
+        /* Some HiveExceptions (e.g. SemanticException) don't set a
+          canonical ErrorMsg explicitly, but there is logic
+          (e.g. #compile()) to find an appropriate canonical error and
+          return its code as the error code. In this case we want to
+          preserve it for downstream code to interpret. */
+        mdf.error(ss.out, errorMessage, cpr.getResponseCode(), SQLState, null);
+        return cpr;
+      }
+      if(downstreamError instanceof HiveException) {
+        HiveException rc = (HiveException) downstreamError;
+        mdf.error(ss.out, errorMessage,
+                rc.getCanonicalErrorMsg().getErrorCode(), SQLState,
+                rc.getCanonicalErrorMsg() == ErrorMsg.GENERIC_ERROR ?
+                        org.apache.hadoop.util.StringUtils.stringifyException(rc)
+                        : null);
+      }
+      else {
+        ErrorMsg canonicalMsg =
+                ErrorMsg.getErrorMsg(downstreamError.getMessage());
+        mdf.error(ss.out, errorMessage, canonicalMsg.getErrorCode(),
+                SQLState, org.apache.hadoop.util.StringUtils.
+                stringifyException(downstreamError));
+      }
+    }
+    catch(HiveException ex) {
+      console.printError("Unable to JSON-encode the error",
+              org.apache.hadoop.util.StringUtils.stringifyException(ex));
+    }
+    return cpr;
+  }
+  private CommandProcessorResponse runInternal(String command) throws CommandNeedRetryException {
     errorMessage = null;
     SQLState = null;
+    downstreamError = null;
 
     if (!validateConfVariables()) {
       return new CommandProcessorResponse(12, errorMessage, SQLState);
@@ -885,10 +950,11 @@ public class Driver implements CommandPr
           driverRunHook.preDriverRun(hookContext);
       }
     } catch (Exception e) {
-      errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e)
-          + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e);
+      errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
       SQLState = ErrorMsg.findSQLState(e.getMessage());
-      console.printError(errorMessage);
+      downstreamError = e;
+      console.printError(errorMessage + "\n"
+          + org.apache.hadoop.util.StringUtils.stringifyException(e));
       return new CommandProcessorResponse(12, errorMessage, SQLState);
     }
 
@@ -961,10 +1027,11 @@ public class Driver implements CommandPr
           driverRunHook.postDriverRun(hookContext);
       }
     } catch (Exception e) {
-      errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e)
-          + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e);
+      errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
       SQLState = ErrorMsg.findSQLState(e.getMessage());
-      console.printError(errorMessage);
+      downstreamError = e;
+      console.printError(errorMessage + "\n"
+          + org.apache.hadoop.util.StringUtils.stringifyException(e));
       return new CommandProcessorResponse(12, errorMessage, SQLState);
     }
 
@@ -984,7 +1051,7 @@ public class Driver implements CommandPr
                   .getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE))))) {
       errorMessage = "FAILED: Hive Internal Error: "
           + ErrorMsg.SUPPORT_DIR_MUST_TRUE_FOR_LIST_BUCKETING.getMsg();
-      SQLState = ErrorMsg.findSQLState(errorMessage);
+      SQLState = ErrorMsg.SUPPORT_DIR_MUST_TRUE_FOR_LIST_BUCKETING.getSQLState();
       console.printError(errorMessage + "\n");
       valid = false;
     }
@@ -1158,12 +1225,7 @@ public class Driver implements CommandPr
           }
           Task<? extends Serializable> backupTask = tsk.getAndInitBackupTask();
           if (backupTask != null) {
-            errorMessage = "FAILED: Execution Error, return code " + exitVal + " from "
-                + tsk.getClass().getName();
-            ErrorMsg em = ErrorMsg.getErrorMsg(exitVal);
-            if (em != null) {
-              errorMessage += ". " +  em.getMsg();
-            }
+            setErrorMsgAndDetail(exitVal, tskRes.getTaskError(), tsk);
             console.printError(errorMessage);
             errorMessage = "ATTEMPT: Execute BackupTask: " + backupTask.getClass().getName();
             console.printError(errorMessage);
@@ -1184,13 +1246,7 @@ public class Driver implements CommandPr
 
               perfLogger.PerfLogEnd(LOG, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());
             }
-
-            errorMessage = "FAILED: Execution Error, return code " + exitVal + " from "
-                + tsk.getClass().getName();
-            ErrorMsg em = ErrorMsg.getErrorMsg(exitVal);
-            if (em != null) {
-              errorMessage += ". " +  em.getMsg();
-            }
+            setErrorMsgAndDetail(exitVal, tskRes.getTaskError(), tsk);
             SQLState = "08S01";
             console.printError(errorMessage);
             if (!running.isEmpty()) {
@@ -1273,6 +1329,7 @@ public class Driver implements CommandPr
       // TODO: do better with handling types of Exception here
       errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
       SQLState = "08S01";
+      downstreamError = e;
       console.printError(errorMessage + "\n"
           + org.apache.hadoop.util.StringUtils.stringifyException(e));
       return (12);
@@ -1308,7 +1365,21 @@ public class Driver implements CommandPr
 
     return (0);
   }
-
+  private void setErrorMsgAndDetail(int exitVal, Throwable downstreamError, Task tsk) {
+    this.downstreamError = downstreamError;
+    errorMessage = "FAILED: Execution Error, return code " + exitVal + " from " + tsk.getClass().getName();
+    if(downstreamError != null) {
+      //here we assume that upstream code may have parametrized the msg from ErrorMsg
+      //so we want to keep it
+      errorMessage += ". " + downstreamError.getMessage();
+    }
+    else {
+      ErrorMsg em = ErrorMsg.getErrorMsg(exitVal);
+      if (em != null) {
+        errorMessage += ". " +  em.getMsg();
+      }
+    }
+  }
   /**
    * Launches a new task
    *
@@ -1388,7 +1459,7 @@ public class Driver implements CommandPr
     while (true) {
       while (resultIterator.hasNext()) {
         TaskResult tskRes = resultIterator.next();
-        if (tskRes.isRunning() == false) {
+        if (!tskRes.isRunning()) {
           return tskRes;
         }
       }

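For illustration only (not part of this commit): the branch order of the new JSON error reporting in run(), restated as a standalone helper. The mdf.error(...) argument lists mirror the calls in the diff above; the helper name, its parameters, and the imports are assumptions.

    import java.io.PrintStream;

    import org.apache.hadoop.hive.ql.ErrorMsg;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter;

    public class JsonErrorReportingSketch {
      static void report(MetaDataFormatter mdf, PrintStream out, String errorMessage,
          int responseCode, String sqlState, Throwable downstreamError) throws HiveException {
        if (downstreamError == null) {
          // No underlying exception captured: report message, code and SQLState only.
          mdf.error(out, errorMessage, responseCode, sqlState);
          return;
        }
        ErrorMsg canonical = ErrorMsg.getErrorMsg(responseCode);
        if (canonical != null && canonical != ErrorMsg.GENERIC_ERROR) {
          // A canonical code was already picked upstream (e.g. in compile()); keep it.
          mdf.error(out, errorMessage, responseCode, sqlState, null);
        } else if (downstreamError instanceof HiveException) {
          HiveException he = (HiveException) downstreamError;
          // Use the exception's canonical ErrorMsg; include the stack trace only for
          // generic (unexpected) errors.
          mdf.error(out, errorMessage, he.getCanonicalErrorMsg().getErrorCode(), sqlState,
              he.getCanonicalErrorMsg() == ErrorMsg.GENERIC_ERROR
                  ? org.apache.hadoop.util.StringUtils.stringifyException(he) : null);
        } else {
          // Fall back to matching the exception message against known ErrorMsg texts.
          mdf.error(out, errorMessage,
              ErrorMsg.getErrorMsg(downstreamError.getMessage()).getErrorCode(), sqlState,
              org.apache.hadoop.util.StringUtils.stringifyException(downstreamError));
        }
      }
    }
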
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Mon Jul 29 21:08:03 2013
@@ -32,11 +32,15 @@ import org.apache.hadoop.hive.ql.parse.A
 /**
  * List of all error messages.
  * This list contains both compile time and run-time errors.
- **/
+ *
+ * This class supports parametrized messages such as {@link #TRUNCATE_FOR_NON_MANAGED_TABLE}.  These are
+ * preferable over un-parametrized ones where an arbitrary String is appended to the end of the message,
+ * for example {@link #getMsg(String)} and {@link #INVALID_TABLE}.
+ */
 
 public enum ErrorMsg {
   // The error codes are Hive-specific and partitioned into the following ranges:
-  // 10000 to 19999: Errors occuring during semantic analysis and compilation of the query.
+  // 10000 to 19999: Errors occurring during semantic analysis and compilation of the query.
   // 20000 to 29999: Runtime errors where Hive believes that retries are unlikely to succeed.
   // 30000 to 39999: Runtime errors which Hive thinks may be transient and retrying may succeed.
   // 40000 to 49999: Errors where Hive is unable to advise about retries.
@@ -168,8 +172,8 @@ public enum ErrorMsg {
   DYNAMIC_PARTITION_STRICT_MODE(10096, "Dynamic partition strict mode requires at least one "
       + "static partition column. To turn this off set hive.exec.dynamic.partition.mode=nonstrict"),
   NONEXISTPARTCOL(10098, "Non-Partition column appears in the partition specification: "),
-  UNSUPPORTED_TYPE(10099, "DATE and DATETIME types aren't supported yet. Please use "
-      + "TIMESTAMP instead"),
+  UNSUPPORTED_TYPE(10099, "DATETIME type isn't supported yet. Please use "
+      + "DATE or TIMESTAMP instead"),
   CREATE_NON_NATIVE_AS(10100, "CREATE TABLE AS SELECT cannot be used for a non-native table"),
   LOAD_INTO_NON_NATIVE(10101, "A non-native table cannot be used as target for LOAD"),
   LOCKMGR_NOT_SPECIFIED(10102, "Lock manager not specified correctly, set hive.lock.manager"),
@@ -351,6 +355,12 @@ public enum ErrorMsg {
   TRUNCATE_LIST_BUCKETED_COLUMN(10240,
       "A column on which a partition/table is list bucketed cannot be truncated."),
 
+  TABLE_NOT_PARTITIONED(10241, "Table {0} is not a partitioned table", true),
+  DATABSAE_ALREADY_EXISTS(10242, "Database {0} already exists", true),
+  CANNOT_REPLACE_COLUMNS(10243, "Replace columns is not supported for table {0}. SerDe may be incompatible.", true),
+  BAD_LOCATION_VALUE(10244, "{0}  is not absolute or has no scheme information.  Please specify a complete absolute uri with scheme information."),
+  UNSUPPORTED_ALTER_TBL_OP(10245, "{0} alter table options is not supported"),
+
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
   SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. "
       + "It may have crashed with an error."),
@@ -488,7 +498,7 @@ public enum ErrorMsg {
 
   /**
    * For a given error message string, searches for a <code>ErrorMsg</code> enum
-   * that appears to be a match. If an match is found, returns the
+   * that appears to be a match. If a match is found, returns the
    * <code>SQLState</code> associated with the <code>ErrorMsg</code>. If a match
    * is not found or <code>ErrorMsg</code> has no <code>SQLState</code>, returns
    * the <code>SQLState</code> bound to the <code>GENERIC_ERROR</code>
@@ -605,10 +615,41 @@ public enum ErrorMsg {
   public String format(String reason) {
     return format(new String[]{reason});
   }
-
+  /**
+   * If the message is parametrized, this will fill the parameters with the supplied
+   * {@code reasons}; otherwise {@code reasons} are appended at the end of the
+   * message.
+   */
   public String format(String... reasons) {
-    assert format != null;
-    return format.format(reasons);
+    /* Not all messages are parametrized, even those that should have been, e.g. {@link #INVALID_TABLE}.
+     INVALID_TABLE is usually used with {@link #getMsg(String)}.
+     This method can also be used with INVALID_TABLE and the like and will match getMsg(String) behavior.
+
+     Another example: {@link #INVALID_PARTITION}.  Ideally you want the message to have two parameters, one for
+     the partition name and one for the table name.  Since this is already defined w/o any parameters, one can still call
+     {@code INVALID_PARTITION.format("<partName> <tableName>")}.  This way the message text will be slightly
+     different but at least the errorCode will match.  Note that this should not be abused by adding anything other
+     than what should have been parameter names, to keep the msg text standardized.
+     */
+    if(reasons == null || reasons.length == 0) {
+      return getMsg();
+    }
+    if(format != null) {
+      return format.format(reasons);
+    }
+    if(reasons.length > 1) {
+      StringBuilder sb = new StringBuilder();
+      for(String re : reasons) {
+        if(re != null) {
+          if(sb.length() > 0) {
+            sb.append(" ");
+          }
+          sb.append(re);
+        }
+      }
+      return getMsg(sb.toString());
+    }
+    return getMsg(reasons[0]);
   }
 
   public String getErrorCodedMsg() {

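For illustration only (not part of this commit): the two behaviours of the new format(String...) fallback, shown with plain java.text.MessageFormat. The parametrized message text is taken from TABLE_NOT_PARTITIONED above; the un-parametrized text is illustrative, only the placeholder-versus-append distinction matters.

    import java.text.MessageFormat;

    public class ErrorMsgFormatSketch {
      public static void main(String[] args) {
        // Parametrized message (like TABLE_NOT_PARTITIONED above): placeholders are filled in.
        MessageFormat parametrized = new MessageFormat("Table {0} is not a partitioned table");
        System.out.println(parametrized.format(new String[] { "web_logs" }));
        // -> Table web_logs is not a partitioned table

        // Un-parametrized message (like INVALID_TABLE; text here is illustrative): the
        // fallback in format(String...) simply appends the joined reasons to the message.
        String unparametrized = "Table not found";
        System.out.println(unparametrized + " " + "web_logs");
      }
    }
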
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java Mon Jul 29 21:08:03 2013
@@ -38,10 +38,10 @@ import java.util.concurrent.ConcurrentHa
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.ConditionalTask;
-import org.apache.hadoop.hive.ql.exec.ExecDriver;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
@@ -233,7 +233,7 @@ public class QueryPlan implements Serial
         mapTask.setTaskId(stage.getStageId() + "_MAP");
         mapTask.setTaskType(TaskType.MAP);
         stage.addToTaskList(mapTask);
-        populateOperatorGraph(mapTask, mrTask.getWork().getAliasToWork()
+        populateOperatorGraph(mapTask, mrTask.getWork().getMapWork().getAliasToWork()
             .values());
 
         // populate reduce task
@@ -245,7 +245,7 @@ public class QueryPlan implements Serial
           stage.addToTaskList(reduceTask);
           Collection<Operator<? extends OperatorDesc>> reducerTopOps =
             new ArrayList<Operator<? extends OperatorDesc>>();
-          reducerTopOps.add(mrTask.getWork().getReducer());
+          reducerTopOps.add(mrTask.getWork().getReduceWork().getReducer());
           populateOperatorGraph(reduceTask, reducerTopOps);
         }
       } else {
@@ -382,7 +382,7 @@ public class QueryPlan implements Serial
       }
       if (task instanceof ExecDriver) {
         ExecDriver mrTask = (ExecDriver) task;
-        extractOperatorCounters(mrTask.getWork().getAliasToWork().values(),
+        extractOperatorCounters(mrTask.getWork().getMapWork().getAliasToWork().values(),
             task.getId() + "_MAP");
         if (mrTask.mapStarted()) {
           started.add(task.getId() + "_MAP");
@@ -393,7 +393,7 @@ public class QueryPlan implements Serial
         if (mrTask.hasReduce()) {
           Collection<Operator<? extends OperatorDesc>> reducerTopOps =
             new ArrayList<Operator<? extends OperatorDesc>>();
-          reducerTopOps.add(mrTask.getWork().getReducer());
+          reducerTopOps.add(mrTask.getWork().getReduceWork().getReducer());
           extractOperatorCounters(reducerTopOps, task.getId() + "_REDUCE");
           if (mrTask.reduceStarted()) {
             started.add(task.getId() + "_REDUCE");

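For illustration only (not part of this commit): the accessor path after the MapredWork split, using only calls that appear in the hunks above; the surrounding class, method, and imports are assumptions.

    import java.util.Collection;

    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
    import org.apache.hadoop.hive.ql.plan.OperatorDesc;

    public class PlanWalkSketch {
      static void walk(ExecDriver mrTask) {
        // Map-side operator trees now hang off the MapWork part of the plan ...
        Collection<Operator<? extends OperatorDesc>> mapTopOps =
            mrTask.getWork().getMapWork().getAliasToWork().values();
        // ... and the reducer is reached through the ReduceWork part.
        if (mrTask.hasReduce()) {
          Operator<? extends OperatorDesc> reducer =
              mrTask.getWork().getReduceWork().getReducer();
        }
      }
    }
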
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java Mon Jul 29 21:08:03 2013
@@ -74,7 +74,7 @@ public class ColumnStatsTask extends Tas
     super.initialize(conf, queryPlan, ctx);
     work.initializeForFetch();
     try {
-      JobConf job = new JobConf(conf, ExecDriver.class);
+      JobConf job = new JobConf(conf);
       ftOp = new FetchOperator(work.getfWork(), job);
     } catch (Exception e) {
       LOG.error(StringUtils.stringifyException(e));

Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java?rev=1508202&r1=1508201&r2=1508202&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java Mon Jul 29 21:08:03 2013
@@ -331,6 +331,7 @@ public abstract class CommonJoinOperator
     for (AbstractRowContainer<ArrayList<Object>> alw : storage) {
       alw.clear();
     }
+    super.startGroup();
   }
 
   protected int getNextSize(int sz) {
@@ -360,26 +361,46 @@ public abstract class CommonJoinOperator
   // filter tags for objects
   protected transient short[] filterTags;
 
-  // ANDed value of all filter tags in current join group
-  // if any of values passes on outer join alias (which makes zero for the tag alias),
-  // it means there exists a pair for it, and no need to check outer join (just do inner join)
-  //
-  // for example, with table a, b something like,
-  //   a, b = 100, 10 | 100, 20 | 100, 30
-  //
-  // the query "a FOJ b ON a.k=b.k AND a.v>0 AND b.v>20" makes values with tag
-  //
-  //   a = 100, 10, 00000010 | 100, 20, 00000010 | 100, 30, 00000010 : 0/1 for 'b' (alias 1)
-  //   b = 100, 10, 00000001 | 100, 20, 00000001 | 100, 30, 00000000 : 0/1 for 'a' (alias 0)
-  //
-  // which makes aliasFilterTags for a = 00000010, for b = 00000000
-  //
-  // for LO, b = 0000000(0) means there is a pair object(s) in 'b' (has no 'a'-null case)
-  // for RO, a = 000000(1)0 means there is no pair object in 'a' (has null-'b' case)
-  //
-  // result : 100, 10 + 100, 30 | 100, 20 + 100, 30 | 100, 30 + 100, 30 |
-  //          N       + 100, 10 | N       + 100, 20
-  //
+  /**
+   * On filterTags
+   *
+   * ANDed value of all filter tags in current join group
+   * if any of values passes on outer join alias (which makes zero for the tag alias),
+   * it means there exists a pair for it, and it can safely be regarded as an inner join
+   *
+   * for example, with table a, b something like,
+   *   a = 100, 10 | 100, 20 | 100, 30
+   *   b = 100, 10 | 100, 20 | 100, 30
+   *
+   * the query "a FO b ON a.k=b.k AND a.v>10 AND b.v>30" makes filter map
+   *   0(a) = [1(b),1] : a.v>10
+   *   1(b) = [0(a),1] : b.v>30
+   *
+   * for filtered rows in a (100,10) create a-NULL
+   * for filtered rows in b (100,10) (100,20) (100,30) create NULL-b
+   *
+   * with 0(a) = [1(b),1] : a.v>10
+   *   100, 10 = 00000010 (filtered)
+   *   100, 20 = 00000000 (valid)
+   *   100, 30 = 00000000 (valid)
+   * -------------------------
+   *       sum = 00000000 : for valid rows in b, there is at least one pair in a
+   *
+   * with 1(b) = [0(a),1] : b.v>30
+   *   100, 10 = 00000001 (filtered)
+   *   100, 20 = 00000001 (filtered)
+   *   100, 30 = 00000001 (filtered)
+   * -------------------------
+   *       sum = 00000001 : for valid rows in a (100,20) (100,30), there is no pair in b
+   *
+   * result :
+   *   100, 10 :   N,  N
+   *     N,  N : 100, 10
+   *     N,  N : 100, 20
+   *     N,  N : 100, 30
+   *   100, 20 :   N,  N
+   *   100, 30 :   N,  N
+   */
   protected transient short[] aliasFilterTags;
 
   // all evaluation should be processed here for valid aliasFilterTags



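For illustration only (not part of this commit): the bit arithmetic described in the comment above, worked through as a standalone sketch using the same a.v>10 / b.v>30 example. The class and helper names are made up; this is not Hive code.

    public class AliasFilterTagSketch {
      public static void main(String[] args) {
        // Filter tags of the rows of 'a' checked against a.v > 10; bit 1 (alias b) is set
        // when a row is filtered: (100,10) is filtered, (100,20) and (100,30) pass.
        byte[] tagsOfA = { 0x02, 0x00, 0x00 };
        // Filter tags of the rows of 'b' checked against b.v > 30; bit 0 (alias a) is set
        // for every row because none of them passes.
        byte[] tagsOfB = { 0x01, 0x01, 0x01 };

        System.out.println("a: " + bits(and(tagsOfA)));  // 00000000
        System.out.println("b: " + bits(and(tagsOfB)));  // 00000001
        // A zero bit in the ANDed value means at least one row in the group passed the
        // filter for the other alias, so that side can be joined as an inner join; a one
        // bit means no row passed and the outer join has to emit NULL-padded rows.
      }

      static byte and(byte[] tags) {
        byte acc = (byte) 0xff;
        for (byte t : tags) {
          acc &= t;
        }
        return acc;
      }

      static String bits(byte b) {
        return String.format("%8s", Integer.toBinaryString(b & 0xff)).replace(' ', '0');
      }
    }
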