hive-commits mailing list archives

From: ser...@apache.org
Subject: hive git commit: HIVE-11676 : implement metastore API to do file footer PPD (Sergey Shelukhin, reviewed by Alan Gates)
Date: Fri, 16 Oct 2015 20:05:57 GMT
Repository: hive
Updated Branches:
  refs/heads/master bb05af06e -> c97f518d8


HIVE-11676 : implement metastore API to do file footer PPD (Sergey Shelukhin, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c97f518d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c97f518d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c97f518d

Branch: refs/heads/master
Commit: c97f518d8f5f8238612e23eaacb0d15ec0cf324b
Parents: bb05af0
Author: Sergey Shelukhin <sershe@apache.org>
Authored: Fri Oct 16 12:19:55 2015 -0700
Committer: Sergey Shelukhin <sershe@apache.org>
Committed: Fri Oct 16 13:05:46 2015 -0700

----------------------------------------------------------------------
 metastore/pom.xml                               |    5 +
 .../apache/hadoop/hive/metastore/Metastore.java | 1331 ++++++++++++++++++
 .../hadoop/hive/metastore/HiveMetaStore.java    |  169 +--
 .../hadoop/hive/metastore/ObjectStore.java      |   15 +-
 .../metastore/PartitionExpressionProxy.java     |   20 +
 .../apache/hadoop/hive/metastore/RawStore.java  |   30 +
 .../hive/metastore/hbase/HBaseReadWrite.java    |   12 +-
 .../hadoop/hive/metastore/hbase/HBaseStore.java |   31 +
 .../hadoop/hive/metastore/metastore.proto       |   29 +
 .../DummyRawStoreControlledCommit.java          |   11 +
 .../DummyRawStoreForJdoConnection.java          |   10 +
 .../MockPartitionExpressionForMetastore.java    |   12 +
 .../hadoop/hive/metastore/TestObjectStore.java  |   12 +
 .../hadoop/hive/metastore/hbase/MockUtils.java  |   12 +
 .../hadoop/hive/ql/io/orc/FileMetaInfo.java     |    6 +-
 .../hive/ql/io/sarg/ConvertAstToSearchArg.java  |   11 +-
 .../ppr/PartitionExpressionForMetastore.java    |   40 +
 17 files changed, 1636 insertions(+), 120 deletions(-)
----------------------------------------------------------------------
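
For orientation, the new Metastore.SplitInfo / SplitInfos messages generated below are the payload the footer-PPD path returns per file. A minimal sketch of assembling and serializing one with the generated builders follows; the class name, the literal values, and the notion of one SplitInfo per surviving ORC stripe are illustrative assumptions, not code from this commit:

  import org.apache.hadoop.hive.metastore.Metastore;

  public class SplitInfosEncodeSketch {
    // Sketch only: builds a SplitInfos payload using the builders from the
    // generated Metastore.java added by this commit. One SplitInfo per
    // surviving stripe is an assumption about how footer PPD reports results.
    public static byte[] encode() {
      Metastore.SplitInfos.Builder result = Metastore.SplitInfos.newBuilder();
      result.addInfos(Metastore.SplitInfo.newBuilder()
          .setOffset(3L)      // byte offset of the stripe within the file (example value)
          .setLength(1024L)   // stripe length in bytes (example value)
          .setIndex(0)        // stripe index within the file (example value)
          .build());
      // Serialized form that could be stored or handed back by the metastore.
      return result.build().toByteArray();
    }
  }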


http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/pom.xml
----------------------------------------------------------------------
diff --git a/metastore/pom.xml b/metastore/pom.xml
index 2ab5cd3..f209d50 100644
--- a/metastore/pom.xml
+++ b/metastore/pom.xml
@@ -268,6 +268,11 @@
                       <arg value="-I=${protobuf.src.dir}/org/apache/hadoop/hive/metastore/hbase"/>
                       <arg value="${protobuf.src.dir}/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto"/>
                     </exec>
+                    <exec executable="protoc" failonerror="true">
+                      <arg value="--java_out=${protobuf.build.dir}"/>
+                      <arg value="-I=${protobuf.src.dir}/org/apache/hadoop/hive/metastore"/>
+                      <arg value="${protobuf.src.dir}/org/apache/hadoop/hive/metastore/metastore.proto"/>
+                    </exec>
                   </target>
                 </configuration>
                 <goals>

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java
new file mode 100644
index 0000000..416ae9d
--- /dev/null
+++ b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java
@@ -0,0 +1,1331 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: metastore.proto
+
+package org.apache.hadoop.hive.metastore;
+
+public final class Metastore {
+  private Metastore() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface SplitInfoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required int64 offset = 1;
+    /**
+     * <code>required int64 offset = 1;</code>
+     */
+    boolean hasOffset();
+    /**
+     * <code>required int64 offset = 1;</code>
+     */
+    long getOffset();
+
+    // required int64 length = 2;
+    /**
+     * <code>required int64 length = 2;</code>
+     */
+    boolean hasLength();
+    /**
+     * <code>required int64 length = 2;</code>
+     */
+    long getLength();
+
+    // required int32 index = 3;
+    /**
+     * <code>required int32 index = 3;</code>
+     */
+    boolean hasIndex();
+    /**
+     * <code>required int32 index = 3;</code>
+     */
+    int getIndex();
+  }
+  /**
+   * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfo}
+   */
+  public static final class SplitInfo extends
+      com.google.protobuf.GeneratedMessage
+      implements SplitInfoOrBuilder {
+    // Use SplitInfo.newBuilder() to construct.
+    private SplitInfo(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private SplitInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final SplitInfo defaultInstance;
+    public static SplitInfo getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public SplitInfo getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private SplitInfo(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              offset_ = input.readInt64();
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              length_ = input.readInt64();
+              break;
+            }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              index_ = input.readInt32();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hive.metastore.Metastore.SplitInfo.class, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<SplitInfo> PARSER =
+        new com.google.protobuf.AbstractParser<SplitInfo>() {
+      public SplitInfo parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new SplitInfo(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<SplitInfo> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required int64 offset = 1;
+    public static final int OFFSET_FIELD_NUMBER = 1;
+    private long offset_;
+    /**
+     * <code>required int64 offset = 1;</code>
+     */
+    public boolean hasOffset() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required int64 offset = 1;</code>
+     */
+    public long getOffset() {
+      return offset_;
+    }
+
+    // required int64 length = 2;
+    public static final int LENGTH_FIELD_NUMBER = 2;
+    private long length_;
+    /**
+     * <code>required int64 length = 2;</code>
+     */
+    public boolean hasLength() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required int64 length = 2;</code>
+     */
+    public long getLength() {
+      return length_;
+    }
+
+    // required int32 index = 3;
+    public static final int INDEX_FIELD_NUMBER = 3;
+    private int index_;
+    /**
+     * <code>required int32 index = 3;</code>
+     */
+    public boolean hasIndex() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>required int32 index = 3;</code>
+     */
+    public int getIndex() {
+      return index_;
+    }
+
+    private void initFields() {
+      offset_ = 0L;
+      length_ = 0L;
+      index_ = 0;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasOffset()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasLength()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasIndex()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeInt64(1, offset_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeInt64(2, length_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeInt32(3, index_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt64Size(1, offset_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt64Size(2, length_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt32Size(3, index_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfo prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfo}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hive.metastore.Metastore.SplitInfo.class, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hive.metastore.Metastore.SplitInfo.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        offset_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        length_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        index_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor;
+      }
+
+      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo getDefaultInstanceForType() {
+        return org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo build() {
+        org.apache.hadoop.hive.metastore.Metastore.SplitInfo result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo buildPartial() {
+        org.apache.hadoop.hive.metastore.Metastore.SplitInfo result = new org.apache.hadoop.hive.metastore.Metastore.SplitInfo(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.offset_ = offset_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.length_ = length_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.index_ = index_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hive.metastore.Metastore.SplitInfo) {
+          return mergeFrom((org.apache.hadoop.hive.metastore.Metastore.SplitInfo)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hive.metastore.Metastore.SplitInfo other) {
+        if (other == org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance()) return this;
+        if (other.hasOffset()) {
+          setOffset(other.getOffset());
+        }
+        if (other.hasLength()) {
+          setLength(other.getLength());
+        }
+        if (other.hasIndex()) {
+          setIndex(other.getIndex());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasOffset()) {
+
+          return false;
+        }
+        if (!hasLength()) {
+
+          return false;
+        }
+        if (!hasIndex()) {
+
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hive.metastore.Metastore.SplitInfo parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hive.metastore.Metastore.SplitInfo) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required int64 offset = 1;
+      private long offset_ ;
+      /**
+       * <code>required int64 offset = 1;</code>
+       */
+      public boolean hasOffset() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required int64 offset = 1;</code>
+       */
+      public long getOffset() {
+        return offset_;
+      }
+      /**
+       * <code>required int64 offset = 1;</code>
+       */
+      public Builder setOffset(long value) {
+        bitField0_ |= 0x00000001;
+        offset_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required int64 offset = 1;</code>
+       */
+      public Builder clearOffset() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        offset_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // required int64 length = 2;
+      private long length_ ;
+      /**
+       * <code>required int64 length = 2;</code>
+       */
+      public boolean hasLength() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required int64 length = 2;</code>
+       */
+      public long getLength() {
+        return length_;
+      }
+      /**
+       * <code>required int64 length = 2;</code>
+       */
+      public Builder setLength(long value) {
+        bitField0_ |= 0x00000002;
+        length_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required int64 length = 2;</code>
+       */
+      public Builder clearLength() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        length_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // required int32 index = 3;
+      private int index_ ;
+      /**
+       * <code>required int32 index = 3;</code>
+       */
+      public boolean hasIndex() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>required int32 index = 3;</code>
+       */
+      public int getIndex() {
+        return index_;
+      }
+      /**
+       * <code>required int32 index = 3;</code>
+       */
+      public Builder setIndex(int value) {
+        bitField0_ |= 0x00000004;
+        index_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required int32 index = 3;</code>
+       */
+      public Builder clearIndex() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        index_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.SplitInfo)
+    }
+
+    static {
+      defaultInstance = new SplitInfo(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.SplitInfo)
+  }
+
+  public interface SplitInfosOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
+    /**
+     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+     */
+    java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo>
+        getInfosList();
+    /**
+     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+     */
+    org.apache.hadoop.hive.metastore.Metastore.SplitInfo getInfos(int index);
+    /**
+     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+     */
+    int getInfosCount();
+    /**
+     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
+        getInfosOrBuilderList();
+    /**
+     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+     */
+    org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder getInfosOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfos}
+   */
+  public static final class SplitInfos extends
+      com.google.protobuf.GeneratedMessage
+      implements SplitInfosOrBuilder {
+    // Use SplitInfos.newBuilder() to construct.
+    private SplitInfos(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private SplitInfos(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final SplitInfos defaultInstance;
+    public static SplitInfos getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public SplitInfos getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private SplitInfos(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                infos_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.Metastore.SplitInfo>();
+                mutable_bitField0_ |= 0x00000001;
+              }
+              infos_.add(input.readMessage(org.apache.hadoop.hive.metastore.Metastore.SplitInfo.PARSER, extensionRegistry));
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          infos_ = java.util.Collections.unmodifiableList(infos_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hive.metastore.Metastore.SplitInfos.class, org.apache.hadoop.hive.metastore.Metastore.SplitInfos.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<SplitInfos> PARSER =
+        new com.google.protobuf.AbstractParser<SplitInfos>() {
+      public SplitInfos parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new SplitInfos(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<SplitInfos> getParserForType() {
+      return PARSER;
+    }
+
+    // repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
+    public static final int INFOS_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> infos_;
+    /**
+     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+     */
+    public java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> getInfosList() {
+      return infos_;
+    }
+    /**
+     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+     */
+    public java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
+        getInfosOrBuilderList() {
+      return infos_;
+    }
+    /**
+     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+     */
+    public int getInfosCount() {
+      return infos_.size();
+    }
+    /**
+     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+     */
+    public org.apache.hadoop.hive.metastore.Metastore.SplitInfo getInfos(int index) {
+      return infos_.get(index);
+    }
+    /**
+     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+     */
+    public org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder getInfosOrBuilder(
+        int index) {
+      return infos_.get(index);
+    }
+
+    private void initFields() {
+      infos_ = java.util.Collections.emptyList();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      for (int i = 0; i < getInfosCount(); i++) {
+        if (!getInfos(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      for (int i = 0; i < infos_.size(); i++) {
+        output.writeMessage(1, infos_.get(i));
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      for (int i = 0; i < infos_.size(); i++) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, infos_.get(i));
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfos prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfos}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hive.metastore.Metastore.SplitInfosOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hive.metastore.Metastore.SplitInfos.class, org.apache.hadoop.hive.metastore.Metastore.SplitInfos.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hive.metastore.Metastore.SplitInfos.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getInfosFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (infosBuilder_ == null) {
+          infos_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+        } else {
+          infosBuilder_.clear();
+        }
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor;
+      }
+
+      public org.apache.hadoop.hive.metastore.Metastore.SplitInfos getDefaultInstanceForType() {
+        return org.apache.hadoop.hive.metastore.Metastore.SplitInfos.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hive.metastore.Metastore.SplitInfos build() {
+        org.apache.hadoop.hive.metastore.Metastore.SplitInfos result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hive.metastore.Metastore.SplitInfos buildPartial() {
+        org.apache.hadoop.hive.metastore.Metastore.SplitInfos result = new org.apache.hadoop.hive.metastore.Metastore.SplitInfos(this);
+        int from_bitField0_ = bitField0_;
+        if (infosBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            infos_ = java.util.Collections.unmodifiableList(infos_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.infos_ = infos_;
+        } else {
+          result.infos_ = infosBuilder_.build();
+        }
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hive.metastore.Metastore.SplitInfos) {
+          return mergeFrom((org.apache.hadoop.hive.metastore.Metastore.SplitInfos)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hive.metastore.Metastore.SplitInfos other) {
+        if (other == org.apache.hadoop.hive.metastore.Metastore.SplitInfos.getDefaultInstance()) return this;
+        if (infosBuilder_ == null) {
+          if (!other.infos_.isEmpty()) {
+            if (infos_.isEmpty()) {
+              infos_ = other.infos_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureInfosIsMutable();
+              infos_.addAll(other.infos_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.infos_.isEmpty()) {
+            if (infosBuilder_.isEmpty()) {
+              infosBuilder_.dispose();
+              infosBuilder_ = null;
+              infos_ = other.infos_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              infosBuilder_ =
+                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+                   getInfosFieldBuilder() : null;
+            } else {
+              infosBuilder_.addAllMessages(other.infos_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        for (int i = 0; i < getInfosCount(); i++) {
+          if (!getInfos(i).isInitialized()) {
+
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hive.metastore.Metastore.SplitInfos parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hive.metastore.Metastore.SplitInfos) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
+      private java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> infos_ =
+        java.util.Collections.emptyList();
+      private void ensureInfosIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          infos_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.Metastore.SplitInfo>(infos_);
+          bitField0_ |= 0x00000001;
+         }
+      }
+
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hive.metastore.Metastore.SplitInfo, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder, org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder> infosBuilder_;
+
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> getInfosList() {
+        if (infosBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(infos_);
+        } else {
+          return infosBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public int getInfosCount() {
+        if (infosBuilder_ == null) {
+          return infos_.size();
+        } else {
+          return infosBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo getInfos(int index) {
+        if (infosBuilder_ == null) {
+          return infos_.get(index);
+        } else {
+          return infosBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public Builder setInfos(
+          int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo value) {
+        if (infosBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureInfosIsMutable();
+          infos_.set(index, value);
+          onChanged();
+        } else {
+          infosBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public Builder setInfos(
+          int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder builderForValue) {
+        if (infosBuilder_ == null) {
+          ensureInfosIsMutable();
+          infos_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          infosBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public Builder addInfos(org.apache.hadoop.hive.metastore.Metastore.SplitInfo value) {
+        if (infosBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureInfosIsMutable();
+          infos_.add(value);
+          onChanged();
+        } else {
+          infosBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public Builder addInfos(
+          int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo value) {
+        if (infosBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureInfosIsMutable();
+          infos_.add(index, value);
+          onChanged();
+        } else {
+          infosBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public Builder addInfos(
+          org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder builderForValue) {
+        if (infosBuilder_ == null) {
+          ensureInfosIsMutable();
+          infos_.add(builderForValue.build());
+          onChanged();
+        } else {
+          infosBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public Builder addInfos(
+          int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder builderForValue) {
+        if (infosBuilder_ == null) {
+          ensureInfosIsMutable();
+          infos_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          infosBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public Builder addAllInfos(
+          java.lang.Iterable<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfo> values) {
+        if (infosBuilder_ == null) {
+          ensureInfosIsMutable();
+          super.addAll(values, infos_);
+          onChanged();
+        } else {
+          infosBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public Builder clearInfos() {
+        if (infosBuilder_ == null) {
+          infos_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          infosBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public Builder removeInfos(int index) {
+        if (infosBuilder_ == null) {
+          ensureInfosIsMutable();
+          infos_.remove(index);
+          onChanged();
+        } else {
+          infosBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder getInfosBuilder(
+          int index) {
+        return getInfosFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder getInfosOrBuilder(
+          int index) {
+        if (infosBuilder_ == null) {
+          return infos_.get(index);  } else {
+          return infosBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
+           getInfosOrBuilderList() {
+        if (infosBuilder_ != null) {
+          return infosBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(infos_);
+        }
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder addInfosBuilder() {
+        return getInfosFieldBuilder().addBuilder(
+            org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder addInfosBuilder(
+          int index) {
+        return getInfosFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder>
+           getInfosBuilderList() {
+        return getInfosFieldBuilder().getBuilderList();
+      }
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hive.metastore.Metastore.SplitInfo, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder, org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
+          getInfosFieldBuilder() {
+        if (infosBuilder_ == null) {
+          infosBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+              org.apache.hadoop.hive.metastore.Metastore.SplitInfo, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder, org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>(
+                  infos_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
+                  getParentForChildren(),
+                  isClean());
+          infos_ = null;
+        }
+        return infosBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.SplitInfos)
+    }
+
+    static {
+      defaultInstance = new SplitInfos(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.SplitInfos)
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_hadoop_hive_metastore_SplitInfo_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_hadoop_hive_metastore_SplitInfos_fieldAccessorTable;
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\017metastore.proto\022 org.apache.hadoop.hiv" +
+      "e.metastore\":\n\tSplitInfo\022\016\n\006offset\030\001 \002(\003" +
+      "\022\016\n\006length\030\002 \002(\003\022\r\n\005index\030\003 \002(\005\"H\n\nSplit" +
+      "Infos\022:\n\005infos\030\001 \003(\0132+.org.apache.hadoop" +
+      ".hive.metastore.SplitInfo"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_org_apache_hadoop_hive_metastore_SplitInfo_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor,
+              new java.lang.String[] { "Offset", "Length", "Index", });
+          internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor =
+            getDescriptor().getMessageTypes().get(1);
+          internal_static_org_apache_hadoop_hive_metastore_SplitInfos_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor,
+              new java.lang.String[] { "Infos", });
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
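
Conversely, a minimal sketch of reading such a payload back with the generated parser; the surrounding class is hypothetical and only the parseFrom/getInfos accessors come from the generated code above:

  import com.google.protobuf.InvalidProtocolBufferException;
  import org.apache.hadoop.hive.metastore.Metastore;

  public class SplitInfosDecodeSketch {
    // Sketch only: parseFrom throws InvalidProtocolBufferException if the bytes
    // are malformed or a required field (offset/length/index) is missing.
    public static void decode(byte[] bytes) throws InvalidProtocolBufferException {
      Metastore.SplitInfos infos = Metastore.SplitInfos.parseFrom(bytes);
      for (Metastore.SplitInfo si : infos.getInfosList()) {
        System.out.println("offset=" + si.getOffset()
            + " length=" + si.getLength()
            + " index=" + si.getIndex());
      }
    }
  }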

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index ccb4c98..40e6e62 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -45,105 +45,7 @@ import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
-import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
-import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
-import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
-import org.apache.hadoop.hive.metastore.api.ClearFileMetadataRequest;
-import org.apache.hadoop.hive.metastore.api.ClearFileMetadataResult;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
-import org.apache.hadoop.hive.metastore.api.CompactionRequest;
-import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
-import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
-import org.apache.hadoop.hive.metastore.api.DropPartitionsResult;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FireEventRequest;
-import org.apache.hadoop.hive.metastore.api.FireEventResponse;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
-import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprRequest;
-import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprResult;
-import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest;
-import org.apache.hadoop.hive.metastore.api.GetFileMetadataResult;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
-import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
-import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest;
-import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse;
-import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest;
-import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleResponse;
-import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.LockRequest;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
-import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
-import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD;
-import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;
-import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
-import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult;
-import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
-import org.apache.hadoop.hive.metastore.api.PartitionsStatsResult;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
-import org.apache.hadoop.hive.metastore.api.PutFileMetadataRequest;
-import org.apache.hadoop.hive.metastore.api.PutFileMetadataResult;
-import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
-import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
-import org.apache.hadoop.hive.metastore.api.TableStatsResult;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
-import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
-import org.apache.hadoop.hive.metastore.api.TxnOpenException;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.UnlockRequest;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
@@ -5707,26 +5609,64 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public GetFileMetadataByExprResult get_file_metadata_by_expr(GetFileMetadataByExprRequest req)
         throws TException {
-      throw new UnsupportedOperationException("Not implemented yet");
+      GetFileMetadataByExprResult result = new GetFileMetadataByExprResult();
+      RawStore ms = getMS();
+      if (!ms.isFileMetadataSupported()) {
+        result.setIsSupported(false);
+        result.setMetadata(EMPTY_MAP_FM2); // Set the required field.
+        return result;
+      }
+      result.setIsSupported(true);
+      List<Long> fileIds = req.getFileIds();
+      byte[] expr = req.getExpr();
+      boolean needMetadata = req.isDoGetFooters();
+      ByteBuffer[] metadatas = new ByteBuffer[fileIds.size()];
+      ByteBuffer[] stripeBitsets = new ByteBuffer[fileIds.size()];
+      boolean[] eliminated = new boolean[fileIds.size()];
+      getMS().getFileMetadataByExpr(fileIds, expr, metadatas, stripeBitsets, eliminated);
+      for (int i = 0; i < metadatas.length; ++i) {
+        long fileId = fileIds.get(i);
+        ByteBuffer metadata = metadatas[i];
+        if (metadata == null) continue;
+        metadata = (eliminated[i] || !needMetadata) ? null
+            : handleReadOnlyBufferForThrift(metadata);
+        MetadataPpdResult mpr = new MetadataPpdResult();
+        ByteBuffer bitset = eliminated[i] ? null : handleReadOnlyBufferForThrift(stripeBitsets[i]);
+        mpr.setMetadata(metadata);
+        mpr.setIncludeBitset(bitset);
+        result.putToMetadata(fileId, mpr);
+      }
+      if (!result.isSetMetadata()) {
+        result.setMetadata(EMPTY_MAP_FM2); // Set the required field.
+      }
+      return result;
     }
 
+    private final static Map<Long, ByteBuffer> EMPTY_MAP_FM1 = new HashMap<Long, ByteBuffer>(1);
+    private final static Map<Long, MetadataPpdResult> EMPTY_MAP_FM2 =
+        new HashMap<Long, MetadataPpdResult>(1);
+
     @Override
     public GetFileMetadataResult get_file_metadata(GetFileMetadataRequest req) throws TException {
-      List<Long> fileIds = req.getFileIds();
-      ByteBuffer[] metadatas = getMS().getFileMetadata(fileIds);
       GetFileMetadataResult result = new GetFileMetadataResult();
-      result.setIsSupported(metadatas != null);
-      if (metadatas != null) {
-        assert metadatas.length == fileIds.size();
-        for (int i = 0; i < metadatas.length; ++i) {
-          ByteBuffer bb = metadatas[i];
-          if (bb == null) continue;
-          bb = handleReadOnlyBufferForThrift(bb);
-          result.putToMetadata(fileIds.get(i), bb);
-        }
+      RawStore ms = getMS();
+      if (!ms.isFileMetadataSupported()) {
+        result.setIsSupported(false);
+        result.setMetadata(EMPTY_MAP_FM1); // Set the required field.
+        return result;
+      }
+      result.setIsSupported(true);
+      List<Long> fileIds = req.getFileIds();
+      ByteBuffer[] metadatas = ms.getFileMetadata(fileIds);
+      assert metadatas.length == fileIds.size();
+      for (int i = 0; i < metadatas.length; ++i) {
+        ByteBuffer bb = metadatas[i];
+        if (bb == null) continue;
+        bb = handleReadOnlyBufferForThrift(bb);
+        result.putToMetadata(fileIds.get(i), bb);
       }
       if (!result.isSetMetadata()) {
-        result.setMetadata(new HashMap<Long, ByteBuffer>()); // Set the required field.
+        result.setMetadata(EMPTY_MAP_FM1); // Set the required field.
       }
       return result;
     }
@@ -5744,7 +5684,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     @Override
     public PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req) throws TException {
-      getMS().putFileMetadata(req.getFileIds(), req.getMetadata());
+      RawStore ms = getMS();
+      if (ms.isFileMetadataSupported()) {
+        ms.putFileMetadata(req.getFileIds(), req.getMetadata());
+      }
       return new PutFileMetadataResult();
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index a02f179..f0c1893 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -7652,12 +7652,23 @@ public class ObjectStore implements RawStore, Configurable {
 
   @Override
   public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
-    return null; // Not supported for now; callers have to handle this accordingly.
+    throw new UnsupportedOperationException();
   }
 
   @Override
   public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata) {
-    // Not supported for now.
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean isFileMetadataSupported() {
+    return false;
+  }
+
+  @Override
+  public void getFileMetadataByExpr(List<Long> fileIds, byte[] expr,
+      ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+    throw new UnsupportedOperationException();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java
index 5195481..ed59829 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java
@@ -18,9 +18,12 @@
 
 package org.apache.hadoop.hive.metastore;
 
+import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.List;
 
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 
 /**
@@ -48,4 +51,21 @@ public interface PartitionExpressionProxy {
   public boolean filterPartitionsByExpr(List<String> partColumnNames,
       List<PrimitiveTypeInfo> partColumnTypeInfos, byte[] expr,
       String defaultPartitionName, List<String> partitionNames) throws MetaException;
+
+  /**
+   * Creates a SARG from its serialized representation.
+   * @param expr SARG serialized with Kryo.
+   * @return The deserialized SARG.
+   */
+  public SearchArgument createSarg(byte[] expr);
+
+  /**
+   * Applies the SARG to file metadata and produces a format-specific result for this file.
+   * @param sarg SARG
+   * @param byteBuffer File metadata from the metastore cache.
+   * @return The result to return to the client for this file, or null if the file is eliminated.
+   * @throws IOException
+   */
+  public ByteBuffer applySargToFileMetadata(SearchArgument sarg, ByteBuffer byteBuffer)
+      throws IOException;
 }
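
A minimal sketch (not part of the committed diff) of how a RawStore implementation might drive these two new proxy methods: deserialize the expression into a SARG once, then apply the SARG to each cached footer, treating a null return as "file eliminated". The class and parameter names below are illustrative; HBaseStore later in this patch follows the same pattern.

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;

public class ProxyPpdSketch {
  // Illustrative only: deserialize the expression once, then apply it to each cached footer.
  static void applyExprToFooters(PartitionExpressionProxy proxy, byte[] expr,
      ByteBuffer[] cachedFooters, ByteBuffer[] exprResults, boolean[] eliminated)
      throws IOException {
    SearchArgument sarg = proxy.createSarg(expr);
    for (int i = 0; i < cachedFooters.length; ++i) {
      if (cachedFooters[i] == null) continue;            // footer not in the cache
      ByteBuffer result = proxy.applySargToFileMetadata(sarg, cachedFooters[i]);
      eliminated[i] = (result == null);                  // null means the whole file is eliminated
      if (!eliminated[i]) {
        exprResults[i] = result;                         // e.g. serialized SplitInfos for ORC
      }
    }
  }
}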

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index 1968256..45428ed 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -594,7 +594,37 @@ public interface RawStore extends Configurable {
    */
   public void flushCache();
 
+  /**
+   * @param fileIds List of file IDs from the filesystem.
+   * @return File metadata buffers from the file metadata cache. The array is fileIds-sized, and
+   *         the entries (or nulls, if the metadata is not in the cache) correspond to fileIds in the list.
+   */
   ByteBuffer[] getFileMetadata(List<Long> fileIds) throws MetaException;
 
+  /**
+   * @param fileIds List of file IDs from the filesystem.
+   * @param metadata Metadata buffers corresponding to fileIds in the list.
+   */
   void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata) throws MetaException;
+
+  /**
+   * @return Whether file metadata cache is supported by this implementation.
+   */
+  boolean isFileMetadataSupported();
+
+  /**
+   * Gets file metadata from cache after applying a format-specific expression that can
+   * produce additional information based on file metadata and also filter the file list.
+   * @param fileIds List of file IDs from the filesystem.
+   * @param expr Format-specific serialized expression applicable to the files' metadata.
+   * @param metadatas Output parameter; fileIds-sized array to receive the metadata
+   *                  for the corresponding files, if any.
+   * @param exprResults Output parameter; fileIds-sized array to receive the format-specific
+   *                    expression results for the corresponding files.
+   * @param eliminated Output parameter; fileIds-sized array to receive the indication of whether
+   *                   the corresponding files are entirely eliminated by the expression.
+   */
+  void getFileMetadataByExpr(
+      List<Long> fileIds, byte[] expr, ByteBuffer[] metadatas,
+      ByteBuffer[] exprResults, boolean[] eliminated) throws MetaException;
 }
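
A minimal caller-side sketch of the new RawStore contract (not part of the committed diff): the caller allocates fileIds-sized output arrays and reads them positionally, with a null metadata entry meaning the footer is not cached. The class and helper names are illustrative; the HiveMetaStore handler earlier in this patch does the same thing.

import java.nio.ByteBuffer;
import java.util.List;

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class RawStorePpdCallerSketch {
  // Illustrative caller: every output array is fileIds-sized and is read by position.
  static void readByExpr(RawStore store, List<Long> fileIds, byte[] expr) throws MetaException {
    if (!store.isFileMetadataSupported()) {
      return;                                            // e.g. ObjectStore does not support the cache
    }
    ByteBuffer[] metadatas = new ByteBuffer[fileIds.size()];
    ByteBuffer[] exprResults = new ByteBuffer[fileIds.size()];
    boolean[] eliminated = new boolean[fileIds.size()];
    store.getFileMetadataByExpr(fileIds, expr, metadatas, exprResults, eliminated);
    for (int i = 0; i < fileIds.size(); ++i) {
      if (metadatas[i] == null || eliminated[i]) {
        continue;                                        // not cached, or eliminated by the expression
      }
      ByteBuffer formatSpecificResult = exprResults[i];  // e.g. ORC stripe boundaries
      // ... hand formatSpecificResult (and metadatas[i], if requested) back to the client ...
    }
  }
}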

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
index f69b4c7..951c081 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
@@ -1738,13 +1738,21 @@ public class HBaseReadWrite {
    * @return Serialized file metadata.
    */
   ByteBuffer[] getFileMetadata(List<Long> fileIds) throws IOException {
+    ByteBuffer[] result = new ByteBuffer[fileIds.size()];
+    getFileMetadata(fileIds, result);
+    return result;
+  }
+
+  /**
+   * @param fileIds file ID list.
+   * @param result Output parameter; fileIds-sized array that receives the serialized file metadata.
+   */
+  void getFileMetadata(List<Long> fileIds, ByteBuffer[] result) throws IOException {
     byte[][] keys = new byte[fileIds.size()][];
     for (int i = 0; i < fileIds.size(); ++i) {
       keys[i] = HBaseUtils.makeLongKey(fileIds.get(i));
     }
-    ByteBuffer[] result = new ByteBuffer[keys.length];
     multiRead(FILE_METADATA_TABLE, CATALOG_CF, CATALOG_COL, keys, result);
-    return result;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index df0fac3..1ea25f5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult;
 import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hive.common.util.HiveStringUtils;
@@ -2356,6 +2357,11 @@ public class HBaseStore implements RawStore {
   }
 
   @Override
+  public boolean isFileMetadataSupported() {
+    return true;
+  }
+
+  @Override
   public ByteBuffer[] getFileMetadata(List<Long> fileIds) throws MetaException {
     openTransaction();
     boolean commit = true;
@@ -2371,6 +2377,31 @@ public class HBaseStore implements RawStore {
   }
 
   @Override
+  public void getFileMetadataByExpr(List<Long> fileIds, byte[] expr, ByteBuffer[] metadatas,
+      ByteBuffer[] results, boolean[] eliminated) throws MetaException {
+    SearchArgument sarg = expressionProxy.createSarg(expr);
+    boolean commit = true;
+    try {
+      // For now, don't push anything into HBase, nor store anything special in HBase
+      getHBase().getFileMetadata(fileIds, metadatas);
+      for (int i = 0; i < metadatas.length;  ++i) {
+        if (metadatas[i] == null) continue;
+        ByteBuffer result = expressionProxy.applySargToFileMetadata(sarg, metadatas[i]);
+        eliminated[i] = (result == null);
+        if (!eliminated[i]) {
+          results[i] = result;
+        }
+      }
+    } catch (IOException e) {
+      commit = false;
+      LOG.error("Unable to get file metadata", e);
+      throw new MetaException("Error reading file metadata " + e.getMessage());
+    } finally {
+      commitOrRoleBack(commit);
+    }
+  }
+
+  @Override
   public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata) throws MetaException {
     openTransaction();
     boolean commit = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
----------------------------------------------------------------------
diff --git a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/metastore.proto b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
new file mode 100644
index 0000000..29b99b4
--- /dev/null
+++ b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/metastore.proto
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+
+message SplitInfo {
+  required int64 offset = 1;
+  required int64 length = 2;
+  required int32 index = 3;
+}
+
+message SplitInfos {
+  repeated SplitInfo infos = 1;
+}
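
SplitInfos is the payload that the ORC-side expression evaluation (PartitionExpressionForMetastore, later in this patch) serializes as its per-file result: the boundaries of the stripes that survive the SARG. A minimal sketch of decoding it with the generated Metastore classes, assuming the buffer holds a serialized SplitInfos message; the helper name is illustrative.

import java.nio.ByteBuffer;

import org.apache.hadoop.hive.metastore.Metastore.SplitInfo;
import org.apache.hadoop.hive.metastore.Metastore.SplitInfos;

import com.google.protobuf.InvalidProtocolBufferException;

public class SplitInfosDecodeSketch {
  // Illustrative: read the stripe boundaries encoded in a per-file PPD result.
  static void dump(ByteBuffer exprResult) throws InvalidProtocolBufferException {
    byte[] bytes = new byte[exprResult.remaining()];
    exprResult.duplicate().get(bytes);                   // copy without disturbing the buffer position
    SplitInfos infos = SplitInfos.parseFrom(bytes);      // protobuf-generated parser
    for (SplitInfo si : infos.getInfosList()) {
      System.out.println("stripe " + si.getIndex()
          + " offset=" + si.getOffset() + " length=" + si.getLength());
    }
  }
}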

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 0f3331a..d11c0d5 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -769,4 +769,15 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   @Override
   public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata) {
   }
+
+  @Override
+  public boolean isFileMetadataSupported() {
+    return false;
+  }
+
+
+  @Override
+  public void getFileMetadataByExpr(List<Long> fileIds, byte[] expr,
+      ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 126a2c2..2de049a 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -786,6 +786,16 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   @Override
   public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata) {
   }
+
+  @Override
+  public boolean isFileMetadataSupported() {
+    return false;
+  }
+
+  @Override
+  public void getFileMetadataByExpr(List<Long> fileIds, byte[] expr,
+      ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+  }
 }
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java
index bae1391..d72bf76 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java
@@ -19,8 +19,10 @@
 package org.apache.hadoop.hive.metastore;
 
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 
+import java.nio.ByteBuffer;
 import java.util.List;
 
 /**
@@ -38,4 +40,14 @@ public class MockPartitionExpressionForMetastore implements PartitionExpressionP
       List<String> partitionNames) throws MetaException {
     return false;
   }
+
+  @Override
+  public SearchArgument createSarg(byte[] expr) {
+    return null;
+  }
+
+  @Override
+  public ByteBuffer applySargToFileMetadata(SearchArgument sarg, ByteBuffer byteBuffer) {
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 78a9ea0..9089d1c 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.metastore;
 
+import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.junit.After;
 import org.junit.Assert;
@@ -67,6 +69,16 @@ public class TestObjectStore {
         throws MetaException {
       return false;
     }
+
+    @Override
+    public SearchArgument createSarg(byte[] expr) {
+      return null;
+    }
+
+    @Override
+    public ByteBuffer applySargToFileMetadata(SearchArgument sarg, ByteBuffer byteBuffer) {
+      return null;
+    }
   }
 
   @Before

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java
index 6c288f4..2198892 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java
@@ -31,12 +31,14 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -65,6 +67,16 @@ public class MockUtils {
       return false;
     }
 
+    @Override
+    public SearchArgument createSarg(byte[] expr) {
+      return null;
+    }
+
+    @Override
+    public ByteBuffer applySargToFileMetadata(SearchArgument sarg, ByteBuffer byteBuffer) {
+      return null;
+    }
+
   }
 
   static HBaseStore init(Configuration conf, HTableInterface htable,

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileMetaInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileMetaInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileMetaInfo.java
index 2853119..95c674e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileMetaInfo.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileMetaInfo.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.io.orc.OrcFile.WriterVersion;
  * that is useful for Reader implementation
  *
  */
-class FileMetaInfo {
+public class FileMetaInfo {
   ByteBuffer footerMetaAndPsBuffer;
   final String compressionType;
   final int bufferSize;
@@ -57,4 +57,8 @@ class FileMetaInfo {
     this.writerVersion = writerVersion;
     this.footerMetaAndPsBuffer = fullFooterBuffer;
   }
+
+  public OrcFile.WriterVersion getWriterVersion() {
+    return writerVersion;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
index 690b8c9..0fff9aa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
@@ -418,9 +418,16 @@ public class ConvertAstToSearchArg {
   }
 
 
+  private final static ThreadLocal<Kryo> kryo = new ThreadLocal<Kryo>() {
+    protected Kryo initialValue() { return new Kryo(); }
+  };
+
   public static SearchArgument create(String kryo) {
-    Input input = new Input(Base64.decodeBase64(kryo));
-    return new Kryo().readObject(input, SearchArgumentImpl.class);
+    return create(Base64.decodeBase64(kryo));
+  }
+
+  public static SearchArgument create(byte[] kryoBytes) {
+    return kryo.get().readObject(new Input(kryoBytes), SearchArgumentImpl.class);
   }
 
   public static SearchArgument createFromConf(Configuration conf) {
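
The new byte[] overload, combined with the per-thread Kryo instance, lets concurrent metastore threads deserialize SARGs without constructing a Kryo per call or sharing a single instance. A minimal usage sketch, with an illustrative helper name and an assumed Kryo-serialized input (e.g. the expr bytes carried in GetFileMetadataByExprRequest):

import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;

public class SargDecodeSketch {
  // Illustrative: exprBytes is assumed to be a Kryo-serialized SearchArgumentImpl.
  static SearchArgument decode(byte[] exprBytes) {
    return ConvertAstToSearchArg.create(exprBytes);      // uses a per-thread Kryo instance
  }
}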

http://git-wip-us.apache.org/repos/asf/hive/blob/c97f518d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionExpressionForMetastore.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionExpressionForMetastore.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionExpressionForMetastore.java
index 9ffa177..b76229c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionExpressionForMetastore.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionExpressionForMetastore.java
@@ -18,13 +18,23 @@
 
 package org.apache.hadoop.hive.ql.optimizer.ppr;
 
+import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.metastore.Metastore.SplitInfo;
+import org.apache.hadoop.hive.metastore.Metastore.SplitInfos;
 import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
+import org.apache.hadoop.hive.ql.io.orc.OrcProto;
+import org.apache.hadoop.hive.ql.io.orc.ReaderImpl;
+import org.apache.hadoop.hive.ql.io.orc.StripeInformation;
+import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
@@ -71,4 +81,34 @@ public class PartitionExpressionForMetastore implements PartitionExpressionProxy
     }
     return expr;
   }
+
+  @Override
+  public SearchArgument createSarg(byte[] expr) {
+    return ConvertAstToSearchArg.create(expr);
+  }
+
+  @Override
+  public ByteBuffer applySargToFileMetadata(
+      SearchArgument sarg, ByteBuffer byteBuffer) throws IOException {
+    // TODO: ideally we should store a shortened representation of only the necessary fields
+    //       in HBase; it will probably require custom SARG application code.
+    ReaderImpl.FooterInfo fi = ReaderImpl.extractMetaInfoFromFooter(byteBuffer, null);
+    OrcProto.Footer footer = fi.getFooter();
+    int stripeCount = footer.getStripesCount();
+    boolean[] result = OrcInputFormat.pickStripesViaTranslatedSarg(
+        sarg, fi.getFileMetaInfo().getWriterVersion(),
+        footer.getTypesList(), fi.getMetadata(), stripeCount);
+    // For ORC case, send the boundaries of the stripes so we don't have to send the footer.
+    SplitInfos.Builder sb = SplitInfos.newBuilder();
+    List<StripeInformation> stripes = fi.getStripes();
+    boolean isEliminated = true;
+    for (int i = 0; i < result.length; ++i) {
+      if (result != null && !result[i]) continue;
+      isEliminated = false;
+      StripeInformation si = stripes.get(i);
+      sb.addInfos(SplitInfo.newBuilder().setIndex(i)
+          .setOffset(si.getOffset()).setLength(si.getLength()));
+    }
+    return isEliminated ? null : ByteBuffer.wrap(sb.build().toByteArray());
+  }
 }

