hive-commits mailing list archives

From: ser...@apache.org
Subject: svn commit: r1658861 [2/2] - in /hive/branches/llap: llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/ llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/ ql/src/java/org/apache/hadoop/hive/ql/io/orc/ ql/src/test/org/apache/hadoop/...
Date: Wed, 11 Feb 2015 01:44:14 GMT
Added: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java?rev=1658861&view=auto
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java (added)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java Wed Feb 11 01:44:14 2015
@@ -0,0 +1,422 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.io.orc;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.hive.common.DiskRange;
+import org.apache.hadoop.hive.llap.DebugUtils;
+import org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.BufferChunk;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.shims.HadoopShims.ByteBufferPoolShim;
+import org.apache.hadoop.hive.shims.HadoopShims.ZeroCopyReaderShim;
+
+import com.google.common.collect.ComparisonChain;
+
+/**
+ * Stateless methods shared between RecordReaderImpl and EncodedReaderImpl.
+ */
+class RecordReaderUtils {
+  static boolean[] findPresentStreamsByColumn(
+      List<OrcProto.Stream> streamList, List<OrcProto.Type> types) {
+    boolean[] hasNull = new boolean[types.size()];
+    for(OrcProto.Stream stream: streamList) {
+      if (stream.getKind() == OrcProto.Stream.Kind.PRESENT) {
+        hasNull[stream.getColumn()] = true;
+      }
+    }
+    return hasNull;
+  }
+
+  /**
+   * Does region A overlap region B? The end points are inclusive on both sides.
+   * @param leftA A's left point
+   * @param rightA A's right point
+   * @param leftB B's left point
+   * @param rightB B's right point
+   * @return Does region A overlap region B?
+   */
+  static boolean overlap(long leftA, long rightA, long leftB, long rightB) {
+    if (leftA <= leftB) {
+      return rightA >= leftB;
+    }
+    return rightB >= leftA;
+  }
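+  // For example, overlap(0, 10, 10, 11) is true because the endpoints are
+  // inclusive, while overlap(0, 10, 11, 12) is false
+  // (cf. TestRecordReaderImpl#testOverlap).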
+
+
+  static DiskRange addEntireStreamToRanges(long offset, long length,
+      DiskRange lastRange, LinkedList<DiskRange> result) {
+    long end = offset + length;
+    if (lastRange != null && overlap(lastRange.offset, lastRange.end, offset, end)) {
+      lastRange.offset = Math.min(lastRange.offset, offset);
+      lastRange.end = Math.max(lastRange.end, end);
+    } else {
+      lastRange = new DiskRange(offset, end);
+      result.add(lastRange);
+    }
+    return lastRange;
+  }
+
+  static DiskRange addRgFilteredStreamToRanges(OrcProto.Stream stream,
+      boolean[] includedRowGroups, boolean isCompressed, OrcProto.RowIndex index,
+      OrcProto.ColumnEncoding encoding, OrcProto.Type type, int compressionSize, boolean hasNull,
+      long offset, long length, DiskRange lastRange, LinkedList<DiskRange> result) {
+    for (int group = 0; group < includedRowGroups.length; ++group) {
+      if (!includedRowGroups[group]) continue;
+      int posn = getIndexPosition(
+          encoding.getKind(), type.getKind(), stream.getKind(), isCompressed, hasNull);
+      long start = index.getEntry(group).getPositions(posn);
+      final long nextGroupOffset;
+      boolean isLast = group == (includedRowGroups.length - 1);
+      nextGroupOffset = isLast ? length : index.getEntry(group + 1).getPositions(posn);
+
+      start += offset;
+      long end = offset + estimateRgEndOffset(
+          isCompressed, isLast, nextGroupOffset, length, compressionSize);
+      if (lastRange != null && overlap(lastRange.offset, lastRange.end, start, end)) {
+        lastRange.offset = Math.min(lastRange.offset, start);
+        lastRange.end = Math.max(lastRange.end, end);
+      } else {
+        if (DebugUtils.isTraceOrcEnabled()) {
+          RecordReaderImpl.LOG.info("Creating new range for RG read; last range (which can "
+              + "include some previous RGs) was " + lastRange);
+        }
+        lastRange = new DiskRange(start, end);
+        result.add(lastRange);
+      }
+    }
+    return lastRange;
+  }
+
+  static long estimateRgEndOffset(boolean isCompressed, boolean isLast,
+      long nextGroupOffset, long streamLength, int bufferSize) {
+    // figure out the worst case last location
+    // if adjacent groups have the same compressed block offset then stretch the slop
+    // by factor of 2 to safely accommodate the next compression block.
+    // One for the current compression block and another for the next compression block.
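+    // Illustrative example (hypothetical sizes): with compression enabled and a
+    // 256KB buffer, slop = 2 * (OutStream.HEADER_SIZE + 262144) bytes, so a
+    // non-final row group is read up to that far past the next group's offset,
+    // capped at the stream length.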
+    long slop = isCompressed ? 2 * (OutStream.HEADER_SIZE + bufferSize) : WORST_UNCOMPRESSED_SLOP;
+    return isLast ? streamLength : Math.min(streamLength, nextGroupOffset + slop);
+  }
+
+  private static final int BYTE_STREAM_POSITIONS = 1;
+  private static final int RUN_LENGTH_BYTE_POSITIONS = BYTE_STREAM_POSITIONS + 1;
+  private static final int BITFIELD_POSITIONS = RUN_LENGTH_BYTE_POSITIONS + 1;
+  private static final int RUN_LENGTH_INT_POSITIONS = BYTE_STREAM_POSITIONS + 1;
+
+  /**
+   * Get the offset within the column's index positions at which the given
+   * stream's positions start.
+   * @param columnEncoding the encoding of the column
+   * @param columnType the type of the column
+   * @param streamType the kind of the stream
+   * @param isCompressed is the file compressed
+   * @param hasNulls does the column have a PRESENT stream?
+   * @return the offset into the index positions at which the stream starts
+   */
+  public static int getIndexPosition(OrcProto.ColumnEncoding.Kind columnEncoding,
+                              OrcProto.Type.Kind columnType,
+                              OrcProto.Stream.Kind streamType,
+                              boolean isCompressed,
+                              boolean hasNulls) {
+    if (streamType == OrcProto.Stream.Kind.PRESENT) {
+      return 0;
+    }
+    int compressionValue = isCompressed ? 1 : 0;
+    int base = hasNulls ? (BITFIELD_POSITIONS + compressionValue) : 0;
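+    // "base" skips the positions consumed by the PRESENT stream's bitfield
+    // reader (plus one more for the compressed-block offset when compressed).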
+    switch (columnType) {
+      case BOOLEAN:
+      case BYTE:
+      case SHORT:
+      case INT:
+      case LONG:
+      case FLOAT:
+      case DOUBLE:
+      case DATE:
+      case STRUCT:
+      case MAP:
+      case LIST:
+      case UNION:
+        return base;
+      case CHAR:
+      case VARCHAR:
+      case STRING:
+        if (columnEncoding == OrcProto.ColumnEncoding.Kind.DICTIONARY ||
+            columnEncoding == OrcProto.ColumnEncoding.Kind.DICTIONARY_V2) {
+          return base;
+        } else {
+          if (streamType == OrcProto.Stream.Kind.DATA) {
+            return base;
+          } else {
+            return base + BYTE_STREAM_POSITIONS + compressionValue;
+          }
+        }
+      case BINARY:
+        if (streamType == OrcProto.Stream.Kind.DATA) {
+          return base;
+        }
+        return base + BYTE_STREAM_POSITIONS + compressionValue;
+      case DECIMAL:
+        if (streamType == OrcProto.Stream.Kind.DATA) {
+          return base;
+        }
+        return base + BYTE_STREAM_POSITIONS + compressionValue;
+      case TIMESTAMP:
+        if (streamType == OrcProto.Stream.Kind.DATA) {
+          return base;
+        }
+        return base + RUN_LENGTH_INT_POSITIONS + compressionValue;
+      default:
+        throw new IllegalArgumentException("Unknown type " + columnType);
+    }
+  }
+
+  // For uncompressed streams, the worst-case overlap with the following set
+  // of rows (a long vint literal group).
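+  // (That is 2 + 8 * 512 = 4,098 bytes.)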
+  static final int WORST_UNCOMPRESSED_SLOP = 2 + 8 * 512;
+
+  /**
+   * Is this stream part of a dictionary?
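+   * For example, the LENGTH stream of a DICTIONARY-encoded string column is
+   * part of the dictionary, while the LENGTH stream of a DIRECT-encoded
+   * column is not.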
+   * @return is this part of a dictionary?
+   */
+  static boolean isDictionary(OrcProto.Stream.Kind kind,
+                              OrcProto.ColumnEncoding encoding) {
+    assert kind != OrcProto.Stream.Kind.DICTIONARY_COUNT;
+    OrcProto.ColumnEncoding.Kind encodingKind = encoding.getKind();
+    return kind == OrcProto.Stream.Kind.DICTIONARY_DATA ||
+      (kind == OrcProto.Stream.Kind.LENGTH &&
+       (encodingKind == OrcProto.ColumnEncoding.Kind.DICTIONARY ||
+        encodingKind == OrcProto.ColumnEncoding.Kind.DICTIONARY_V2));
+  }
+
+  /**
+   * Build a string representation of a list of disk ranges.
+   * @param ranges ranges to stringify
+   * @return the resulting string
+   */
+  static String stringifyDiskRanges(List<DiskRange> ranges) {
+    StringBuilder buffer = new StringBuilder();
+    buffer.append("[");
+    for(int i=0; i < ranges.size(); ++i) {
+      if (i != 0) {
+        buffer.append(", ");
+      }
+      buffer.append(ranges.get(i).toString());
+    }
+    buffer.append("]");
+    return buffer.toString();
+  }
+
+  /**
+   * Read the list of ranges from the file, replacing each unread range in the
+   * list with one or more BufferChunks holding the bytes that were read.
+   * @param file the file to read
+   * @param zcr the zero-copy reader shim, or null to use ordinary reads
+   * @param base the base of the stripe
+   * @param ranges the disk ranges within the stripe to read
+   * @param doForceDirect whether reads should be forced into direct byte buffers
+   * @throws IOException
+   */
+  static void readDiskRanges(FSDataInputStream file,
+                                 ZeroCopyReaderShim zcr,
+                                 long base,
+                                 LinkedList<DiskRange> ranges,
+                                 boolean doForceDirect) throws IOException {
+    ListIterator<DiskRange> rangeIter = ranges.listIterator();
+    while (rangeIter.hasNext()) {
+      DiskRange range = rangeIter.next();
+      if (range.hasData()) continue;
+      int len = (int) (range.end - range.offset);
+      long off = range.offset;
+      file.seek(base + off);
+      if (zcr != null) {
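+        // A zero-copy read may return fewer bytes than requested; keep reading
+        // and chain additional BufferChunks into the range list until the whole
+        // range is covered.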
+        boolean hasReplaced = false;
+        while (len > 0) {
+          ByteBuffer partial = zcr.readBuffer(len, false);
+          BufferChunk bc = new BufferChunk(partial, off);
+          if (!hasReplaced) {
+            rangeIter.set(bc);
+            hasReplaced = true;
+          } else {
+            rangeIter.add(bc);
+          }
+          int read = partial.remaining();
+          len -= read;
+          off += read;
+        }
+      } else if (doForceDirect) {
+        ByteBuffer directBuf = ByteBuffer.allocateDirect(len);
+        try {
+          while (directBuf.remaining() > 0) {
+            int count = file.read(directBuf);
+            if (count < 0) throw new EOFException();
+            directBuf.position(directBuf.position() + count);
+          }
+        } catch (UnsupportedOperationException ex) {
+          RecordReaderImpl.LOG.error("Stream does not support direct read; we will copy.");
+          byte[] buffer = new byte[len];
+          file.readFully(buffer, 0, buffer.length);
+          directBuf.put(buffer);
+        }
+        directBuf.position(0);
+        rangeIter.set(new BufferChunk(directBuf, range.offset));
+      } else {
+        byte[] buffer = new byte[len];
+        file.readFully(buffer, 0, buffer.length);
+        rangeIter.set(new BufferChunk(ByteBuffer.wrap(buffer), range.offset));
+      }
+    }
+  }
+
+
+  static List<DiskRange> getStreamBuffers(List<DiskRange> ranges, long offset, long length) {
+    // This assumes sorted ranges (as do many other parts of ORC code).
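+    // Illustrative example (hypothetical values): given sorted ranges [0, 100)
+    // and [100, 250), offset=50 and length=150 yield the slices [50, 100) and
+    // [100, 200), shifted to stream-relative [0, 50) and [50, 150).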
+    ArrayList<DiskRange> buffers = new ArrayList<DiskRange>();
+    long streamEnd = offset + length;
+    boolean inRange = false;
+    for (DiskRange range : ranges) {
+      if (!inRange) {
+        if (range.end <= offset) continue; // Skip until we are in range.
+        inRange = true;
+        if (range.offset < offset) {
+          // Partial first buffer, add a slice of it.
+          DiskRange partial = range.slice(offset, Math.min(streamEnd, range.end));
+          partial.shiftBy(-offset);
+          buffers.add(partial);
+          if (range.end >= streamEnd) break; // Partial first buffer is also partial last buffer.
+          continue;
+        }
+      } else if (range.offset >= streamEnd) {
+        break;
+      }
+      if (range.end > streamEnd) {
+        // Partial last buffer (may also be the first buffer), add a slice of it.
+        DiskRange partial = range.slice(range.offset, streamEnd);
+        partial.shiftBy(-offset);
+        buffers.add(partial);
+        break;
+      }
+      // Buffer that belongs entirely to one stream.
+      // TODO: ideally we would want to reuse the object and remove it from the list, but we cannot
+      //       because bufferChunks is also used by clearStreams for zcr. Create a useless dup.
+      DiskRange full = range.slice(range.offset, range.end);
+      full.shiftBy(-offset);
+      buffers.add(full);
+      if (range.end == streamEnd) break;
+    }
+    return buffers;
+  }
+
+  static ZeroCopyReaderShim createZeroCopyShim(FSDataInputStream file,
+      CompressionCodec codec, ByteBufferAllocatorPool pool) throws IOException {
+    if ((codec == null || ((codec instanceof DirectDecompressionCodec)
+            && ((DirectDecompressionCodec) codec).isAvailable()))) {
+      /* codec is null or is available */
+      return ShimLoader.getHadoopShims().getZeroCopyReader(file, pool);
+    }
+    return null;
+  }
+
+  // this is an implementation copied from ElasticByteBufferPool in hadoop-2,
+  // which lacks a clear()/clean() operation
+  public final static class ByteBufferAllocatorPool implements ByteBufferPoolShim {
+    private static final class Key implements Comparable<Key> {
+      private final int capacity;
+      private final long insertionGeneration;
+
+      Key(int capacity, long insertionGeneration) {
+        this.capacity = capacity;
+        this.insertionGeneration = insertionGeneration;
+      }
+
+      @Override
+      public int compareTo(Key other) {
+        return ComparisonChain.start().compare(capacity, other.capacity)
+            .compare(insertionGeneration, other.insertionGeneration).result();
+      }
+
+      @Override
+      public boolean equals(Object rhs) {
+        if (rhs == null) {
+          return false;
+        }
+        try {
+          Key o = (Key) rhs;
+          return (compareTo(o) == 0);
+        } catch (ClassCastException e) {
+          return false;
+        }
+      }
+
+      @Override
+      public int hashCode() {
+        return new HashCodeBuilder().append(capacity).append(insertionGeneration)
+            .toHashCode();
+      }
+    }
+
+    private final TreeMap<Key, ByteBuffer> buffers = new TreeMap<Key, ByteBuffer>();
+
+    private final TreeMap<Key, ByteBuffer> directBuffers = new TreeMap<Key, ByteBuffer>();
+
+    private long currentGeneration = 0;
+
+    private final TreeMap<Key, ByteBuffer> getBufferTree(boolean direct) {
+      return direct ? directBuffers : buffers;
+    }
+
+    public void clear() {
+      buffers.clear();
+      directBuffers.clear();
+    }
+
+    @Override
+    public ByteBuffer getBuffer(boolean direct, int length) {
+      TreeMap<Key, ByteBuffer> tree = getBufferTree(direct);
+      Map.Entry<Key, ByteBuffer> entry = tree.ceilingEntry(new Key(length, 0));
+      if (entry == null) {
+        return direct ? ByteBuffer.allocateDirect(length) : ByteBuffer
+            .allocate(length);
+      }
+      tree.remove(entry.getKey());
+      return entry.getValue();
+    }
+
+    @Override
+    public void putBuffer(ByteBuffer buffer) {
+      TreeMap<Key, ByteBuffer> tree = getBufferTree(buffer.isDirect());
+      while (true) {
+        Key key = new Key(buffer.capacity(), currentGeneration++);
+        if (!tree.containsKey(key)) {
+          tree.put(key, buffer);
+          return;
+        }
+        // Buffers are indexed by (capacity, generation).
+        // If our key is not unique on the first try, we try again.
+      }
+    }
+  }
+}
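
A minimal usage sketch of the ByteBufferAllocatorPool above (hypothetical
example class, not part of this commit): buffers are pooled by capacity, and
getBuffer hands back the smallest pooled buffer whose capacity is at least the
requested length, or allocates a fresh one when the pool has no match.

    // Hypothetical example; it lives in the same package because
    // RecordReaderUtils is package-private.
    package org.apache.hadoop.hive.ql.io.orc;

    import java.nio.ByteBuffer;

    public class PoolExample {
      public static void main(String[] args) {
        RecordReaderUtils.ByteBufferAllocatorPool pool =
            new RecordReaderUtils.ByteBufferAllocatorPool();
        ByteBuffer buf = pool.getBuffer(false, 4096);   // pool is empty: freshly allocated
        pool.putBuffer(buf);                            // recycle it into the pool
        ByteBuffer again = pool.getBuffer(false, 1024); // returns the pooled 4096-byte
                                                        // buffer (smallest capacity >= 1024)
        pool.clear();                                   // drop all pooled buffers
      }
    }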

Modified: hive/branches/llap/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRecordReaderImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRecordReaderImpl.java?rev=1658861&r1=1658860&r2=1658861&view=diff
==============================================================================
--- hive/branches/llap/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRecordReaderImpl.java (original)
+++ hive/branches/llap/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestRecordReaderImpl.java Wed Feb 11 01:44:14 2015
@@ -759,15 +759,15 @@ public class TestRecordReaderImpl {
 
   @Test
   public void testOverlap() throws Exception {
-    assertTrue(!RecordReaderImpl.overlap(0, 10, -10, -1));
-    assertTrue(RecordReaderImpl.overlap(0, 10, -1, 0));
-    assertTrue(RecordReaderImpl.overlap(0, 10, -1, 1));
-    assertTrue(RecordReaderImpl.overlap(0, 10, 2, 8));
-    assertTrue(RecordReaderImpl.overlap(0, 10, 5, 10));
-    assertTrue(RecordReaderImpl.overlap(0, 10, 10, 11));
-    assertTrue(RecordReaderImpl.overlap(0, 10, 0, 10));
-    assertTrue(RecordReaderImpl.overlap(0, 10, -1, 11));
-    assertTrue(!RecordReaderImpl.overlap(0, 10, 11, 12));
+    assertTrue(!RecordReaderUtils.overlap(0, 10, -10, -1));
+    assertTrue(RecordReaderUtils.overlap(0, 10, -1, 0));
+    assertTrue(RecordReaderUtils.overlap(0, 10, -1, 1));
+    assertTrue(RecordReaderUtils.overlap(0, 10, 2, 8));
+    assertTrue(RecordReaderUtils.overlap(0, 10, 5, 10));
+    assertTrue(RecordReaderUtils.overlap(0, 10, 10, 11));
+    assertTrue(RecordReaderUtils.overlap(0, 10, 0, 10));
+    assertTrue(RecordReaderUtils.overlap(0, 10, -1, 11));
+    assertTrue(!RecordReaderUtils.overlap(0, 10, 11, 12));
   }
 
   private static List<DiskRange> diskRanges(Integer... points) {
@@ -806,55 +806,55 @@ public class TestRecordReaderImpl {
 
   @Test
   public void testGetIndexPosition() throws Exception {
-    assertEquals(0, RecordReaderImpl.getIndexPosition
+    assertEquals(0, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.INT,
             OrcProto.Stream.Kind.PRESENT, true, true));
-    assertEquals(4, RecordReaderImpl.getIndexPosition
+    assertEquals(4, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.INT,
             OrcProto.Stream.Kind.DATA, true, true));
-    assertEquals(3, RecordReaderImpl.getIndexPosition
+    assertEquals(3, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.INT,
             OrcProto.Stream.Kind.DATA, false, true));
-    assertEquals(0, RecordReaderImpl.getIndexPosition
+    assertEquals(0, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.INT,
             OrcProto.Stream.Kind.DATA, true, false));
-    assertEquals(4, RecordReaderImpl.getIndexPosition
+    assertEquals(4, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DICTIONARY, OrcProto.Type.Kind.STRING,
             OrcProto.Stream.Kind.DATA, true, true));
-    assertEquals(4, RecordReaderImpl.getIndexPosition
+    assertEquals(4, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.BINARY,
             OrcProto.Stream.Kind.DATA, true, true));
-    assertEquals(3, RecordReaderImpl.getIndexPosition
+    assertEquals(3, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.BINARY,
             OrcProto.Stream.Kind.DATA, false, true));
-    assertEquals(6, RecordReaderImpl.getIndexPosition
+    assertEquals(6, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.BINARY,
             OrcProto.Stream.Kind.LENGTH, true, true));
-    assertEquals(4, RecordReaderImpl.getIndexPosition
+    assertEquals(4, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.BINARY,
             OrcProto.Stream.Kind.LENGTH, false, true));
-    assertEquals(4, RecordReaderImpl.getIndexPosition
+    assertEquals(4, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.DECIMAL,
             OrcProto.Stream.Kind.DATA, true, true));
-    assertEquals(3, RecordReaderImpl.getIndexPosition
+    assertEquals(3, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.DECIMAL,
             OrcProto.Stream.Kind.DATA, false, true));
-    assertEquals(6, RecordReaderImpl.getIndexPosition
+    assertEquals(6, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.DECIMAL,
             OrcProto.Stream.Kind.SECONDARY, true, true));
-    assertEquals(4, RecordReaderImpl.getIndexPosition
+    assertEquals(4, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.DECIMAL,
             OrcProto.Stream.Kind.SECONDARY, false, true));
-    assertEquals(4, RecordReaderImpl.getIndexPosition
+    assertEquals(4, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.TIMESTAMP,
             OrcProto.Stream.Kind.DATA, true, true));
-    assertEquals(3, RecordReaderImpl.getIndexPosition
+    assertEquals(3, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.TIMESTAMP,
             OrcProto.Stream.Kind.DATA, false, true));
-    assertEquals(7, RecordReaderImpl.getIndexPosition
+    assertEquals(7, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.TIMESTAMP,
             OrcProto.Stream.Kind.SECONDARY, true, true));
-    assertEquals(5, RecordReaderImpl.getIndexPosition
+    assertEquals(5, RecordReaderUtils.getIndexPosition
         (OrcProto.ColumnEncoding.Kind.DIRECT, OrcProto.Type.Kind.TIMESTAMP,
             OrcProto.Stream.Kind.SECONDARY, false, true));
   }
@@ -932,9 +932,9 @@ public class TestRecordReaderImpl {
     result = RecordReaderImpl.planReadPartialDataStreams(streams, indexes,
         columns, rowGroups, false, encodings, types, 32768);
     assertThat(result, is(diskRanges(0, 1000, 100, 1000, 400, 1000,
-        1000, 11000 + RecordReaderImpl.WORST_UNCOMPRESSED_SLOP,
-        11000, 21000 + RecordReaderImpl.WORST_UNCOMPRESSED_SLOP,
-        41000, 51000 + RecordReaderImpl.WORST_UNCOMPRESSED_SLOP)));
+        1000, 11000 + RecordReaderUtils.WORST_UNCOMPRESSED_SLOP,
+        11000, 21000 + RecordReaderUtils.WORST_UNCOMPRESSED_SLOP,
+        41000, 51000 + RecordReaderUtils.WORST_UNCOMPRESSED_SLOP)));
 
     // if we read no rows, don't read any bytes
     rowGroups = new boolean[]{false, false, false, false, false, false};
@@ -955,7 +955,7 @@ public class TestRecordReaderImpl {
     result = RecordReaderImpl.planReadPartialDataStreams(streams, indexes,
         columns, rowGroups, false, encodings, types, 32768);
     assertThat(result, is(diskRanges(100100, 102000,
-        112000, 122000 + RecordReaderImpl.WORST_UNCOMPRESSED_SLOP)));
+        112000, 122000 + RecordReaderUtils.WORST_UNCOMPRESSED_SLOP)));
 
     rowGroups = new boolean[]{false, false, false, false, false, true};
     indexes[1] = indexes[2];
@@ -1128,8 +1128,8 @@ public class TestRecordReaderImpl {
     result = RecordReaderImpl.planReadPartialDataStreams(streams, indexes,
         columns, rowGroups, false, encodings, types, 32768);
     assertThat(result, is(diskRanges(100, 1000, 400, 1000, 500, 1000,
-        11000, 21000 + RecordReaderImpl.WORST_UNCOMPRESSED_SLOP,
-        41000, 51000 + RecordReaderImpl.WORST_UNCOMPRESSED_SLOP,
+        11000, 21000 + RecordReaderUtils.WORST_UNCOMPRESSED_SLOP,
+        41000, 51000 + RecordReaderUtils.WORST_UNCOMPRESSED_SLOP,
         51000, 95000, 95000, 97000, 97000, 100000)));
   }
 }


