hadoop-common-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1543710 - in /hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common: ./ src/main/java/ src/main/java/org/apache/hadoop/io/ src/main/java/org/apache/hadoop/io/compress/ src/main/java/org/apache/hadoop/io/compress/snappy/ src/...
Date: Wed, 20 Nov 2013 05:31:03 GMT
Author: arp
Date: Wed Nov 20 05:31:02 2013
New Revision: 1543710

URL: http://svn.apache.org/r1543710
Log:
Merging r1543613 through r1543709 from trunk to branch HDFS-2832

Added:
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DirectDecompressionCodec.java
      - copied unchanged from r1543612, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DirectDecompressionCodec.java
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DirectDecompressor.java
      - copied unchanged from r1543612, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DirectDecompressor.java
Removed:
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DirectCompressor.java
Modified:
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java
    hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java

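For orientation: the two files copied from trunk define the direct-buffer decompression API that the rest of this merge wires up. A minimal sketch of the shapes implied by the implementations in this diff (the method signatures match the @Override'd methods below; the javadoc-free layout and the two-interfaces-in-one-listing are illustrative, not the verbatim trunk source):

package org.apache.hadoop.io.compress;

import java.io.IOException;
import java.nio.ByteBuffer;

// Decompresses directly between ByteBuffers, avoiding intermediate byte[] copies.
interface DirectDecompressor {
  void decompress(ByteBuffer src, ByteBuffer dst) throws IOException;
}

// Codecs that can hand out a DirectDecompressor advertise it via this interface;
// DefaultCodec and SnappyCodec below both implement it.
interface DirectDecompressionCodec extends CompressionCodec {
  // May return null when native support is not loaded (see GzipCodec/SnappyCodec).
  DirectDecompressor createDirectDecompressor();
}
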
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt Wed Nov 20 05:31:02 2013
@@ -387,8 +387,8 @@ Release 2.3.0 - UNRELEASED
 
     HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
 
-    HADOOP-10047. Add a direct-buffer based apis for compression. (Gopal V via
-    acmurthy)
+   HADOOP-10047. Add a direct-buffer based apis for compression. (Gopal V
+   via acmurthy)
 
   BUG FIXES
 

Propchange: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1543510-1543612

Propchange: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1543510-1543709

Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java Wed Nov 20 05:31:02 2013
@@ -203,8 +203,8 @@ public class ReadaheadPool {
       // It's also possible that we'll end up requesting readahead on some
       // other FD, which may be wasted work, but won't cause a problem.
       try {
-        NativeIO.POSIX.posixFadviseIfPossible(identifier, fd, off, len,
-            NativeIO.POSIX.POSIX_FADV_WILLNEED);
+        NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(identifier,
+            fd, off, len, NativeIO.POSIX.POSIX_FADV_WILLNEED);
       } catch (IOException ioe) {
         if (canceled) {
           // no big deal - the reader canceled the request and closed

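The ReadaheadPool change above swaps a direct call to the static NativeIO.POSIX.posixFadviseIfPossible() for a call through NativeIO.POSIX.getCacheManipulator(); the point of the indirection is testability. A hedged sketch of how a test might exploit it (setCacheManipulator() and the overridable method are both in the NativeIO diff further down; this particular stub class is hypothetical):

import java.io.FileDescriptor;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIOException;

// Hypothetical stub: ignore readahead requests instead of issuing posix_fadvise().
class NoFadviseCacheManipulator extends NativeIO.POSIX.CacheManipulator {
  @Override
  public void posixFadviseIfPossible(String identifier, FileDescriptor fd,
      long offset, long len, int flags) throws NativeIOException {
    // deliberate no-op for tests
  }
}

A test would install it once with NativeIO.POSIX.setCacheManipulator(new NoFadviseCacheManipulator()), after which ReadaheadPool requests become harmless.
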
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java Wed Nov 20 05:31:02 2013
@@ -28,11 +28,12 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class DefaultCodec implements Configurable, CompressionCodec {
+public class DefaultCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
   private static final Log LOG = LogFactory.getLog(DefaultCodec.class);
   
   Configuration conf;
@@ -103,6 +104,15 @@ public class DefaultCodec implements Con
     return ZlibFactory.getZlibDecompressor(conf);
   }
   
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public DirectDecompressor createDirectDecompressor() {
+    return ZlibFactory.getZlibDirectDecompressor(conf);
+  }
+  
+  
   @Override
   public String getDefaultExtension() {
     return ".deflate";

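DefaultCodec now advertises the capability through DirectDecompressionCodec, so callers can feature-test any codec. A hedged consumer-side sketch (the helper class and method name are hypothetical; the instanceof check and the possible null return are the contract implied by this diff and by ZlibFactory.getZlibDirectDecompressor() below):

import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DirectDecompressionCodec;
import org.apache.hadoop.io.compress.DirectDecompressor;

final class DirectCodecs {
  // Returns null when the codec lacks direct support or native code is absent;
  // callers then fall back to codec.createDecompressor().
  static DirectDecompressor tryDirect(CompressionCodec codec) {
    if (codec instanceof DirectDecompressionCodec) {
      return ((DirectDecompressionCodec) codec).createDirectDecompressor();
    }
    return null;
  }
}
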
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java Wed Nov 20 05:31:02 2013
@@ -25,6 +25,8 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.io.compress.zlib.*;
+import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
+
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 /**
@@ -218,6 +220,13 @@ public class GzipCodec extends DefaultCo
       ? GzipZlibDecompressor.class
       : BuiltInGzipDecompressor.class;
   }
+    
+  @Override
+  public DirectDecompressor createDirectDecompressor() {
+    return ZlibFactory.isNativeZlibLoaded(conf) 
+        ? new ZlibDecompressor.ZlibDirectDecompressor(
+          ZlibDecompressor.CompressionHeader.AUTODETECT_GZIP_ZLIB, 0) : null;
+  }
 
   @Override
   public String getDefaultExtension() {

Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java Wed Nov 20 05:31:02 2013
@@ -26,13 +26,14 @@ import org.apache.hadoop.conf.Configurab
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
 import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;
+import org.apache.hadoop.io.compress.snappy.SnappyDecompressor.SnappyDirectDecompressor;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 /**
  * This class creates snappy compressors/decompressors.
  */
-public class SnappyCodec implements Configurable, CompressionCodec {
+public class SnappyCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
   Configuration conf;
 
   /**
@@ -203,6 +204,14 @@ public class SnappyCodec implements Conf
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
     return new SnappyDecompressor(bufferSize);
   }
+  
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public DirectDecompressor createDirectDecompressor() {
+    return isNativeCodeLoaded() ? new SnappyDirectDecompressor() : null;
+  }
 
   /**
    * Get the default filename extension for this kind of compression.

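The returned SnappyDirectDecompressor is defined in the next file. A minimal, hedged usage sketch, modeled on the new test further below (note the comment in decompressDirect(): the native snappy call consumes all of src in one shot or throws, so dst must be sized for the whole uncompressed block; the wrapper class is illustrative):

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.compress.snappy.SnappyDecompressor.SnappyDirectDecompressor;

final class SnappyDirectExample {
  // src: direct buffer holding one complete compressed block (position..limit);
  // dst: direct buffer large enough for the whole uncompressed block.
  static void decompressBlock(ByteBuffer src, ByteBuffer dst) throws IOException {
    SnappyDirectDecompressor decompressor = new SnappyDirectDecompressor();
    while (!decompressor.finished()) {
      decompressor.decompress(src, dst);
    }
    dst.flip(); // dst now reads back the uncompressed bytes
  }
}
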
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java Wed Nov 20 05:31:02 2013
@@ -25,6 +25,7 @@ import java.nio.ByteBuffer;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.io.compress.DirectDecompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 /**
@@ -282,4 +283,75 @@ public class SnappyDecompressor implemen
   private native static void initIDs();
 
   private native int decompressBytesDirect();
+  
+  int decompressDirect(ByteBuffer src, ByteBuffer dst) throws IOException {
+    assert (this instanceof SnappyDirectDecompressor);
+    
+    ByteBuffer presliced = dst;
+    if (dst.position() > 0) {
+      presliced = dst;
+      dst = dst.slice();
+    }
+
+    Buffer originalCompressed = compressedDirectBuf;
+    Buffer originalUncompressed = uncompressedDirectBuf;
+    int originalBufferSize = directBufferSize;
+    compressedDirectBuf = src.slice();
+    compressedDirectBufLen = src.remaining();
+    uncompressedDirectBuf = dst;
+    directBufferSize = dst.remaining();
+    int n = 0;
+    try {
+      n = decompressBytesDirect();
+      presliced.position(presliced.position() + n);
+      // SNAPPY always consumes the whole buffer or throws an exception
+      src.position(src.limit());
+      finished = true;
+    } finally {
+      compressedDirectBuf = originalCompressed;
+      uncompressedDirectBuf = originalUncompressed;
+      compressedDirectBufLen = 0;
+      directBufferSize = originalBufferSize;
+    }
+    return n;
+  }
+  
+  public static class SnappyDirectDecompressor extends SnappyDecompressor implements
+      DirectDecompressor {
+    
+    @Override
+    public boolean finished() {
+      return (endOfInput && super.finished());
+    }
+
+    @Override
+    public void reset() {
+      super.reset();
+      endOfInput = true;
+    }
+
+    private boolean endOfInput;
+
+    @Override
+    public synchronized void decompress(ByteBuffer src, ByteBuffer dst)
+        throws IOException {
+      assert dst.isDirect() : "dst.isDirect()";
+      assert src.isDirect() : "src.isDirect()";
+      assert dst.remaining() > 0 : "dst.remaining() > 0";
+      this.decompressDirect(src, dst);
+      endOfInput = !src.hasRemaining();
+    }
+
+    @Override
+    public synchronized void setDictionary(byte[] b, int off, int len) {
+      throw new UnsupportedOperationException(
+          "byte[] arrays are not supported for DirectDecompressor");
+    }
+
+    @Override
+    public synchronized int decompress(byte[] b, int off, int len) {
+      throw new UnsupportedOperationException(
+          "byte[] arrays are not supported for DirectDecompressor");
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java Wed Nov 20 05:31:02 2013
@@ -24,7 +24,6 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
-import org.apache.hadoop.io.compress.DirectCompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import org.apache.commons.logging.Log;
@@ -36,7 +35,7 @@ import org.apache.commons.logging.LogFac
  * http://www.zlib.net/
  * 
  */
-public class ZlibCompressor implements Compressor,DirectCompressor {
+public class ZlibCompressor implements Compressor {
 
   private static final Log LOG = LogFactory.getLog(ZlibCompressor.class);
 
@@ -421,7 +420,6 @@ public class ZlibCompressor implements C
     compressedDirectBuf.limit(directBufferSize);
     compressedDirectBuf.position(directBufferSize);
     userBufOff = userBufLen = 0;
-    userBuf = null;
   }
   
   @Override
@@ -437,110 +435,6 @@ public class ZlibCompressor implements C
       throw new NullPointerException();
   }
   
-  private int put(ByteBuffer dst, ByteBuffer src) {
-    // this will lop off data from src[pos:limit] into dst[pos:limit]
-    int l1 = src.remaining();
-    int l2 = dst.remaining();
-    int pos1 = src.position();
-    int pos2 = dst.position();
-    int len = Math.min(l1, l2);
-
-    if (len == 0) {
-      return 0;
-    }
-
-    ByteBuffer slice = src.slice();
-    slice.limit(len);
-    dst.put(slice);
-    src.position(pos1 + len);
-    return len;
-  }
-
-  public int compress(ByteBuffer dst, ByteBuffer src) throws IOException {
-    assert dst.remaining() > 0 : "dst.remaining() == 0";
-    int n = 0;
-    
-    /* fast path for clean state and direct buffers */
-    /* TODO: reset should free userBuf? */
-    if((src != null && src.isDirect()) && dst.isDirect() && userBuf == null) {
-      /*
-       * TODO: fix these assumptions in inflateDirect(), eventually by allowing
-       * it to read position()/limit() directly
-       */
-      boolean cleanDst = (dst.position() == 0 && dst.remaining() == dst.capacity() && dst.capacity() >= directBufferSize);
-      boolean cleanState = (keepUncompressedBuf == false && uncompressedDirectBufLen == 0 && compressedDirectBuf.remaining() == 0);
-      /* use the buffers directly */
-      if(cleanDst && cleanState) {
-        Buffer originalCompressed = compressedDirectBuf;
-        Buffer originalUncompressed = uncompressedDirectBuf;
-        int originalBufferSize = directBufferSize;
-        uncompressedDirectBuf = src;
-        uncompressedDirectBufOff = src.position();
-        uncompressedDirectBufLen = src.remaining();
-        compressedDirectBuf = dst;
-        directBufferSize = dst.remaining();
-        // Compress data
-        n = deflateBytesDirect();
-        // we move dst.position() forward, not limit() 
-        // unlike the local buffer case, which moves it when we put() into the dst
-        dst.position(n);
-        if(uncompressedDirectBufLen > 0) {
-          src.position(uncompressedDirectBufOff);
-        } else {
-          src.position(src.limit());
-        }
-        compressedDirectBuf = originalCompressed;
-        uncompressedDirectBuf = originalUncompressed;
-        uncompressedDirectBufOff = 0;
-        uncompressedDirectBufLen = 0;
-        directBufferSize = originalBufferSize;
-        return n;
-      }
-    }
-    
-    // Check if there is compressed data
-    if (compressedDirectBuf.remaining() > 0) {
-      n = put(dst, (ByteBuffer) compressedDirectBuf);
-    }
-
-    if (dst.remaining() == 0) {
-      return n;
-    } else {
-      needsInput();
-
-      // if we have drained userBuf, read from src (ideally, do not mix buffer
-      // modes, but sometimes you can)
-      if (userBufLen == 0 && src != null && src.remaining() > 0) {
-        put((ByteBuffer) uncompressedDirectBuf, src);
-        uncompressedDirectBufLen = uncompressedDirectBuf.position();
-      }
-
-      // Re-initialize the zlib's output direct buffer
-      compressedDirectBuf.rewind();
-      compressedDirectBuf.limit(directBufferSize);
-
-      // Compress data
-      int more = deflateBytesDirect();
-
-      compressedDirectBuf.limit(more);
-
-      // Check if zlib consumed all input buffer
-      // set keepUncompressedBuf properly
-      if (uncompressedDirectBufLen <= 0) { // zlib consumed all input buffer
-        keepUncompressedBuf = false;
-        uncompressedDirectBuf.clear();
-        uncompressedDirectBufOff = 0;
-        uncompressedDirectBufLen = 0;
-      } else { // zlib did not consume all input buffer
-        keepUncompressedBuf = true;
-      }
-
-      // fill the dst buffer from compressedDirectBuf
-      int fill = put(dst, ((ByteBuffer) compressedDirectBuf));
-      return n + fill;
-    }
-  }
-  
   private native static void initIDs();
   private native static long init(int level, int strategy, int windowBits);
   private native static void setDictionary(long strm, byte[] b, int off,

Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java Wed Nov 20 05:31:02 2013
@@ -32,7 +32,7 @@ import org.apache.hadoop.util.NativeCode
  * http://www.zlib.net/
  * 
  */
-public class ZlibDecompressor implements Decompressor,DirectDecompressor {
+public class ZlibDecompressor implements Decompressor {
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
   
   // HACK - Use this as a global lock in the JNI layer
@@ -107,7 +107,7 @@ public class ZlibDecompressor implements
    */
   public ZlibDecompressor(CompressionHeader header, int directBufferSize) {
     this.header = header;
-    this.directBufferSize = directBufferSize;
+    this.directBufferSize = directBufferSize;    
     compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
     uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
     uncompressedDirectBuf.position(directBufferSize);
@@ -281,7 +281,6 @@ public class ZlibDecompressor implements
     uncompressedDirectBuf.limit(directBufferSize);
     uncompressedDirectBuf.position(directBufferSize);
     userBufOff = userBufLen = 0;
-    userBuf = null;
   }
 
   @Override
@@ -301,108 +300,6 @@ public class ZlibDecompressor implements
     if (stream == 0)
       throw new NullPointerException();
   }
-    
-  private int put(ByteBuffer dst, ByteBuffer src) {
-    // this will lop off data from src[pos:limit] into dst[pos:limit], using the
-    // min() of both remaining()
-    int l1 = src.remaining();
-    int l2 = dst.remaining();
-    int pos1 = src.position();
-    int pos2 = dst.position();
-    int len = Math.min(l1, l2);
-
-    if (len == 0) {
-      return 0;
-    }
-
-    ByteBuffer slice = src.slice();
-    slice.limit(len);
-    dst.put(slice);
-    src.position(pos1 + len);
-    return len;
-  }
-
-  public int decompress(ByteBuffer dst, ByteBuffer src) throws IOException {
-    assert dst.remaining() > 0 : "dst.remaining == 0";
-    int n = 0;
-    
-    /* fast path for clean state and direct buffers */
-    if((src != null && src.isDirect()) && dst.isDirect() && userBuf == null) {
-      /*
-       * TODO: fix these assumptions in inflateDirect(), eventually by allowing
-       * it to read position()/limit() directly
-       */
-      boolean cleanDst = (dst.position() == 0 && dst.remaining() == dst.capacity() && dst.remaining() >= directBufferSize);
-      boolean cleanState = (compressedDirectBufLen == 0 && uncompressedDirectBuf.remaining() == 0);
-      /* use the buffers directly */
-      if(cleanDst && cleanState) {
-        Buffer originalCompressed = compressedDirectBuf;
-        Buffer originalUncompressed = uncompressedDirectBuf;
-        int originalBufferSize = directBufferSize;
-        compressedDirectBuf = src;
-        compressedDirectBufOff = src.position();
-        compressedDirectBufLen = src.remaining();
-        uncompressedDirectBuf = dst;
-        directBufferSize = dst.remaining();
-        // Compress data
-        n = inflateBytesDirect();
-        dst.position(n);
-        if(compressedDirectBufLen > 0) {
-          src.position(compressedDirectBufOff);
-        } else {
-          src.position(src.limit());
-        }
-        compressedDirectBuf = originalCompressed;
-        uncompressedDirectBuf = originalUncompressed;        
-        compressedDirectBufOff = 0;
-        compressedDirectBufLen = 0;
-        directBufferSize = originalBufferSize;
-        return n;
-      }
-    }
-    
-    // Check if there is compressed data
-    if (uncompressedDirectBuf.remaining() > 0) {
-      n = put(dst, (ByteBuffer) uncompressedDirectBuf);
-    }
-
-    if (dst.remaining() == 0) {
-      return n;
-    } else {
-      if (needsInput()) {
-        // this does not update buffers if we have no userBuf
-        if (userBufLen <= 0) {
-          compressedDirectBufOff = 0;
-          compressedDirectBufLen = 0;
-          compressedDirectBuf.rewind().limit(directBufferSize);
-        }
-        if (src != null) {
-          assert src.remaining() > 0 : "src.remaining() == 0";
-        }
-      }
-
-      // if we have drained userBuf, read from src (ideally, do not mix buffer
-      // modes, but sometimes you can)
-      if (userBufLen == 0 && src != null && src.remaining() > 0) {
-        compressedDirectBufLen += put(((ByteBuffer) compressedDirectBuf), src);
-      }
-      
-      // Re-initialize the zlib's output direct buffer
-      uncompressedDirectBuf.rewind();
-      uncompressedDirectBuf.limit(directBufferSize);
-
-      // Compress data
-      int more = inflateBytesDirect();
-
-      uncompressedDirectBuf.limit(more);
-
-      // Get atmost 'len' bytes
-      int fill = put(dst, ((ByteBuffer) uncompressedDirectBuf));
-      return n + fill;
-    }
-  }
-
-  
   
   private native static void initIDs();
   private native static long init(int windowBits);
@@ -414,4 +311,86 @@ public class ZlibDecompressor implements
   private native static int getRemaining(long strm);
   private native static void reset(long strm);
   private native static void end(long strm);
+    
+  int inflateDirect(ByteBuffer src, ByteBuffer dst) throws IOException {
+    assert (this instanceof ZlibDirectDecompressor);
+    
+    ByteBuffer presliced = dst;
+    if (dst.position() > 0) {
+      presliced = dst;
+      dst = dst.slice();
+    }
+
+    Buffer originalCompressed = compressedDirectBuf;
+    Buffer originalUncompressed = uncompressedDirectBuf;
+    int originalBufferSize = directBufferSize;
+    compressedDirectBuf = src;
+    compressedDirectBufOff = src.position();
+    compressedDirectBufLen = src.remaining();
+    uncompressedDirectBuf = dst;
+    directBufferSize = dst.remaining();
+    int n = 0;
+    try {
+      n = inflateBytesDirect();
+      presliced.position(presliced.position() + n);
+      if (compressedDirectBufLen > 0) {
+        src.position(compressedDirectBufOff);
+      } else {
+        src.position(src.limit());
+      }
+    } finally {
+      compressedDirectBuf = originalCompressed;
+      uncompressedDirectBuf = originalUncompressed;
+      compressedDirectBufOff = 0;
+      compressedDirectBufLen = 0;
+      directBufferSize = originalBufferSize;
+    }
+    return n;
+  }
+  
+  public static class ZlibDirectDecompressor 
+      extends ZlibDecompressor implements DirectDecompressor {
+    public ZlibDirectDecompressor() {
+      super(CompressionHeader.DEFAULT_HEADER, 0);
+    }
+
+    public ZlibDirectDecompressor(CompressionHeader header, int directBufferSize) {
+      super(header, directBufferSize);
+    }
+    
+    @Override
+    public boolean finished() {
+      return (endOfInput && super.finished());
+    }
+    
+    @Override
+    public void reset() {
+      super.reset();
+      endOfInput = true;
+    }
+    
+    private boolean endOfInput;
+
+    @Override
+    public synchronized void decompress(ByteBuffer src, ByteBuffer dst)
+        throws IOException {
+      assert dst.isDirect() : "dst.isDirect()";
+      assert src.isDirect() : "src.isDirect()";
+      assert dst.remaining() > 0 : "dst.remaining() > 0";      
+      this.inflateDirect(src, dst);
+      endOfInput = !src.hasRemaining();
+    }
+
+    @Override
+    public synchronized void setDictionary(byte[] b, int off, int len) {
+      throw new UnsupportedOperationException(
+          "byte[] arrays are not supported for DirectDecompressor");
+    }
+
+    @Override
+    public synchronized int decompress(byte[] b, int off, int len) {
+      throw new UnsupportedOperationException(
+          "byte[] arrays are not supported for DirectDecompressor");
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java Wed Nov 20 05:31:02 2013
@@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.io.compress.DirectDecompressor;
 import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
 import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
 import org.apache.hadoop.util.NativeCodeLoader;
@@ -116,6 +117,17 @@ public class ZlibFactory {
     return (isNativeZlibLoaded(conf)) ? 
       new ZlibDecompressor() : new BuiltInZlibInflater(); 
   }
+  
+  /**
+   * Return the appropriate implementation of the zlib direct decompressor. 
+   * 
+   * @param conf configuration
+   * @return the appropriate implementation of the zlib decompressor.
+   */
+  public static DirectDecompressor getZlibDirectDecompressor(Configuration conf) {
+    return (isNativeZlibLoaded(conf)) ? 
+      new ZlibDecompressor.ZlibDirectDecompressor() : null; 
+  }
 
   public static void setCompressionStrategy(Configuration conf,
       CompressionStrategy strategy) {

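Unlike getZlibDecompressor() above, the new factory method has no pure-Java fallback and returns null instead; a hedged illustration of the expected caller pattern (the demo class is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DirectDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;

final class ZlibDirectDemo {
  static void demo(Configuration conf) {
    DirectDecompressor direct = ZlibFactory.getZlibDirectDecompressor(conf);
    if (direct == null) {
      // No native zlib: degrade to the stream-oriented API, which does have
      // a pure-Java implementation (BuiltInZlibInflater).
      Decompressor fallback = ZlibFactory.getZlibDecompressor(conf);
    }
  }
}
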
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java Wed Nov 20 05:31:02 2013
@@ -98,9 +98,6 @@ public class NativeIO {
 
     private static final Log LOG = LogFactory.getLog(NativeIO.class);
 
-    @VisibleForTesting
-    public static CacheTracker cacheTracker = null;
-    
     private static boolean nativeLoaded = false;
     private static boolean fadvisePossible = true;
     private static boolean syncFileRangePossible = true;
@@ -111,18 +108,61 @@ public class NativeIO {
 
     private static long cacheTimeout = -1;
 
-    public static interface CacheTracker {
-      public void fadvise(String identifier, long offset, long len, int flags);
+    private static CacheManipulator cacheManipulator = new CacheManipulator();
+
+    public static CacheManipulator getCacheManipulator() {
+      return cacheManipulator;
     }
 
-    public static CacheManipulator cacheManipulator = new CacheManipulator();
+    public static void setCacheManipulator(CacheManipulator cacheManipulator) {
+      POSIX.cacheManipulator = cacheManipulator;
+    }
 
+    /**
+     * Used to manipulate the operating system cache.
+     */
     @VisibleForTesting
     public static class CacheManipulator {
       public void mlock(String identifier, ByteBuffer buffer,
           long len) throws IOException {
         POSIX.mlock(buffer, len);
       }
+
+      public long getMemlockLimit() {
+        return NativeIO.getMemlockLimit();
+      }
+
+      public long getOperatingSystemPageSize() {
+        return NativeIO.getOperatingSystemPageSize();
+      }
+
+      public void posixFadviseIfPossible(String identifier,
+        FileDescriptor fd, long offset, long len, int flags)
+            throws NativeIOException {
+        NativeIO.POSIX.posixFadviseIfPossible(identifier, fd, offset,
+            len, flags);
+      }
+    }
+
+    /**
+     * A CacheManipulator used for testing which does not actually call mlock.
+     * This allows many tests to be run even when the operating system does not
+     * allow mlock, or only allows limited mlocking.
+     */
+    @VisibleForTesting
+    public static class NoMlockCacheManipulator extends CacheManipulator {
+      public void mlock(String identifier, ByteBuffer buffer,
+          long len) throws IOException {
+        LOG.info("mlocking " + identifier);
+      }
+
+      public long getMemlockLimit() {
+        return 1125899906842624L;
+      }
+
+      public long getOperatingSystemPageSize() {
+        return 4096;
+      }
     }
 
     static {
@@ -207,12 +247,9 @@ public class NativeIO {
      *
      * @throws NativeIOException if there is an error with the syscall
      */
-    public static void posixFadviseIfPossible(String identifier,
+    static void posixFadviseIfPossible(String identifier,
         FileDescriptor fd, long offset, long len, int flags)
         throws NativeIOException {
-      if (cacheTracker != null) {
-        cacheTracker.fadvise(identifier, offset, len, flags);
-      }
       if (nativeLoaded && fadvisePossible) {
         try {
           posix_fadvise(fd, offset, len, flags);
@@ -566,7 +603,7 @@ public class NativeIO {
    *         Long.MAX_VALUE if there is no limit;
    *         The number of bytes that can be locked into memory otherwise.
    */
-  public static long getMemlockLimit() {
+  static long getMemlockLimit() {
     return isAvailable() ? getMemlockLimit0() : 0;
   }
 
@@ -575,7 +612,7 @@ public class NativeIO {
   /**
    * @return the operating system's page size.
    */
-  public static long getOperatingSystemPageSize() {
+  static long getOperatingSystemPageSize() {
     try {
       Field f = Unsafe.class.getDeclaredField("theUnsafe");
       f.setAccessible(true);

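The CacheManipulator/NoMlockCacheManipulator split above exists so cache-related tests can run on machines where mlock is forbidden or tightly limited. A hedged example of installing the test variant (the setter and the class are both in this diff; the surrounding test class is illustrative):

import org.apache.hadoop.io.nativeio.NativeIO;
import org.junit.Before;

public class SomeCacheTest {
  @Before
  public void setUp() {
    // Swap in the no-op mlock variant: it logs instead of locking and reports
    // a very large memlock limit, so tests run under any RLIMIT_MEMLOCK.
    NativeIO.POSIX.setCacheManipulator(new NativeIO.POSIX.NoMlockCacheManipulator());
  }
}
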
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java Wed Nov 20 05:31:02 2013
@@ -29,6 +29,7 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.lang.reflect.Array;
+import java.nio.ByteBuffer;
 import java.util.Random;
 
 import org.apache.hadoop.io.DataInputBuffer;
@@ -38,6 +39,7 @@ import org.apache.hadoop.io.compress.Blo
 import org.apache.hadoop.io.compress.CompressionInputStream;
 import org.apache.hadoop.io.compress.CompressionOutputStream;
 import org.apache.hadoop.io.compress.SnappyCodec;
+import org.apache.hadoop.io.compress.snappy.SnappyDecompressor.SnappyDirectDecompressor;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -147,7 +149,7 @@ public class TestSnappyCompressorDecompr
       fail("testSnappyCompressorCompressAIOBException ex error !!!");
     }
   }
-
+  
   @Test
   public void testSnappyDecompressorCompressAIOBException() {
     try {
@@ -275,6 +277,56 @@ public class TestSnappyCompressorDecompr
       fail("testSnappyBlockCompression ex error !!!");
     }
   }
+  
+  private void compressDecompressLoop(int rawDataSize) throws IOException {
+    byte[] rawData = BytesGenerator.get(rawDataSize);    
+    byte[] compressedResult = new byte[rawDataSize+20];
+    int directBufferSize = Math.max(rawDataSize*2, 64*1024);    
+    SnappyCompressor compressor = new SnappyCompressor(directBufferSize);
+    compressor.setInput(rawData, 0, rawDataSize);
+    int compressedSize = compressor.compress(compressedResult, 0, compressedResult.length);
+    SnappyDirectDecompressor decompressor = new SnappyDirectDecompressor();
+   
+    ByteBuffer inBuf = ByteBuffer.allocateDirect(compressedSize);
+    ByteBuffer outBuf = ByteBuffer.allocateDirect(rawDataSize);
+
+    inBuf.put(compressedResult, 0, compressedSize);
+    inBuf.flip();    
+
+    ByteBuffer expected = ByteBuffer.wrap(rawData);
+    
+    outBuf.clear();
+    while(!decompressor.finished()) {
+      decompressor.decompress(inBuf, outBuf);
+      if (outBuf.remaining() == 0) {
+        outBuf.flip();
+        while (outBuf.remaining() > 0) {        
+          assertEquals(expected.get(), outBuf.get());
+        }
+        outBuf.clear();
+      }
+    }
+    outBuf.flip();
+    while (outBuf.remaining() > 0) {        
+      assertEquals(expected.get(), outBuf.get());
+    }
+    outBuf.clear();
+    
+    assertEquals(0, expected.remaining());
+  }
+  
+  @Test
+  public void testSnappyDirectBlockCompression() {
+    int[] size = { 4 * 1024, 64 * 1024, 128 * 1024, 1024 * 1024 };    
+    assumeTrue(SnappyCodec.isNativeCodeLoaded());
+    try {
+      for (int i = 0; i < size.length; i++) {
+        compressDecompressLoop(size[i]);
+      }
+    } catch (IOException ex) {
+      fail("testSnappyDirectBlockCompression ex !!!" + ex);
+    }
+  }
 
   @Test
   public void testSnappyCompressorDecopressorLogicWithCompressionStreams() {

Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java Wed Nov 20 05:31:02 2013
@@ -20,13 +20,12 @@ package org.apache.hadoop.io.compress.zl
 import static org.junit.Assert.*;
 import static org.junit.Assume.*;
 
-import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
-import java.io.Console;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
 import java.util.Random;
+import java.util.zip.DeflaterOutputStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -38,12 +37,10 @@ import org.apache.hadoop.io.compress.Dec
 import org.apache.hadoop.io.compress.CompressDecompressTester.CompressionTestStrategy;
 import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
 import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
-import org.apache.log4j.ConsoleAppender;
+import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
+import org.apache.hadoop.util.NativeCodeLoader;
 import org.junit.Before;
 import org.junit.Test;
-
-import sun.util.logging.resources.logging;
-
 import com.google.common.collect.ImmutableSet;
 
 public class TestZlibCompressorDecompressor {
@@ -159,143 +156,54 @@ public class TestZlibCompressorDecompres
     }
   }
   
-  private void compressDecompressLoop(int rawDataSize, int inSize, int outSize)
-      throws IOException {
+  
+  private void compressDecompressLoop(int rawDataSize) throws IOException {
     byte[] rawData = null;
     rawData = generate(rawDataSize);
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    ByteBuffer inBuf = ByteBuffer.allocateDirect(inSize);
-    ByteBuffer outBuf = ByteBuffer.allocateDirect(outSize);
-    ZlibCompressor compressor = new ZlibCompressor();
-    ZlibDecompressor decompressor = new ZlibDecompressor();
+    ByteArrayOutputStream baos = new ByteArrayOutputStream(rawDataSize+12);
+    DeflaterOutputStream dos = new DeflaterOutputStream(baos);
+    dos.write(rawData);
+    dos.flush();
+    dos.close();
+    byte[] compressedResult = baos.toByteArray();
+    int compressedSize = compressedResult.length;
+    ZlibDirectDecompressor decompressor = new ZlibDirectDecompressor();
+   
+    ByteBuffer inBuf = ByteBuffer.allocateDirect(compressedSize);
+    ByteBuffer outBuf = ByteBuffer.allocateDirect(rawDataSize);
+
+    inBuf.put(compressedResult, 0, compressedSize);
+    inBuf.flip();    
+
+    ByteBuffer expected = ByteBuffer.wrap(rawData);
+    
     outBuf.clear();
-    /* compression loop */
-    int off = 0;
-    int len = rawDataSize;
-    int min = Math.min(inBuf.remaining(), len);
-    if (min > 0) {
-      inBuf.put(rawData, off, min);
-    }
-    inBuf.flip();
-    len -= min;
-    off += min;
-    while (!compressor.finished()) {
-      compressor.compress(outBuf, inBuf);
+    while(!decompressor.finished()) {
+      decompressor.decompress(inBuf, outBuf);
       if (outBuf.remaining() == 0) {
-        // flush when the buffer is full
         outBuf.flip();
-        while (outBuf.remaining() > 0) {
-          baos.write(outBuf.get());
+        while (outBuf.remaining() > 0) {        
+          assertEquals(expected.get(), outBuf.get());
         }
         outBuf.clear();
       }
-      if (inBuf != null && inBuf.remaining() == 0) {
-        inBuf.clear();
-        if (len > 0) {
-          min = Math.min(inBuf.remaining(), len);
-          inBuf.put(rawData, off, min);
-          inBuf.flip();
-          len -= min;
-          off += min;
-        } else {
-          inBuf = null;
-          compressor.finish();
-        }
-      }
     }
-
     outBuf.flip();
-    if (outBuf.remaining() > 0) {
-      while (outBuf.remaining() > 0) {
-        baos.write(outBuf.get());
-      }
-      outBuf.clear();
+    while (outBuf.remaining() > 0) {        
+      assertEquals(expected.get(), outBuf.get());
     }
-
-    compressor.end();
-
-    byte[] compressed = baos.toByteArray();
-    ByteBuffer expected = ByteBuffer.wrap(rawData);
     outBuf.clear();
-    inBuf = ByteBuffer.allocateDirect(inSize);
-    inBuf.clear();
-
-    // zlib always has header
-    if (compressed.length != 0) {
-      off = 0;
-      len = compressed.length;
-      min = Math.min(inBuf.remaining(), len);
-      inBuf.put(compressed, off, min);
-      inBuf.flip();
-      len -= min;
-      off += min;
-      while (!decompressor.finished()) {
-        decompressor.decompress(outBuf, inBuf);
-        if (outBuf.remaining() == 0) {
-          outBuf.flip();
-          while (outBuf.remaining() > 0) {
-            assertEquals(expected.get(), outBuf.get());
-          }
-          outBuf.clear();
-        }
-
-        if (inBuf != null && inBuf.remaining() == 0) {
-          inBuf.clear();
-          if (len > 0) {
-            min = Math.min(inBuf.remaining(), len);
-            inBuf.put(compressed, off, min);
-            inBuf.flip();
-            len -= min;
-            off += min;
-          }
-        }
-      }
-    }
-
-    outBuf.flip();
-    if (outBuf.remaining() > 0) {
-      while (outBuf.remaining() > 0) {
-        assertEquals(expected.get(), outBuf.get());
-      }
-      outBuf.clear();
-    }
-
+    
     assertEquals(0, expected.remaining());
   }
-  
+
   @Test
   public void testZlibDirectCompressDecompress() {
-    int[] size = { 4, 16, 4 * 1024, 64 * 1024, 128 * 1024, 256 * 1024,
-        1024 * 1024 };
+    int[] size = { 1, 4, 16, 4 * 1024, 64 * 1024, 128 * 1024, 1024 * 1024 };
+    assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
     try {
-      // 0-2 bytes results in sizeof(outBuf) > sizeof(inBuf)
-      compressDecompressLoop(0, 4096, 4096);
-      compressDecompressLoop(0, 1, 1);
-      compressDecompressLoop(1, 1, 2);
-      compressDecompressLoop(1, 2, 1);
-      compressDecompressLoop(2, 3, 2);
-
       for (int i = 0; i < size.length; i++) {
-        compressDecompressLoop(size[i], 4096, 4096);
-        compressDecompressLoop(size[i], 1, 1);
-        compressDecompressLoop(size[i], 1, 2);
-        compressDecompressLoop(size[i], 2, 1);
-        compressDecompressLoop(size[i], 3, 2);
-        compressDecompressLoop(size[i], size[i], 4096);
-        compressDecompressLoop(size[i], size[i] - 1, 4096);
-        compressDecompressLoop(size[i], size[i] + 1, 4096);
-        compressDecompressLoop(size[i], 4096, size[i]);
-        compressDecompressLoop(size[i], 4096, size[i] - 1);
-        compressDecompressLoop(size[i], 4096, size[i] + 1);
-        compressDecompressLoop(size[i], size[i] - 1, size[i] - 1);
-
-        compressDecompressLoop(size[i], size[i] / 2, 4096);
-        compressDecompressLoop(size[i], size[i] / 2 - 1, 4096);
-        compressDecompressLoop(size[i], size[i] / 2 + 1, 4096);
-        compressDecompressLoop(size[i], 4096, size[i] / 2);
-        compressDecompressLoop(size[i], 4096, size[i] / 2 - 1);
-        compressDecompressLoop(size[i], 4096, size[i] / 2 + 1);
-        compressDecompressLoop(size[i], size[i] / 2 - 1, size[i] / 2 - 1);
+        compressDecompressLoop(size[i]);
       }
     } catch (IOException ex) {
       fail("testZlibDirectCompressDecompress ex !!!" + ex);


