hadoop-common-commits mailing list archives

From t...@apache.org
Subject [08/11] HDFS-6134 and HADOOP-10150 subtasks.
Date Thu, 28 Aug 2014 22:07:03 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
new file mode 100644
index 0000000..f5acc73
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
@@ -0,0 +1,721 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.EnumSet;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.ByteBufferReadable;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.ReadOption;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.Syncable;
+import org.apache.hadoop.io.ByteBufferPool;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.RandomDatum;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public abstract class CryptoStreamsTestBase {
+  protected static final Log LOG = LogFactory.getLog(
+      CryptoStreamsTestBase.class);
+
+  protected static CryptoCodec codec;
+  private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16};
+  private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
+  
+  protected static final int count = 10000;
+  protected static int defaultBufferSize = 8192;
+  protected static int smallBufferSize = 1024;
+  private byte[] data;
+  private int dataLen;
+  
+  @Before
+  public void setUp() throws IOException {
+    // Generate data
+    final int seed = new Random().nextInt();
+    final DataOutputBuffer dataBuf = new DataOutputBuffer();
+    final RandomDatum.Generator generator = new RandomDatum.Generator(seed);
+    for(int i = 0; i < count; ++i) {
+      generator.next();
+      final RandomDatum key = generator.getKey();
+      final RandomDatum value = generator.getValue();
+      
+      key.write(dataBuf);
+      value.write(dataBuf);
+    }
+    LOG.info("Generated " + count + " records");
+    data = dataBuf.getData();
+    dataLen = dataBuf.getLength();
+  }
+  
+  protected void writeData(OutputStream out) throws Exception {
+    out.write(data, 0, dataLen);
+    out.close();
+  }
+  
+  protected int getDataLen() {
+    return dataLen;
+  }
+  
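+  /**
+   * Read until {@code len} bytes have been consumed or the underlying
+   * stream reaches EOF; returns the number of bytes actually read.
+   */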
+  private int readAll(InputStream in, byte[] b, int off, int len) 
+      throws IOException {
+    int n = 0;
+    int total = 0;
+    while (n != -1) {
+      total += n;
+      if (total >= len) {
+        break;
+      }
+      n = in.read(b, off + total, len - total);
+    }
+    
+    return total;
+  }
+  
+  protected OutputStream getOutputStream(int bufferSize) throws IOException {
+    return getOutputStream(bufferSize, key, iv);
+  }
+  
+  protected abstract OutputStream getOutputStream(int bufferSize, byte[] key, 
+      byte[] iv) throws IOException;
+  
+  protected InputStream getInputStream(int bufferSize) throws IOException {
+    return getInputStream(bufferSize, key, iv);
+  }
+  
+  protected abstract InputStream getInputStream(int bufferSize, byte[] key, 
+      byte[] iv) throws IOException;
+  
+  /** Test crypto reading with different buffer sizes. */
+  @Test(timeout=120000)
+  public void testRead() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    // Default buffer size
+    InputStream in = getInputStream(defaultBufferSize);
+    readCheck(in);
+    in.close();
+    
+    // Small buffer size
+    in = getInputStream(smallBufferSize);
+    readCheck(in);
+    in.close();
+  }
+  
+  private void readCheck(InputStream in) throws Exception {
+    byte[] result = new byte[dataLen];
+    int n = readAll(in, result, 0, dataLen);
+    
+    Assert.assertEquals(dataLen, n);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, 0, expectedData, 0, n);
+    Assert.assertArrayEquals(result, expectedData);
+    
+    // EOF
+    n = in.read(result, 0, dataLen);
+    Assert.assertEquals(n, -1);
+    in.close();
+  }
+  
+  /** Test crypto writing with different buffer sizes. */
+  @Test(timeout = 120000)
+  public void testWrite() throws Exception {
+    // Default buffer size
+    writeCheck(defaultBufferSize);
+
+    // Small buffer size
+    writeCheck(smallBufferSize);
+  }
+
+  private void writeCheck(int bufferSize) throws Exception {
+    OutputStream out = getOutputStream(bufferSize);
+    writeData(out);
+
+    if (out instanceof FSDataOutputStream) {
+      Assert.assertEquals(((FSDataOutputStream) out).getPos(), getDataLen());
+    }
+  }
+
+  /** Test crypto with different IVs. */
+  @Test(timeout=120000)
+  public void testCryptoIV() throws Exception {
+    byte[] iv1 = iv.clone();
+    
+    // Counter base: Long.MAX_VALUE
+    setCounterBaseForIV(iv1, Long.MAX_VALUE);
+    cryptoCheck(iv1);
+    
+    // Counter base: Long.MAX_VALUE - 1
+    setCounterBaseForIV(iv1, Long.MAX_VALUE - 1);
+    cryptoCheck(iv1);
+    
+    // Counter base: Integer.MAX_VALUE
+    setCounterBaseForIV(iv1, Integer.MAX_VALUE);
+    cryptoCheck(iv1);
+    
+    // Counter base: 0
+    setCounterBaseForIV(iv1, 0);
+    cryptoCheck(iv1);
+    
+    // Counter base: -1
+    setCounterBaseForIV(iv1, -1);
+    cryptoCheck(iv1);
+  }
+  
+  private void cryptoCheck(byte[] iv) throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize, key, iv);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize, key, iv);
+    readCheck(in);
+    in.close();
+  }
+  
+  private void setCounterBaseForIV(byte[] iv, long counterBase) {
+    ByteBuffer buf = ByteBuffer.wrap(iv);
+    buf.order(ByteOrder.BIG_ENDIAN);
+    buf.putLong(iv.length - 8, counterBase);
+  }
+  
+  /**
+   * Test hflush/hsync of the crypto output stream, with different buffer sizes.
+   */
+  @Test(timeout=120000)
+  public void testSyncable() throws IOException {
+    syncableCheck();
+  }
+  
+  private void syncableCheck() throws IOException {
+    OutputStream out = getOutputStream(smallBufferSize);
+    try {
+      int bytesWritten = dataLen / 3;
+      out.write(data, 0, bytesWritten);
+      ((Syncable) out).hflush();
+      
+      InputStream in = getInputStream(defaultBufferSize);
+      verify(in, bytesWritten, data);
+      in.close();
+      
+      out.write(data, bytesWritten, dataLen - bytesWritten);
+      ((Syncable) out).hsync();
+      
+      in = getInputStream(defaultBufferSize);
+      verify(in, dataLen, data);
+      in.close();
+    } finally {
+      out.close();
+    }
+  }
+  
+  private void verify(InputStream in, int bytesToVerify, 
+      byte[] expectedBytes) throws IOException {
+    final byte[] readBuf = new byte[bytesToVerify];
+    readAll(in, readBuf, 0, bytesToVerify);
+    for (int i = 0; i < bytesToVerify; i++) {
+      Assert.assertEquals(expectedBytes[i], readBuf[i]);
+    }
+  }
+  
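+  /**
+   * Positioned variant of {@link #readAll(InputStream, byte[], int, int)}:
+   * reads via PositionedReadable without moving the stream position.
+   */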
+  private int readAll(InputStream in, long pos, byte[] b, int off, int len) 
+      throws IOException {
+    int n = 0;
+    int total = 0;
+    while (n != -1) {
+      total += n;
+      if (total >= len) {
+        break;
+      }
+      n = ((PositionedReadable) in).read(pos + total, b, off + total, 
+          len - total);
+    }
+    
+    return total;
+  }
+  
+  /** Test positioned read. */
+  @Test(timeout=120000)
+  public void testPositionedRead() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    // Pos: 1/3 dataLen
+    positionedReadCheck(in, dataLen / 3);
+
+    // Pos: 1/2 dataLen
+    positionedReadCheck(in, dataLen / 2);
+    in.close();
+  }
+  
+  private void positionedReadCheck(InputStream in, int pos) throws Exception {
+    byte[] result = new byte[dataLen];
+    int n = readAll(in, pos, result, 0, dataLen);
+    
+    Assert.assertEquals(dataLen, n + pos);
+    byte[] readData = new byte[n];
+    System.arraycopy(result, 0, readData, 0, n);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, pos, expectedData, 0, n);
+    Assert.assertArrayEquals(readData, expectedData);
+  }
+  
+  /** Test readFully. */
+  @Test(timeout=120000)
+  public void testReadFully() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    final int len1 = dataLen / 4;
+    // Read len1 bytes
+    byte[] readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    byte[] expectedData = new byte[len1];
+    System.arraycopy(data, 0, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // Pos: 1/3 dataLen
+    readFullyCheck(in, dataLen / 3);
+    
+    // Read len1 bytes
+    readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    expectedData = new byte[len1];
+    System.arraycopy(data, len1, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // Pos: 1/2 dataLen
+    readFullyCheck(in, dataLen / 2);
+    
+    // Read len1 bytes
+    readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    expectedData = new byte[len1];
+    System.arraycopy(data, 2 * len1, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    in.close();
+  }
+  
+  private void readFullyCheck(InputStream in, int pos) throws Exception {
+    byte[] result = new byte[dataLen - pos];
+    ((PositionedReadable) in).readFully(pos, result);
+    
+    byte[] expectedData = new byte[dataLen - pos];
+    System.arraycopy(data, pos, expectedData, 0, dataLen - pos);
+    Assert.assertArrayEquals(result, expectedData);
+    
+    result = new byte[dataLen]; // Exceeds maximum length 
+    try {
+      ((PositionedReadable) in).readFully(pos, result);
+      Assert.fail("readFully beyond the remaining stream length should fail.");
+    } catch (IOException e) {
+      // Expected
+    }
+  }
+  
+  /** Test seek to different positions. */
+  @Test(timeout=120000)
+  public void testSeek() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    // Pos: 1/3 dataLen
+    seekCheck(in, dataLen / 3);
+    
+    // Pos: 0
+    seekCheck(in, 0);
+    
+    // Pos: 1/2 dataLen
+    seekCheck(in, dataLen / 2);
+    
+    final long pos = ((Seekable) in).getPos();
+    
+    // Pos: -3
+    try {
+      seekCheck(in, -3);
+      Assert.fail("Seek to negative offset should fail.");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("Cannot seek to negative " +
+          "offset", e);
+    }
+    Assert.assertEquals(pos, ((Seekable) in).getPos());
+    
+    // Pos: dataLen + 3
+    try {
+      seekCheck(in, dataLen + 3);
+      Assert.fail("Seek after EOF should fail.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Cannot seek after EOF", e);
+    }
+    Assert.assertEquals(pos, ((Seekable) in).getPos());
+    
+    in.close();
+  }
+  
+  private void seekCheck(InputStream in, int pos) throws Exception {
+    byte[] result = new byte[dataLen];
+    ((Seekable) in).seek(pos);
+    int n = readAll(in, result, 0, dataLen);
+    
+    Assert.assertEquals(dataLen, n + pos);
+    byte[] readData = new byte[n];
+    System.arraycopy(result, 0, readData, 0, n);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, pos, expectedData, 0, n);
+    Assert.assertArrayEquals(readData, expectedData);
+  }
+  
+  /** Test get position. */
+  @Test(timeout=120000)
+  public void testGetPos() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    // Default buffer size
+    InputStream in = getInputStream(defaultBufferSize);
+    byte[] result = new byte[dataLen];
+    int n1 = readAll(in, result, 0, dataLen / 3);
+    Assert.assertEquals(n1, ((Seekable) in).getPos());
+    
+    int n2 = readAll(in, result, n1, dataLen - n1);
+    Assert.assertEquals(n1 + n2, ((Seekable) in).getPos());
+    in.close();
+  }
+  
+  @Test(timeout=120000)
+  public void testAvailable() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    // Default buffer size
+    InputStream in = getInputStream(defaultBufferSize);
+    byte[] result = new byte[dataLen];
+    int n1 = readAll(in, result, 0, dataLen / 3);
+    Assert.assertEquals(in.available(), dataLen - n1);
+    
+    int n2 = readAll(in, result, n1, dataLen - n1);
+    Assert.assertEquals(in.available(), dataLen - n1 - n2);
+    in.close();
+  }
+  
+  /** Test skip. */
+  @Test(timeout=120000)
+  public void testSkip() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+        
+    // Default buffer size
+    InputStream in = getInputStream(defaultBufferSize);
+    byte[] result = new byte[dataLen];
+    int n1 = readAll(in, result, 0, dataLen / 3);
+    Assert.assertEquals(n1, ((Seekable) in).getPos());
+    
+    long skipped = in.skip(dataLen / 3);
+    int n2 = readAll(in, result, 0, dataLen);
+    
+    Assert.assertEquals(dataLen, n1 + skipped + n2);
+    byte[] readData = new byte[n2];
+    System.arraycopy(result, 0, readData, 0, n2);
+    byte[] expectedData = new byte[n2];
+    System.arraycopy(data, dataLen - n2, expectedData, 0, n2);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    try {
+      skipped = in.skip(-3);
+      Assert.fail("Skip Negative length should fail.");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("Negative skip length", e);
+    }
+    
+    // Skip after EOF
+    skipped = in.skip(3);
+    Assert.assertEquals(skipped, 0);
+    
+    in.close();
+  }
+  
+  private void byteBufferReadCheck(InputStream in, ByteBuffer buf, 
+      int bufPos) throws Exception {
+    buf.position(bufPos);
+    int n = ((ByteBufferReadable) in).read(buf);
+    byte[] readData = new byte[n];
+    buf.rewind();
+    buf.position(bufPos);
+    buf.get(readData);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, 0, expectedData, 0, n);
+    Assert.assertArrayEquals(readData, expectedData);
+  }
+  
+  /** Test byte buffer read with different buffer sizes. */
+  @Test(timeout=120000)
+  public void testByteBufferRead() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    // Default buffer size, initial buffer position is 0
+    InputStream in = getInputStream(defaultBufferSize);
+    ByteBuffer buf = ByteBuffer.allocate(dataLen + 100);
+    byteBufferReadCheck(in, buf, 0);
+    in.close();
+    
+    // Default buffer size, initial buffer position is not 0
+    in = getInputStream(defaultBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 11);
+    in.close();
+    
+    // Small buffer size, initial buffer position is 0
+    in = getInputStream(smallBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 0);
+    in.close();
+    
+    // Small buffer size, initial buffer position is not 0
+    in = getInputStream(smallBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 11);
+    in.close();
+    
+    // Direct buffer, default buffer size, initial buffer position is 0
+    in = getInputStream(defaultBufferSize);
+    buf = ByteBuffer.allocateDirect(dataLen + 100);
+    byteBufferReadCheck(in, buf, 0);
+    in.close();
+    
+    // Direct buffer, default buffer size, initial buffer position is not 0
+    in = getInputStream(defaultBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 11);
+    in.close();
+    
+    // Direct buffer, small buffer size, initial buffer position is 0
+    in = getInputStream(smallBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 0);
+    in.close();
+    
+    // Direct buffer, small buffer size, initial buffer position is not 0
+    in = getInputStream(smallBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 11);
+    in.close();
+  }
+  
+  @Test(timeout=120000)
+  public void testCombinedOp() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    final int len1 = dataLen / 8;
+    final int len2 = dataLen / 10;
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    // Read len1 data.
+    byte[] readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    byte[] expectedData = new byte[len1];
+    System.arraycopy(data, 0, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    long pos = ((Seekable) in).getPos();
+    Assert.assertEquals(len1, pos);
+    
+    // Seek forward len2
+    ((Seekable) in).seek(pos + len2);
+    // Skip forward len2
+    long n = in.skip(len2);
+    Assert.assertEquals(len2, n);
+    
+    // Pos: 1/4 dataLen
+    positionedReadCheck(in, dataLen / 4);
+    
+    // Pos should be len1 + len2 + len2
+    pos = ((Seekable) in).getPos();
+    Assert.assertEquals(len1 + len2 + len2, pos);
+    
+    // Read forward len1
+    ByteBuffer buf = ByteBuffer.allocate(len1);
+    int nRead = ((ByteBufferReadable) in).read(buf);
+    readData = new byte[nRead];
+    buf.rewind();
+    buf.get(readData);
+    expectedData = new byte[nRead];
+    System.arraycopy(data, (int)pos, expectedData, 0, nRead);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // Pos should be len1 + 2 * len2 + nRead
+    pos = ((Seekable) in).getPos();
+    Assert.assertEquals(len1 + 2 * len2 + nRead, pos);
+    
+    // Pos: 1/3 dataLen
+    positionedReadCheck(in, dataLen / 3);
+    
+    // Read forward len1
+    readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    expectedData = new byte[len1];
+    System.arraycopy(data, (int)pos, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // Pos should be 2 * len1 + 2 * len2 + nRead
+    pos = ((Seekable) in).getPos();
+    Assert.assertEquals(2 * len1 + 2 * len2 + nRead, pos);
+    
+    // Read forward len1
+    buf = ByteBuffer.allocate(len1);
+    nRead = ((ByteBufferReadable) in).read(buf);
+    readData = new byte[nRead];
+    buf.rewind();
+    buf.get(readData);
+    expectedData = new byte[nRead];
+    System.arraycopy(data, (int)pos, expectedData, 0, nRead);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // ByteBuffer read after EOF
+    ((Seekable) in).seek(dataLen);
+    buf.clear();
+    n = ((ByteBufferReadable) in).read(buf);
+    Assert.assertEquals(n, -1);
+    
+    in.close();
+  }
+  
+  @Test(timeout=120000)
+  public void testSeekToNewSource() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    
+    final int len1 = dataLen / 8;
+    byte[] readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    
+    // Pos: 1/3 dataLen
+    seekToNewSourceCheck(in, dataLen / 3);
+    
+    // Pos: 0
+    seekToNewSourceCheck(in, 0);
+    
+    // Pos: 1/2 dataLen
+    seekToNewSourceCheck(in, dataLen / 2);
+    
+    // Pos: -3
+    try {
+      seekToNewSourceCheck(in, -3);
+      Assert.fail("Seek to negative offset should fail.");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("Cannot seek to negative " +
+          "offset", e);
+    }
+    
+    // Pos: dataLen + 3
+    try {
+      seekToNewSourceCheck(in, dataLen + 3);
+      Assert.fail("Seek after EOF should fail.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Attempted to read past " +
+          "end of file", e);
+    }
+    
+    in.close();
+  }
+  
+  private void seekToNewSourceCheck(InputStream in, int targetPos) 
+      throws Exception {
+    byte[] result = new byte[dataLen];
+    ((Seekable) in).seekToNewSource(targetPos);
+    int n = readAll(in, result, 0, dataLen);
+    
+    Assert.assertEquals(dataLen, n + targetPos);
+    byte[] readData = new byte[n];
+    System.arraycopy(result, 0, readData, 0, n);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, targetPos, expectedData, 0, n);
+    Assert.assertArrayEquals(readData, expectedData);
+  }
+  
+  private ByteBufferPool getBufferPool() {
+    return new ByteBufferPool() {
+      @Override
+      public ByteBuffer getBuffer(boolean direct, int length) {
+        return ByteBuffer.allocateDirect(length);
+      }
+      
+      @Override
+      public void putBuffer(ByteBuffer buffer) {
+      }
+    };
+  }
+  
+  @Test(timeout=120000)
+  public void testHasEnhancedByteBufferAccess() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    final int len1 = dataLen / 8;
+    // ByteBuffer size is len1
+    ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).read(
+        getBufferPool(), len1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
+    int n1 = buffer.remaining();
+    byte[] readData = new byte[n1];
+    buffer.get(readData);
+    byte[] expectedData = new byte[n1];
+    System.arraycopy(data, 0, expectedData, 0, n1);
+    Assert.assertArrayEquals(readData, expectedData);
+    ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
+    
+    // Read len1 bytes
+    readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    expectedData = new byte[len1];
+    System.arraycopy(data, n1, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // ByteBuffer size is len1
+    buffer = ((HasEnhancedByteBufferAccess) in).read(
+        getBufferPool(), len1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
+    int n2 = buffer.remaining();
+    readData = new byte[n2];
+    buffer.get(readData);
+    expectedData = new byte[n2];
+    System.arraycopy(data, n1 + len1, expectedData, 0, n2);
+    Assert.assertArrayEquals(readData, expectedData);
+    ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
+    
+    in.close();
+  }
+}

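The setCounterBaseForIV helper above embeds the CTR counter base in the last 8
bytes of the 16-byte AES IV, big-endian. A minimal standalone sketch of the same
arithmetic (class and method names here are illustrative, not part of the patch):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class CtrIvDemo {
      // Overwrite the low 8 bytes of a 16-byte AES-CTR IV with the counter base.
      static void setCounterBase(byte[] iv, long counterBase) {
        ByteBuffer buf = ByteBuffer.wrap(iv);
        buf.order(ByteOrder.BIG_ENDIAN); // CTR counters are big-endian
        buf.putLong(iv.length - 8, counterBase);
      }

      public static void main(String[] args) {
        byte[] iv = new byte[16];
        setCounterBase(iv, Long.MAX_VALUE);
        for (byte b : iv) {
          System.out.printf("%02x ", b); // first 8 bytes 00, last 8: 7f ff .. ff
        }
        System.out.println();
      }
    }

testCryptoIV deliberately picks the wrap-around points (Long.MAX_VALUE, -1, 0,
Integer.MAX_VALUE): overflow of the low 64 counter bits is where a CTR
implementation is most likely to go wrong.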
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
new file mode 100644
index 0000000..49b5056
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.RandomDatum;
+import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Test;
+
+public class TestCryptoCodec {
+  private static final Log LOG = LogFactory.getLog(TestCryptoCodec.class);
+  private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16};
+  private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
+  private static final int bufferSize = 4096;
+  
+  private Configuration conf = new Configuration();
+  private int count = 10000;
+  private int seed = new Random().nextInt();
+  
+  @Test(timeout=120000)
+  public void testJceAesCtrCryptoCodec() throws Exception {
+    cryptoCodecTest(conf, seed, 0, 
+        "org.apache.hadoop.crypto.JceAesCtrCryptoCodec");
+    cryptoCodecTest(conf, seed, count, 
+        "org.apache.hadoop.crypto.JceAesCtrCryptoCodec");
+  }
+  
+  @Test(timeout=120000)
+  public void testOpensslAesCtrCryptoCodec() throws Exception {
+    Assume.assumeTrue(NativeCodeLoader.buildSupportsOpenssl());
+    Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason());
+    cryptoCodecTest(conf, seed, 0, 
+        "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec");
+    cryptoCodecTest(conf, seed, count, 
+        "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec");
+  }
+  
+  private void cryptoCodecTest(Configuration conf, int seed, int count, 
+      String codecClass) throws IOException, GeneralSecurityException {
+    CryptoCodec codec = null;
+    try {
+      codec = (CryptoCodec)ReflectionUtils.newInstance(
+          conf.getClassByName(codecClass), conf);
+    } catch (ClassNotFoundException cnfe) {
+      throw new IOException("Illegal crypto codec: " + codecClass, cnfe);
+    }
+    LOG.info("Created a Codec object of type: " + codecClass);
+    
+    // Generate data
+    DataOutputBuffer data = new DataOutputBuffer();
+    RandomDatum.Generator generator = new RandomDatum.Generator(seed);
+    for(int i = 0; i < count; ++i) {
+      generator.next();
+      RandomDatum key = generator.getKey();
+      RandomDatum value = generator.getValue();
+      
+      key.write(data);
+      value.write(data);
+    }
+    LOG.info("Generated " + count + " records");
+    
+    // Encrypt data
+    DataOutputBuffer encryptedDataBuffer = new DataOutputBuffer();
+    CryptoOutputStream out = new CryptoOutputStream(encryptedDataBuffer, 
+        codec, bufferSize, key, iv);
+    out.write(data.getData(), 0, data.getLength());
+    out.flush();
+    out.close();
+    LOG.info("Finished encrypting data");
+    
+    // Decrypt data
+    DataInputBuffer decryptedDataBuffer = new DataInputBuffer();
+    decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0, 
+        encryptedDataBuffer.getLength());
+    CryptoInputStream in = new CryptoInputStream(decryptedDataBuffer, 
+        codec, bufferSize, key, iv);
+    DataInputStream dataIn = new DataInputStream(new BufferedInputStream(in));
+    
+    // Check
+    DataInputBuffer originalData = new DataInputBuffer();
+    originalData.reset(data.getData(), 0, data.getLength());
+    DataInputStream originalIn = new DataInputStream(
+        new BufferedInputStream(originalData));
+    
+    for(int i = 0; i < count; ++i) {
+      RandomDatum k1 = new RandomDatum();
+      RandomDatum v1 = new RandomDatum();
+      k1.readFields(originalIn);
+      v1.readFields(originalIn);
+      
+      RandomDatum k2 = new RandomDatum();
+      RandomDatum v2 = new RandomDatum();
+      k2.readFields(dataIn);
+      v2.readFields(dataIn);
+      assertTrue("original and encrypted-then-decrypted-output not equal",
+                 k1.equals(k2) && v1.equals(v2));
+      
+      // original and encrypted-then-decrypted-output have the same hashCode
+      Map<RandomDatum, String> m = new HashMap<RandomDatum, String>();
+      m.put(k1, k1.toString());
+      m.put(v1, v1.toString());
+      String result = m.get(k2);
+      assertEquals("k1 and k2 hashcode not equal", result, k1.toString());
+      result = m.get(v2);
+      assertEquals("v1 and v2 hashcode not equal", result, v1.toString());
+    }
+
+    // Decrypt data byte-at-a-time
+    originalData.reset(data.getData(), 0, data.getLength());
+    decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0, 
+        encryptedDataBuffer.getLength());
+    in = new CryptoInputStream(decryptedDataBuffer, 
+        codec, bufferSize, key, iv);
+
+    // Check
+    originalIn = new DataInputStream(new BufferedInputStream(originalData));
+    int expected;
+    do {
+      expected = originalIn.read();
+      assertEquals("Decrypted stream read by byte does not match",
+        expected, in.read());
+    } while (expected != -1);
+
+    LOG.info("SUCCESS! Completed checking " + count + " records");
+    
+    // Check secure random generator
+    testSecureRandom(codec);
+  }
+  
+  /** Test secure random generator */
+  private void testSecureRandom(CryptoCodec codec) {
+    // len = 16
+    checkSecureRandom(codec, 16);
+    // len = 32
+    checkSecureRandom(codec, 32);
+    // len = 128
+    checkSecureRandom(codec, 128);
+  }
+  
+  private void checkSecureRandom(CryptoCodec codec, int len) {
+    byte[] rand = new byte[len];
+    byte[] rand1 = new byte[len];
+    codec.generateSecureRandom(rand);
+    codec.generateSecureRandom(rand1);
+    
+    Assert.assertEquals(len, rand.length);
+    Assert.assertEquals(len, rand1.length);
+    Assert.assertFalse(Arrays.equals(rand, rand1));
+  }
+}

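The encrypt/decrypt cycle above runs through Hadoop's DataOutputBuffer and
DataInputBuffer; the same round trip can be sketched with plain java.io streams.
A minimal sketch, not part of the patch, assuming codec, key, and iv are set up
as in TestCryptoCodec:

    byte[] plain = "hello, crypto streams".getBytes("UTF-8");

    ByteArrayOutputStream encrypted = new ByteArrayOutputStream();
    CryptoOutputStream cos =
        new CryptoOutputStream(encrypted, codec, 4096, key, iv);
    cos.write(plain);
    cos.close(); // flushes any buffered ciphertext

    CryptoInputStream cis = new CryptoInputStream(
        new ByteArrayInputStream(encrypted.toByteArray()),
        codec, 4096, key, iv);
    byte[] roundTrip = new byte[plain.length];
    int off = 0;
    while (off < roundTrip.length) {
      int n = cis.read(roundTrip, off, roundTrip.length - off);
      if (n < 0) {
        break; // EOF
      }
      off += n;
    }
    cis.close();
    // roundTrip now holds the same bytes as plain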
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
new file mode 100644
index 0000000..056ee9a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
@@ -0,0 +1,381 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.EOFException;
+import java.io.FileDescriptor;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ByteBufferReadable;
+import org.apache.hadoop.fs.CanSetDropBehind;
+import org.apache.hadoop.fs.CanSetReadahead;
+import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
+import org.apache.hadoop.fs.HasFileDescriptor;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.ReadOption;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.Syncable;
+import org.apache.hadoop.io.ByteBufferPool;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+public class TestCryptoStreams extends CryptoStreamsTestBase {
+  /**
+   * Data storage.
+   * {@link #getOutputStream(int)} will write to this buf.
+   * {@link #getInputStream(int)} will read from this buf.
+   */
+  private byte[] buf;
+  private int bufLen;
+  
+  @BeforeClass
+  public static void init() throws Exception {
+    Configuration conf = new Configuration();
+    codec = CryptoCodec.getInstance(conf);
+  }
+  
+  @AfterClass
+  public static void shutdown() throws Exception {
+  }
+  
+  @Override
+  protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv) 
+      throws IOException {
+    DataOutputBuffer out = new DataOutputBuffer() {
+      @Override
+      public void flush() throws IOException {
+        buf = getData();
+        bufLen = getLength();
+      }
+      @Override
+      public void close() throws IOException {
+        buf = getData();
+        bufLen = getLength();
+      }
+    };
+    return new CryptoOutputStream(new FakeOutputStream(out),
+        codec, bufferSize, key, iv);
+  }
+  
+  @Override
+  protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv) 
+      throws IOException {
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(buf, 0, bufLen);
+    return new CryptoInputStream(new FakeInputStream(in), codec, bufferSize, 
+        key, iv);
+  }
+  
+  private class FakeOutputStream extends OutputStream 
+      implements Syncable, CanSetDropBehind {
+    private final byte[] oneByteBuf = new byte[1];
+    private final DataOutputBuffer out;
+    private boolean closed;
+    
+    public FakeOutputStream(DataOutputBuffer out) {
+      this.out = out;
+    }
+    
+    @Override
+    public void write(byte b[], int off, int len) throws IOException {
+      if (b == null) {
+        throw new NullPointerException();
+      } else if (off < 0 || len < 0 || len > b.length - off) {
+        throw new IndexOutOfBoundsException();
+      } else if (len == 0) {
+        return;
+      }
+      
+      checkStream();
+      
+      out.write(b, off, len);
+    }
+    
+    @Override
+    public void flush() throws IOException {
+      checkStream();
+      out.flush();
+    }
+    
+    @Override
+    public void close() throws IOException {
+      if (closed) {
+        return;
+      }
+      
+      out.close();
+      closed = true;
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+      oneByteBuf[0] = (byte)(b & 0xff);
+      write(oneByteBuf, 0, oneByteBuf.length);
+    }
+
+    @Override
+    public void setDropBehind(Boolean dropCache) throws IOException,
+        UnsupportedOperationException {
+    }
+
+    @Override
+    public void sync() throws IOException {
+      hflush();
+    }
+
+    @Override
+    public void hflush() throws IOException {
+      checkStream();
+      flush();
+    }
+
+    @Override
+    public void hsync() throws IOException {
+      checkStream();
+      flush();
+    }
+    
+    private void checkStream() throws IOException {
+      if (closed) {
+        throw new IOException("Stream is closed!");
+      }
+    }
+  }
+  
+  private class FakeInputStream extends InputStream implements 
+      Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
+      CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess {
+    private final byte[] oneByteBuf = new byte[1];
+    private int pos = 0;
+    private final byte[] data;
+    private final int length;
+    private boolean closed = false;
+
+    public FakeInputStream(DataInputBuffer in) {
+      data = in.getData();
+      length = in.getLength();
+    }
+    
+    @Override
+    public void seek(long pos) throws IOException {
+      if (pos > length) {
+        throw new IOException("Cannot seek after EOF.");
+      }
+      if (pos < 0) {
+        throw new IOException("Cannot seek to negative offset.");
+      }
+      checkStream();
+      this.pos = (int)pos;
+    }
+    
+    @Override
+    public long getPos() throws IOException {
+      return pos;
+    }
+    
+    @Override
+    public int available() throws IOException {
+      return length - pos;
+    }
+    
+    @Override
+    public int read(byte b[], int off, int len) throws IOException {
+      if (b == null) {
+        throw new NullPointerException();
+      } else if (off < 0 || len < 0 || len > b.length - off) {
+        throw new IndexOutOfBoundsException();
+      } else if (len == 0) {
+        return 0;
+      }
+      
+      checkStream();
+      
+      if (pos < length) {
+        int n = (int) Math.min(len, length - pos);
+        System.arraycopy(data, pos, b, off, n);
+        pos += n;
+        return n;
+      }
+      
+      return -1;
+    }
+    
+    private void checkStream() throws IOException {
+      if (closed) {
+        throw new IOException("Stream is closed!");
+      }
+    }
+    
+    @Override
+    public int read(ByteBuffer buf) throws IOException {
+      checkStream();
+      if (pos < length) {
+        int n = (int) Math.min(buf.remaining(), length - pos);
+        if (n > 0) {
+          buf.put(data, pos, n);
+        }
+        pos += n;
+        return n;
+      }
+      return -1;
+    }
+    
+    @Override
+    public long skip(long n) throws IOException {
+      checkStream();
+      if ( n > 0 ) {
+        if( n + pos > length ) {
+          n = length - pos;
+        }
+        pos += n;
+        return n;
+      }
+      return n < 0 ? -1 : 0;
+    }
+    
+    @Override
+    public void close() throws IOException {
+      closed = true;
+    }
+
+    @Override
+    public int read(long position, byte[] b, int off, int len)
+        throws IOException {
+      if (b == null) {
+        throw new NullPointerException();
+      } else if (off < 0 || len < 0 || len > b.length - off) {
+        throw new IndexOutOfBoundsException();
+      } else if (len == 0) {
+        return 0;
+      }
+      
+      if (position > length) {
+        throw new IOException("Cannot read after EOF.");
+      }
+      if (position < 0) {
+        throw new IOException("Cannot read to negative offset.");
+      }
+      
+      checkStream();
+      
+      if (position < length) {
+        int n = (int) Math.min(len, length - position);
+        System.arraycopy(data, (int)position, b, off, n);
+        return n;
+      }
+      
+      return -1;
+    }
+
+    @Override
+    public void readFully(long position, byte[] b, int off, int len)
+        throws IOException {
+      if (b == null) {
+        throw new NullPointerException();
+      } else if (off < 0 || len < 0 || len > b.length - off) {
+        throw new IndexOutOfBoundsException();
+      } else if (len == 0) {
+        return;
+      }
+      
+      if (position > length) {
+        throw new IOException("Cannot read after EOF.");
+      }
+      if (position < 0) {
+        throw new IOException("Cannot read to negative offset.");
+      }
+      
+      checkStream();
+      
+      if (position + len > length) {
+        throw new EOFException("Reached end of stream.");
+      }
+      
+      System.arraycopy(data, (int)position, b, off, len);
+    }
+
+    @Override
+    public void readFully(long position, byte[] buffer) throws IOException {
+      readFully(position, buffer, 0, buffer.length);
+    }
+
+    @Override
+    public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
+        EnumSet<ReadOption> opts) throws IOException,
+        UnsupportedOperationException {
+      if (bufferPool == null) {
+        throw new IOException("Please specify buffer pool.");
+      }
+      ByteBuffer buffer = bufferPool.getBuffer(true, maxLength);
+      int pos = buffer.position();
+      int n = read(buffer);
+      if (n >= 0) {
+        buffer.position(pos);
+        return buffer;
+      }
+      
+      return null;
+    }
+
+    @Override
+    public void releaseBuffer(ByteBuffer buffer) {
+      
+    }
+
+    @Override
+    public void setReadahead(Long readahead) throws IOException,
+        UnsupportedOperationException {
+    }
+
+    @Override
+    public void setDropBehind(Boolean dropCache) throws IOException,
+        UnsupportedOperationException {
+    }
+
+    @Override
+    public FileDescriptor getFileDescriptor() throws IOException {
+      return null;
+    }
+    
+    @Override
+    public boolean seekToNewSource(long targetPos) throws IOException {
+      if (targetPos > length) {
+        throw new IOException("Attempted to read past end of file.");
+      }
+      if (targetPos < 0) {
+        throw new IOException("Cannot seek after EOF.");
+      }
+      checkStream();
+      this.pos = (int)targetPos;
+      return false;
+    }
+
+    @Override
+    public int read() throws IOException {
+      int ret = read(oneByteBuf, 0, 1);
+      return (ret <= 0) ? -1 : (oneByteBuf[0] & 0xff);
+    }
+  }
+}

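FakeInputStream above pins down the contracts the crypto stream relies on:
read(ByteBuffer) returns the byte count or -1 at EOF, positioned reads leave the
stream offset untouched, and the pooled read(...) returns null at EOF. A hedged
sketch of a consumer loop against the ByteBufferReadable contract, where `in` is
any stream implementing it:

    ByteBuffer buf = ByteBuffer.allocateDirect(8192);
    long total = 0;
    while (true) {
      buf.clear();                                 // make room for the next chunk
      int n = ((ByteBufferReadable) in).read(buf); // -1 signals EOF
      if (n < 0) {
        break;
      }
      buf.flip();                                  // switch from filling to draining
      total += n;
      // ... consume buf.remaining() bytes ...
    }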
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
new file mode 100644
index 0000000..765a364
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+public class TestCryptoStreamsForLocalFS extends CryptoStreamsTestBase {
+  private static final String TEST_ROOT_DIR
+    = System.getProperty("test.build.data","build/test/data") + "/work-dir/localfs";
+
+  private final File base = new File(TEST_ROOT_DIR);
+  private final Path file = new Path(TEST_ROOT_DIR, "test-file");
+  private static LocalFileSystem fileSys;
+  
+  @BeforeClass
+  public static void init() throws Exception {
+    Configuration conf = new Configuration(false);
+    conf.set("fs.file.impl", LocalFileSystem.class.getName());
+    fileSys = FileSystem.getLocal(conf);
+    conf.set(
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
+            + CipherSuite.AES_CTR_NOPADDING.getConfigSuffix(),
+        OpensslAesCtrCryptoCodec.class.getName() + ","
+            + JceAesCtrCryptoCodec.class.getName());
+    codec = CryptoCodec.getInstance(conf);
+  }
+  
+  @AfterClass
+  public static void shutdown() throws Exception {
+  }
+  
+  @Before
+  @Override
+  public void setUp() throws IOException {
+    fileSys.delete(new Path(TEST_ROOT_DIR), true);
+    super.setUp();
+  }
+  
+  @After
+  public void cleanUp() throws IOException {
+    FileUtil.setWritable(base, true);
+    FileUtil.fullyDelete(base);
+    assertTrue(!base.exists());
+  }
+  
+  @Override
+  protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv) 
+      throws IOException {
+    return new CryptoOutputStream(fileSys.create(file), codec, bufferSize, 
+        key, iv);
+  }
+  
+  @Override
+  protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv) 
+      throws IOException {
+    return new CryptoInputStream(fileSys.open(file), codec, bufferSize, 
+        key, iv);
+  }
+  
+  @Ignore("ChecksumFSInputChecker doesn't support ByteBuffer read")
+  @Override
+  @Test(timeout=1000)
+  public void testByteBufferRead() throws Exception {}
+  
+  @Ignore("ChecksumFSOutputSummer doesn't support Syncable")
+  @Override
+  @Test(timeout=1000)
+  public void testSyncable() throws IOException {}
+  
+  @Ignore("ChecksumFSInputChecker doesn't support ByteBuffer read")
+  @Override
+  @Test(timeout=1000)
+  public void testCombinedOp() throws Exception {}
+  
+  @Ignore("ChecksumFSInputChecker doesn't support enhanced ByteBuffer access")
+  @Override
+  @Test(timeout=1000)
+  public void testHasEnhancedByteBufferAccess() throws Exception {
+  }
+  
+  @Ignore("ChecksumFSInputChecker doesn't support seekToNewSource")
+  @Override
+  @Test(timeout=1000)
+  public void testSeekToNewSource() throws Exception {
+  }
+}

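init() above registers a comma-separated codec list so the native OpenSSL
implementation is preferred and the pure-Java JCE codec is the fallback. The
same pattern in isolation (a sketch, reusing the constants already referenced by
the patch):

    // Prefer the native OpenSSL AES-CTR codec; fall back to the JCE one.
    Configuration conf = new Configuration(false);
    conf.set(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
            + CipherSuite.AES_CTR_NOPADDING.getConfigSuffix(),
        OpensslAesCtrCryptoCodec.class.getName() + ","
            + JceAesCtrCryptoCodec.class.getName());
    CryptoCodec codec = CryptoCodec.getInstance(conf);
    // getInstance is expected to walk the list and return the first codec
    // that loads, so hosts without the native library get the JCE codec.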
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
new file mode 100644
index 0000000..e9c313f
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+/**
+ * Test crypto streams using a normal stream which does not support the 
+ * additional interfaces that the Hadoop FileSystem streams implement 
+ * (Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
+ * CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess, 
+ * Syncable).
+ */
+public class TestCryptoStreamsNormal extends CryptoStreamsTestBase {
+  /**
+   * Data storage.
+   * {@link #getOutputStream(int, byte[], byte[])} will write to this buffer.
+   * {@link #getInputStream(int, byte[], byte[])} will read from this buffer.
+   */
+  private byte[] buffer;
+  private int bufferLen;
+  
+  @BeforeClass
+  public static void init() throws Exception {
+    Configuration conf = new Configuration();
+    codec = CryptoCodec.getInstance(conf);
+  }
+  
+  @AfterClass
+  public static void shutdown() throws Exception {
+  }
+
+  @Override
+  protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv)
+      throws IOException {
+    OutputStream out = new ByteArrayOutputStream() {
+      @Override
+      public void flush() throws IOException {
+        buffer = buf;
+        bufferLen = count;
+      }
+      @Override
+      public void close() throws IOException {
+        buffer = buf;
+        bufferLen = count;
+      }
+    };
+    return new CryptoOutputStream(out, codec, bufferSize, key, iv);
+  }
+
+  @Override
+  protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv)
+      throws IOException {
+    ByteArrayInputStream in = new ByteArrayInputStream(buffer, 0, bufferLen);
+    return new CryptoInputStream(in, codec, bufferSize, 
+        key, iv);
+  }
+  
+  @Ignore("Wrapped stream doesn't support Syncable")
+  @Override
+  @Test(timeout=1000)
+  public void testSyncable() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support PositionedRead")
+  @Override
+  @Test(timeout=1000)
+  public void testPositionedRead() throws IOException {}
+
+  @Ignore("Wrapped stream doesn't support ReadFully")
+  @Override
+  @Test(timeout=1000)
+  public void testReadFully() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support Seek")
+  @Override
+  @Test(timeout=1000)
+  public void testSeek() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support ByteBufferRead")
+  @Override
+  @Test(timeout=1000)
+  public void testByteBufferRead() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support ByteBufferRead, Seek")
+  @Override
+  @Test(timeout=1000)
+  public void testCombinedOp() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support SeekToNewSource")
+  @Override
+  @Test(timeout=1000)
+  public void testSeekToNewSource() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support HasEnhancedByteBufferAccess")
+  @Override
+  @Test(timeout=1000)
+  public void testHasEnhancedByteBufferAccess() throws IOException {}
+}

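getOutputStream above overrides flush() and close() to snapshot
ByteArrayOutputStream's protected buf/count fields, since CryptoOutputStream
only pushes its last buffered ciphertext to the wrapped stream on flush/close;
sampling the sink earlier would drop trailing bytes. A trimmed sketch of the
same capture idiom (names here are illustrative):

    final byte[][] captured = new byte[1][];
    final int[] capturedLen = new int[1];
    OutputStream sink = new ByteArrayOutputStream() {
      @Override
      public void close() {
        captured[0] = buf;      // protected backing array
        capturedLen[0] = count; // number of valid bytes in buf
      }
    };
    // Wrap `sink` in a CryptoOutputStream; after close(), the ciphertext
    // occupies captured[0][0..capturedLen[0]).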
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
new file mode 100644
index 0000000..f64e8dc
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+
+public class TestCryptoStreamsWithOpensslAesCtrCryptoCodec 
+    extends TestCryptoStreams {
+  
+  @BeforeClass
+  public static void init() throws Exception {
+    // Skip these tests when the native OpenSSL cipher cannot be loaded.
+    Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
+    Configuration conf = new Configuration();
+    // Pin the codec to the OpenSSL implementation; the default
+    // configuration would otherwise exercise the same codec as the
+    // parent class.
+    conf.set(
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
+            + CipherSuite.AES_CTR_NOPADDING.getConfigSuffix(),
+        OpensslAesCtrCryptoCodec.class.getName());
+    codec = CryptoCodec.getInstance(conf);
+  }
+}

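With the codec pinned as above, a hypothetical extra assertion would make a
silent fallback to another implementation fail fast rather than quietly
re-running the parent's coverage:

    // Hypothetical check, not in the patch:
    Assert.assertEquals(OpensslAesCtrCryptoCodec.class, codec.getClass());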
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java
new file mode 100644
index 0000000..966a887
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.nio.ByteBuffer;
+import java.security.NoSuchAlgorithmException;
+
+import javax.crypto.NoSuchPaddingException;
+import javax.crypto.ShortBufferException;
+
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assume;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestOpensslCipher {
+  private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16};
+  private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
+  
+  @Test(timeout=120000)
+  public void testGetInstance() throws Exception {
+    Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
+    OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
+    Assert.assertNotNull(cipher);
+    
+    try {
+      cipher = OpensslCipher.getInstance("AES2/CTR/NoPadding");
+      Assert.fail("Should throw NoSuchAlgorithmException for an invalid " +
+          "algorithm name.");
+    } catch (NoSuchAlgorithmException e) {
+      // Expected: "AES2" is not a valid algorithm
+    }
+    
+    try {
+      cipher = OpensslCipher.getInstance("AES/CTR/NoPadding2");
+      Assert.fail("Should throw NoSuchPaddingException for an invalid " +
+          "padding name.");
+    } catch (NoSuchPaddingException e) {
+      // Expected: "NoPadding2" is not a valid padding
+    }
+  }
+  
+  @Test(timeout=120000)
+  public void testUpdateArguments() throws Exception {
+    Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
+    OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
+    Assert.assertNotNull(cipher);
+    
+    cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);
+    
+    // Require direct buffers
+    ByteBuffer input = ByteBuffer.allocate(1024);
+    ByteBuffer output = ByteBuffer.allocate(1024);
+    
+    try {
+      cipher.update(input, output);
+      Assert.fail("Input and output buffer should be direct buffer.");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Direct buffers are required", e);
+    }
+    
+    // Output buffer length should be sufficient to store output data 
+    input = ByteBuffer.allocateDirect(1024);
+    output = ByteBuffer.allocateDirect(1000);
+    try {
+      cipher.update(input, output);
+      Assert.fail("Output buffer length should be sufficient " +
+          "to store output data");
+    } catch (ShortBufferException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Output buffer is not sufficient", e);
+    }
+  }
+  
+  @Test(timeout=120000)
+  public void testDoFinalArguments() throws Exception {
+    Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
+    OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
+    Assert.assertNotNull(cipher);
+    
+    cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);
+    
+    // Require direct buffer
+    ByteBuffer output = ByteBuffer.allocate(1024);
+    
+    try {
+      cipher.doFinal(output);
+      Assert.fail("Output buffer should be direct buffer.");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Direct buffer is required", e);
+    }
+  }
+}
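
The three tests above pin down OpensslCipher's calling convention: both buffers
must be direct, and the output buffer must have room for the whole result. A
full encrypt/decrypt round trip then looks roughly like the sketch below (it
assumes OpensslCipher also defines a DECRYPT_MODE constant symmetric to the
ENCRYPT_MODE used above):

    OpensslCipher enc = OpensslCipher.getInstance("AES/CTR/NoPadding");
    enc.init(OpensslCipher.ENCRYPT_MODE, key, iv);

    // Direct buffers only; heap buffers trigger IllegalArgumentException.
    ByteBuffer input = ByteBuffer.allocateDirect(1024);
    ByteBuffer output = ByteBuffer.allocateDirect(1024);
    input.put("some plaintext bytes".getBytes(StandardCharsets.UTF_8));
    input.flip();
    enc.update(input, output);
    enc.doFinal(output);
    output.flip();

    // CTR mode is symmetric: decryption reuses the same key and IV.
    OpensslCipher dec = OpensslCipher.getInstance("AES/CTR/NoPadding");
    dec.init(OpensslCipher.DECRYPT_MODE, key, iv);  // DECRYPT_MODE assumed
    ByteBuffer plain = ByteBuffer.allocateDirect(1024);
    dec.update(output, plain);
    dec.doFinal(plain);
    plain.flip();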

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java
new file mode 100644
index 0000000..f40c6ac
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.random;
+
+import java.util.Arrays;
+
+import org.junit.Test;
+
+public class TestOpensslSecureRandom {
+  
+  @Test(timeout=120000)
+  public void testRandomBytes() throws Exception {
+    OpensslSecureRandom random = new OpensslSecureRandom();
+    
+    // len = 16
+    checkRandomBytes(random, 16);
+    // len = 32
+    checkRandomBytes(random, 32);
+    // len = 128
+    checkRandomBytes(random, 128);
+    // len = 256
+    checkRandomBytes(random, 256);
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  private void checkRandomBytes(OpensslSecureRandom random, int len) {
+    byte[] bytes = new byte[len];
+    byte[] bytes1 = new byte[len];
+    random.nextBytes(bytes);
+    random.nextBytes(bytes1);
+    
+    while (Arrays.equals(bytes, bytes1)) {
+      random.nextBytes(bytes1);
+    }
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomInt() throws Exception {
+    OpensslSecureRandom random = new OpensslSecureRandom();
+    
+    int rand1 = random.nextInt();
+    int rand2 = random.nextInt();
+    while (rand1 == rand2) {
+      rand2 = random.nextInt();
+    }
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomLong() throws Exception {
+    OpensslSecureRandom random = new OpensslSecureRandom();
+    
+    long rand1 = random.nextLong();
+    long rand2 = random.nextLong();
+    while (rand1 == rand2) {
+      rand2 = random.nextLong();
+    }
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomFloat() throws Exception {
+    OpensslSecureRandom random = new OpensslSecureRandom();
+    
+    float rand1 = random.nextFloat();
+    float rand2 = random.nextFloat();
+    while (rand1 == rand2) {
+      rand2 = random.nextFloat();
+    }
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomDouble() throws Exception {
+    OpensslSecureRandom random = new OpensslSecureRandom();
+    
+    double rand1 = random.nextDouble();
+    double rand2 = random.nextDouble();
+    while (rand1 == rand2) {
+      rand2 = random.nextDouble();
+    }
+  }
+}
\ No newline at end of file
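
All five tests above assert only that consecutive draws eventually differ,
leaving the JUnit timeout to catch a constant-output implementation. Since the
nextInt/nextFloat/nextDouble calls suggest OpensslSecureRandom extends
java.util.Random, it can presumably be dropped in wherever a Random is
expected, e.g.:

    java.util.Random rng = new OpensslSecureRandom();  // assumes Random subtype
    byte[] keyMaterial = new byte[16];
    rng.nextBytes(keyMaterial);  // filled by the native OpenSSL RNG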

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
new file mode 100644
index 0000000..8fc5c70
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.random;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.commons.lang.SystemUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assume;
+import org.junit.Test;
+
+public class TestOsSecureRandom {
+
+  private static OsSecureRandom getOsSecureRandom() throws IOException {
+    Assume.assumeTrue(SystemUtils.IS_OS_LINUX);
+    OsSecureRandom random = new OsSecureRandom();
+    random.setConf(new Configuration());
+    return random;
+  }
+
+  @Test(timeout=120000)
+  public void testRandomBytes() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+    // len = 16
+    checkRandomBytes(random, 16);
+    // len = 32
+    checkRandomBytes(random, 32);
+    // len = 128
+    checkRandomBytes(random, 128);
+    // len = 256
+    checkRandomBytes(random, 256);
+    random.close();
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  private void checkRandomBytes(OsSecureRandom random, int len) {
+    byte[] bytes = new byte[len];
+    byte[] bytes1 = new byte[len];
+    random.nextBytes(bytes);
+    random.nextBytes(bytes1);
+    
+    while (Arrays.equals(bytes, bytes1)) {
+      random.nextBytes(bytes1);
+    }
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomInt() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+    
+    int rand1 = random.nextInt();
+    int rand2 = random.nextInt();
+    while (rand1 == rand2) {
+      rand2 = random.nextInt();
+    }
+    random.close();
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomLong() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+    
+    long rand1 = random.nextLong();
+    long rand2 = random.nextLong();
+    while (rand1 == rand2) {
+      rand2 = random.nextLong();
+    }
+    random.close();
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomFloat() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+    
+    float rand1 = random.nextFloat();
+    float rand2 = random.nextFloat();
+    while (rand1 == rand2) {
+      rand2 = random.nextFloat();
+    }
+    random.close();
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomDouble() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+    
+    double rand1 = random.nextDouble();
+    double rand2 = random.nextDouble();
+    while (rand1 == rand2) {
+      rand2 = random.nextDouble();
+    }
+    random.close();
+  }
+
+  @Test(timeout=120000)
+  public void testRefillReservoir() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+
+    // 8196 nextLong() calls consume 8196 * 8 = 65,568 bytes, far more than
+    // the reservoir buffers at once, so it must refill repeatedly.
+    for (int i = 0; i < 8196; i++) {
+      random.nextLong();
+    }
+    random.close();
+  }
+}
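
The getOsSecureRandom() helper at the top of the class wires the generator
through setConf(new Configuration()); the entropy device it reads from is
presumably configurable the same way. A sketch, with the property name below
being an assumption:

    Configuration conf = new Configuration();
    // Assumed key; points the generator at an explicit random device.
    conf.set("hadoop.security.random.device.file.path", "/dev/urandom");
    OsSecureRandom random = new OsSecureRandom();
    random.setConf(conf);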

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
index 9efaca9..473c177 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.*;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.crypto.OpensslCipher;
 import org.apache.hadoop.io.compress.Lz4Codec;
 import org.apache.hadoop.io.compress.SnappyCodec;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
@@ -54,6 +55,9 @@ public class TestNativeCodeLoader {
     if (NativeCodeLoader.buildSupportsSnappy()) {
       assertFalse(SnappyCodec.getLibraryName().isEmpty());
     }
+    if (NativeCodeLoader.buildSupportsOpenssl()) {
+      assertFalse(OpensslCipher.getLibraryName().isEmpty());
+    }
     assertFalse(Lz4Codec.getLibraryName().isEmpty());
     LOG.info("TestNativeCodeLoader: libhadoop.so is loaded.");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index c44edda..804b504 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -324,7 +324,23 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*permission. Passing -f overwrites the destination if it already exists.( )*</expected-output>
+          <expected-output>^( |\t)*permission. Passing -f overwrites the destination if it already exists. raw( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*namespace extended attributes are preserved if \(1\) they are supported \(HDFS( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*only\) and, \(2\) all of the source and target pathnames are in the \/\.reserved\/raw( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*hierarchy. raw namespace xattr preservation is determined solely by the presence( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*\(or absence\) of the \/\.reserved\/raw prefix and not by the -p option.( )*</expected-output>
         </comparator>
       </comparators>
     </test>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt
new file mode 100644
index 0000000..0171b82
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt
@@ -0,0 +1,102 @@
+Hadoop HDFS Change Log for HDFS-6134 and HADOOP-10150
+
+fs-encryption (Unreleased)
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HDFS-6387. HDFS CLI admin tool for creating & deleting an
+    encryption zone. (clamb)
+
+    HDFS-6386. HDFS Encryption Zones (clamb)
+
+    HDFS-6388. HDFS integration with KeyProvider. (clamb)
+
+    HDFS-6473. Protocol and API for Encryption Zones (clamb)
+
+    HDFS-6392. Wire crypto streams for encrypted files in
+    DFSClient. (clamb and yliu)
+
+    HDFS-6476. Print out the KeyProvider after finding KP successfully on
+    startup. (Juan Yu via wang)
+
+    HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
+    DFSClient. (Charles Lamb and wang)
+
+    HDFS-6389. Rename restrictions for encryption zones. (clamb)
+
+    HDFS-6605. Client server negotiation of cipher suite. (wang)
+
+    HDFS-6625. Remove the Delete Encryption Zone function (clamb)
+
+    HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
+
+    HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
+
+    HDFS-6635. Refactor encryption zone functionality into new
+    EncryptionZoneManager class. (wang)
+
+    HDFS-6474. Namenode needs to get the actual keys and iv from the
+    KeyProvider. (wang)
+
+    HDFS-6619. Clean up encryption-related tests. (wang)
+
+    HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
+
+    HDFS-6490. Fix the keyid format for generated keys in
+    FSNamesystem.createEncryptionZone (clamb)
+
+    HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
+    (wang)
+
+    HDFS-6718. Remove EncryptionZoneManager lock. (wang)
+
+    HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
+
+    HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
+    EZManager#createEncryptionZone. (clamb)
+
+    HDFS-6724. Decrypt EDEK before creating
+    CryptoInputStream/CryptoOutputStream. (wang)
+
+    HDFS-6509. Create a special /.reserved/raw directory for raw access to
+    encrypted data. (clamb via wang)
+
+    HDFS-6771. Require specification of an encryption key when creating
+    an encryption zone. (wang)
+
+    HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
+
+    HDFS-6692. Add more HDFS encryption tests. (wang)
+
+    HDFS-6780. Batch the encryption zones listing API. (wang)
+
+    HDFS-6394. HDFS encryption documentation. (wang)
+
+    HDFS-6834. Improve the configuration guidance in DFSClient when there 
+    are no Codec classes found in configs. (umamahesh)
+
+    HDFS-6546. Add non-superuser capability to get the encryption zone
+    for a specific path. (clamb)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HDFS-6733. Creating encryption zone results in NPE when
+    KeyProvider is null. (clamb)
+
+    HDFS-6785. Should not be able to create encryption zone using path
+    to a non-directory file. (clamb)
+
+    HDFS-6807. Fix TestReservedRawPaths. (clamb)
+
+    HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured
+    as boolean. (umamahesh)
+
+    HDFS-6817. Fix findbugs and other warnings. (yliu)
+
+    HDFS-6839. Fix TestCLI to expect new output. (clamb)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 1aed889..f3fef4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -452,6 +452,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   <include>datatransfer.proto</include>
                   <include>fsimage.proto</include>
                   <include>hdfs.proto</include>
+                  <include>encryption.proto</include>
                 </includes>
               </source>
               <output>${project.build.directory}/generated-sources/java</output>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 2d6ecf2..5fbb3db 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -27,7 +27,7 @@
 
 bin=`which $0`
 bin=`dirname ${bin}`
-bin=`cd "$bin"; pwd`
+bin=`cd "$bin" > /dev/null; pwd`
 
 DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
@@ -61,6 +61,7 @@ function print_usage(){
   echo "  portmap              run a portmap service"
   echo "  nfs3                 run an NFS version 3 gateway"
   echo "  cacheadmin           configure the HDFS cache"
+  echo "  crypto               configure HDFS encryption zones"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
 }
@@ -184,6 +185,8 @@ elif [ "$COMMAND" = "nfs3" ] ; then
   HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NFS3_OPTS"
 elif [ "$COMMAND" = "cacheadmin" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+elif [ "$COMMAND" = "crypto" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
 else
   CLASS="$COMMAND"
 fi
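
With this dispatch in place, encryption zone administration becomes scriptable
from the shell. Judging from the CryptoAdmin tool introduced in HDFS-6387,
invocations presumably take the form

    hdfs crypto -createZone -keyName <keyName> -path <path>
    hdfs crypto -listZones

with the exact flags given by the tool's own help output.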

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index a0e75f8..111630c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.fs;
 
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
@@ -31,6 +30,7 @@ import java.util.NoSuchElementException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CryptoCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -38,6 +38,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -59,6 +61,7 @@ import org.apache.hadoop.util.Progressable;
 public class Hdfs extends AbstractFileSystem {
 
   DFSClient dfs;
+  final CryptoCodec factory;
   private boolean verifyChecksum = true;
 
   static {
@@ -85,6 +88,7 @@ public class Hdfs extends AbstractFileSystem {
     }
 
     this.dfs = new DFSClient(theUri, conf, getStatistics());
+    this.factory = CryptoCodec.getInstance(conf);
   }
 
   @Override
@@ -97,9 +101,12 @@ public class Hdfs extends AbstractFileSystem {
       EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
       int bufferSize, short replication, long blockSize, Progressable progress,
       ChecksumOpt checksumOpt, boolean createParent) throws IOException {
-    return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f),
-        absolutePermission, createFlag, createParent, replication, blockSize,
-        progress, bufferSize, checksumOpt), getStatistics());
+
+    final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f),
+      absolutePermission, createFlag, createParent, replication, blockSize,
+      progress, bufferSize, checksumOpt);
+    return dfs.createWrappedOutputStream(dfsos, statistics,
+        dfsos.getInitialLen());
   }
 
   @Override
@@ -308,8 +315,9 @@ public class Hdfs extends AbstractFileSystem {
   @Override
   public HdfsDataInputStream open(Path f, int bufferSize) 
       throws IOException, UnresolvedLinkException {
-    return new DFSClient.DFSDataInputStream(dfs.open(getUriPath(f),
-        bufferSize, verifyChecksum));
+    final DFSInputStream dfsis = dfs.open(getUriPath(f),
+      bufferSize, verifyChecksum);
+    return dfs.createWrappedInputStream(dfsis);
   }
 
   @Override
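
Both hunks above swap direct construction of HdfsDataInputStream and
HdfsDataOutputStream for DFSClient's new wrapping helpers, which layer a
crypto stream over the raw stream whenever the file carries encryption info.
The read side conceptually looks like the sketch below (not the committed
DFSClient code; the real path first decrypts the EDEK, per HDFS-6724, and
decryptEdek() here is a hypothetical stand-in for that KeyProvider round trip):

    HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis)
        throws IOException {
      FileEncryptionInfo feInfo = dfsis.getFileEncryptionInfo();
      if (feInfo == null) {
        // Unencrypted file: no crypto layer needed.
        return new HdfsDataInputStream(dfsis);
      }
      byte[] key = decryptEdek(feInfo);  // hypothetical helper
      // codec is a CryptoCodec instance, cf. the factory field added above.
      CryptoInputStream cryptoIn =
          new CryptoInputStream(dfsis, codec, key, feInfo.getIV());
      return new HdfsDataInputStream(cryptoIn);
    }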

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77bd85b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
index 99f629a..968ee00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
@@ -26,8 +26,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 /**
  * XAttr is the POSIX Extended Attribute model similar to that found in
  * traditional Operating Systems.  Extended Attributes consist of one
- * or more name/value pairs associated with a file or directory. Four
- * namespaces are defined: user, trusted, security and system.
+ * or more name/value pairs associated with a file or directory. Five
+ * namespaces are defined: user, trusted, security, system and raw.
  *   1) USER namespace attributes may be used by any user to store
  *   arbitrary information. Access permissions in this namespace are
  *   defined by a file directory's permission bits. For sticky directories,
@@ -43,6 +43,12 @@ import org.apache.hadoop.classification.InterfaceAudience;
  * <br>
  *   4) SECURITY namespace attributes are used by the fs kernel for
  *   security features. It is not visible to users.
+ * <br>
+ *   5) RAW namespace attributes are used for internal system attributes that
+ *   sometimes need to be exposed. Like SYSTEM namespace attributes they are
+ *   not visible to the user except when getXAttr/getXAttrs is called on a file
+ *   or directory in the /.reserved/raw HDFS directory hierarchy.  These
+ *   attributes can only be accessed by the superuser.
  * <p/>
  * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
  * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
@@ -55,7 +61,8 @@ public class XAttr {
     USER,
     TRUSTED,
     SECURITY,
-    SYSTEM;
+    SYSTEM,
+    RAW;
   }
   
   private final NameSpace ns;
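
For illustration, a raw-namespace attribute is visible only when the path is
addressed through the /.reserved/raw prefix, and only to the superuser;
something like the following (the path and attribute name are hypothetical):

    FileSystem fs = FileSystem.get(conf);
    byte[] value = fs.getXAttr(
        new Path("/.reserved/raw/zone1/file1"),      // hypothetical path
        "raw.hdfs.crypto.file.encryption.info");     // hypothetical name
    // The same getXAttr against /zone1/file1 would not expose raw.* attrs.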

