hadoop-hdfs-commits mailing list archives

From a..@apache.org
Subject svn commit: r1581691 - in /hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ src/main/java/org/apache/hadoop/hdfs/server/balancer/ src...
Date Wed, 26 Mar 2014 04:15:19 GMT
Author: arp
Date: Wed Mar 26 04:15:18 2014
New Revision: 1581691

URL: http://svn.apache.org/r1581691
Log:
HDFS-5910: Merging 1581690 from branch-2 to branch-2.4.

Added:
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
      - copied unchanged from r1581690, hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
Modified:
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1581691&r1=1581690&r2=1581691&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Mar 26 04:15:18 2014
@@ -176,6 +176,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-6124. Add final modifier to class members. (Suresh Srinivas via
     Arpit Agarwal)
 
+    HDFS-5910. Enhance DataTransferProtocol to allow per-connection choice
+    of encryption/plain-text. (Benoy Antony via Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery

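The CHANGES.txt entry above refers to the new pluggable TrustedChannelResolver: a deployment can declare, per connection, that a channel is already trusted and may skip wire encryption. A minimal sketch of a custom resolver, with a hypothetical class name and loopback-only policy that is not part of this commit:

    import java.net.InetAddress;
    import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;

    // Hypothetical example, not part of this commit: trust only loopback peers.
    public class LoopbackTrustedChannelResolver extends TrustedChannelResolver {
      @Override
      public boolean isTrusted() {
        // No peer address is available in this variant, so stay conservative.
        return false;
      }

      @Override
      public boolean isTrusted(InetAddress peerAddress) {
        return peerAddress.isLoopbackAddress();
      }
    }
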
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1581691&r1=1581690&r2=1581691&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Wed Mar 26 04:15:18 2014
@@ -155,6 +155,7 @@ import org.apache.hadoop.hdfs.protocol.d
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
@@ -230,6 +231,7 @@ public class DFSClient implements java.i
   private final Random r = new Random();
   private SocketAddress[] localInterfaceAddrs;
   private DataEncryptionKey encryptionKey;
+  final TrustedChannelResolver trustedChannelResolver;
   private final CachingStrategy defaultReadCachingStrategy;
   private final CachingStrategy defaultWriteCachingStrategy;
   private final ClientContext clientContext;
@@ -611,6 +613,7 @@ public class DFSClient implements java.i
     if (numThreads > 0) {
       this.initThreadsNumForHedgedReads(numThreads);
     }
+    this.trustedChannelResolver = TrustedChannelResolver.getInstance(getConfiguration());
   }
   
   /**
@@ -1831,7 +1834,8 @@ public class DFSClient implements java.i
   @InterfaceAudience.Private
   public DataEncryptionKey getDataEncryptionKey()
       throws IOException {
-    if (shouldEncryptData()) {
+    if (shouldEncryptData() && 
+        !this.trustedChannelResolver.isTrusted()) {
       synchronized (this) {
         if (encryptionKey == null ||
             encryptionKey.expiryDate < Time.now()) {

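On the client side the pattern is: build the resolver once from configuration via TrustedChannelResolver.getInstance, then consult it before paying the cost of fetching a data encryption key. A rough standalone illustration of that decision, using only the getInstance and isTrusted calls visible in this patch (the helper class itself is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;

    // Hypothetical helper, not part of this commit, mirroring the new check in
    // DFSClient.getDataEncryptionKey(): fetch an encryption key only when
    // encryption is enabled and the channel is not already trusted.
    class EncryptionDecision {
      static boolean needsEncryptionKey(Configuration conf) {
        boolean encrypt = conf.getBoolean("dfs.encrypt.data.transfer", false);
        TrustedChannelResolver resolver = TrustedChannelResolver.getInstance(conf);
        return encrypt && !resolver.isTrusted();
      }
    }
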
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1581691&r1=1581690&r2=1581691&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Wed Mar 26 04:15:18 2014
@@ -538,6 +538,7 @@ public class DFSConfigKeys extends Commo
   public static final String DFS_ENCRYPT_DATA_TRANSFER_KEY = "dfs.encrypt.data.transfer";
   public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
   public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
+  public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
   
   // Journal-node related configs. These are read on the JN side.
   public static final String  DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";

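The new dfs.trustedchannel.resolver.class key sits alongside the existing encryption switches. A short sketch of wiring it up programmatically; the resolver class name below is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    class TrustedChannelConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Keep wire encryption enabled cluster-wide...
        conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
        // ...but allow a site-specific resolver to exempt trusted channels.
        // The class name below is a placeholder, not something in this patch.
        conf.set(DFSConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
            "com.example.LoopbackTrustedChannelResolver");
      }
    }
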
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1581691&r1=1581690&r2=1581691&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Wed Mar 26 04:15:18 2014
@@ -1043,7 +1043,8 @@ public class DFSOutputStream extends FSO
         
         OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
         InputStream unbufIn = NetUtils.getInputStream(sock);
-        if (dfsClient.shouldEncryptData()) {
+        if (dfsClient.shouldEncryptData() && 
+            !dfsClient.trustedChannelResolver.isTrusted(sock.getInetAddress())) {
           IOStreamPair encryptedStreams =
               DataTransferEncryptor.getEncryptedStreams(
                   unbufOut, unbufIn, dfsClient.getDataEncryptionKey());
@@ -1319,7 +1320,8 @@ public class DFSOutputStream extends FSO
           
           OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
           InputStream unbufIn = NetUtils.getInputStream(s);
-          if (dfsClient.shouldEncryptData()) {
+          if (dfsClient.shouldEncryptData()  && 
+              !dfsClient.trustedChannelResolver.isTrusted(s.getInetAddress())) {
             IOStreamPair encryptedStreams =
                 DataTransferEncryptor.getEncryptedStreams(unbufOut,
                     unbufIn, dfsClient.getDataEncryptionKey());

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java?rev=1581691&r1=1581690&r2=1581691&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java Wed Mar 26 04:15:18 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.A
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
@@ -71,6 +72,7 @@ class NameNodeConnector {
   private BlockTokenSecretManager blockTokenSecretManager;
   private Daemon keyupdaterthread; // AccessKeyUpdater thread
   private DataEncryptionKey encryptionKey;
+  private final TrustedChannelResolver trustedChannelResolver;
 
   NameNodeConnector(URI nameNodeUri,
       Configuration conf) throws IOException {
@@ -120,6 +122,7 @@ class NameNodeConnector {
     if (out == null) {
       throw new IOException("Another balancer is running");
     }
+    this.trustedChannelResolver = TrustedChannelResolver.getInstance(conf);
   }
 
   boolean shouldContinue(long dispatchBlockMoveBytes) {
@@ -154,7 +157,7 @@ class NameNodeConnector {
   
   DataEncryptionKey getDataEncryptionKey()
       throws IOException {
-    if (encryptDataTransfer) {
+    if (encryptDataTransfer && !this.trustedChannelResolver.isTrusted()) {
       synchronized (this) {
         if (encryptionKey == null) {
           encryptionKey = blockTokenSecretManager.generateDataEncryptionKey();

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java?rev=1581691&r1=1581690&r2=1581691&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java Wed Mar 26 04:15:18 2014
@@ -51,6 +51,7 @@ import static org.apache.hadoop.hdfs.DFS
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 
 /**
@@ -82,6 +83,7 @@ public class DNConf {
   
   final String minimumNameNodeVersion;
   final String encryptionAlgorithm;
+  final TrustedChannelResolver trustedChannelResolver;
   
   final long xceiverStopTimeout;
   final long restartReplicaExpiry;
@@ -152,6 +154,7 @@ public class DNConf {
     this.encryptDataTransfer = conf.getBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY,
         DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
     this.encryptionAlgorithm = conf.get(DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
+    this.trustedChannelResolver = TrustedChannelResolver.getInstance(conf);
     
     this.xceiverStopTimeout = conf.getLong(
         DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1581691&r1=1581690&r2=1581691&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Mar 26 04:15:18 2014
@@ -1616,7 +1616,8 @@ public class DataNode extends Configured
                             HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
         OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
         InputStream unbufIn = NetUtils.getInputStream(sock);
-        if (dnConf.encryptDataTransfer) {
+        if (dnConf.encryptDataTransfer && 
+            !dnConf.trustedChannelResolver.isTrusted(sock.getInetAddress())) {
           IOStreamPair encryptedStreams =
               DataTransferEncryptor.getEncryptedStreams(
                   unbufOut, unbufIn,

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1581691&r1=1581690&r2=1581691&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Wed Mar 26 04:15:18 2014
@@ -36,9 +36,11 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InterruptedIOException;
 import java.io.OutputStream;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.net.SocketException;
+import java.net.UnknownHostException;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
 
@@ -81,6 +83,7 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 
+import com.google.common.net.InetAddresses;
 import com.google.protobuf.ByteString;
 
 
@@ -169,7 +172,8 @@ class DataXceiver extends Receiver imple
       dataXceiverServer.addPeer(peer, Thread.currentThread());
       peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
       InputStream input = socketIn;
-      if ((!peer.hasSecureChannel()) && dnConf.encryptDataTransfer) {
+      if ((!peer.hasSecureChannel()) && dnConf.encryptDataTransfer &&
+          !dnConf.trustedChannelResolver.isTrusted(getClientAddress(peer))){
         IOStreamPair encryptedStreams = null;
         try {
           encryptedStreams = DataTransferEncryptor.getEncryptedStreams(socketOut,
@@ -257,6 +261,19 @@ class DataXceiver extends Receiver imple
       }
     }
   }
+  
+  /**
+   * Returns the InetAddress of the remote peer.
+   * Peer#getRemoteAddressString returns an address of the form
+   * /ip-address:port; the ip-address portion is extracted and parsed
+   * into an InetAddress (no reverse DNS lookup is performed).
+   * @param peer the peer whose remote address is being resolved
+   * @return the InetAddress of the remote peer
+   */
+  private static InetAddress getClientAddress(Peer peer) {
+    return InetAddresses.forString(
+        peer.getRemoteAddressString().split(":")[0].substring(1));
+  }
 
   @Override
   public void requestShortCircuitFds(final ExtendedBlock blk,
@@ -637,7 +654,8 @@ class DataXceiver extends Receiver imple
           OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock,
               writeTimeout);
           InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);
-          if (dnConf.encryptDataTransfer) {
+          if (dnConf.encryptDataTransfer &&
+              !dnConf.trustedChannelResolver.isTrusted(mirrorSock.getInetAddress())) {
             IOStreamPair encryptedStreams =
                 DataTransferEncryptor.getEncryptedStreams(
                     unbufMirrorOut, unbufMirrorIn,
@@ -963,7 +981,9 @@ class DataXceiver extends Receiver imple
       OutputStream unbufProxyOut = NetUtils.getOutputStream(proxySock,
           dnConf.socketWriteTimeout);
       InputStream unbufProxyIn = NetUtils.getInputStream(proxySock);
-      if (dnConf.encryptDataTransfer) {
+      if (dnConf.encryptDataTransfer && 
+          !dnConf.trustedChannelResolver.isTrusted(
+              proxySock.getInetAddress())) {
         IOStreamPair encryptedStreams =
             DataTransferEncryptor.getEncryptedStreams(
                 unbufProxyOut, unbufProxyIn,

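The getClientAddress helper added above relies on the /ip-address:port form of Peer.getRemoteAddressString and on Guava's InetAddresses.forString, which parses a literal IP without a DNS lookup. The same extraction in isolation, with an illustrative sample address:

    import java.net.InetAddress;
    import com.google.common.net.InetAddresses;

    class ClientAddressParseExample {
      public static void main(String[] args) {
        // Same shape DataXceiver sees from Peer.getRemoteAddressString().
        String remote = "/10.0.0.5:50010";               // illustrative value
        String ip = remote.split(":")[0].substring(1);   // drop the leading '/'
        InetAddress addr = InetAddresses.forString(ip);  // literal parse, no DNS
        System.out.println(addr.getHostAddress());       // 10.0.0.5
      }
    }
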
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1581691&r1=1581690&r2=1581691&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Wed Mar 26 04:15:18 2014
@@ -1338,7 +1338,8 @@
   <description>
     Whether or not actual block data that is read/written from/to HDFS should
     be encrypted on the wire. This only needs to be set on the NN and DNs,
-    clients will deduce this automatically.
+    clients will deduce this automatically. It is possible to override this setting 
+    per connection by specifying custom logic via dfs.trustedchannel.resolver.class. 
   </description>
 </property>
 
@@ -1354,6 +1355,20 @@
 </property>
 
 <property>
+  <name>dfs.trustedchannel.resolver.class</name>
+  <value></value>
+  <description>
+      TrustedChannelResolver is used to determine whether a channel 
+      is trusted for plain data transfer. The TrustedChannelResolver is
+      invoked on both client and server side. If the resolver indicates 
+      that the channel is trusted, then the data transfer will not be 
+      encrypted even if dfs.encrypt.data.transfer is set to true. The
+      default implementation returns false indicating that the channel 
+      is not trusted.
+  </description>
+</property>
+
+<property>
   <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
   <value>false</value>
   <description>

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java?rev=1581691&r1=1581690&r2=1581691&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java Wed Mar 26 04:15:18 2014
@@ -23,6 +23,9 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.net.InetAddress;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -33,24 +36,40 @@ import org.apache.hadoop.fs.FileChecksum
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 import org.mockito.Mockito;
 
+@RunWith(Parameterized.class)
 public class TestEncryptedTransfer {
   
+  @Parameters
+  public static Collection<Object[]> data() {
+    Collection<Object[]> params = new ArrayList<Object[]>();
+    params.add(new Object[]{null});
+    params.add(new Object[]{"org.apache.hadoop.hdfs.TestEncryptedTransfer$TestTrustedChannelResolver"});
+    return params;
+  }
+  
   private static final Log LOG = LogFactory.getLog(TestEncryptedTransfer.class);
   
   private static final String PLAIN_TEXT = "this is very secret plain text";
   private static final Path TEST_PATH = new Path("/non-encrypted-file");
   
-  private static void setEncryptionConfigKeys(Configuration conf) {
+  private void setEncryptionConfigKeys(Configuration conf) {
     conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+    if (resolverClazz != null){
+      conf.set(DFSConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS, resolverClazz);
+    }
   }
   
   // Unset DFS_ENCRYPT_DATA_TRANSFER_KEY and DFS_DATA_ENCRYPTION_ALGORITHM_KEY
@@ -62,6 +81,11 @@ public class TestEncryptedTransfer {
     localConf.unset(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
     return FileSystem.get(localConf);
   }
+  
+  String resolverClazz;
+  public TestEncryptedTransfer(String resolverClazz){
+    this.resolverClazz = resolverClazz;
+  }
 
   @Test
   public void testEncryptedRead() throws IOException {
@@ -206,7 +230,9 @@ public class TestEncryptedTransfer {
           LogFactory.getLog(DataNode.class));
       try {
         assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
-        fail("Should not have been able to read without encryption enabled.");
+        if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
+          fail("Should not have been able to read without encryption enabled.");
+        }
       } catch (IOException ioe) {
         GenericTestUtils.assertExceptionContains("Could not obtain block:",
             ioe);
@@ -215,8 +241,10 @@ public class TestEncryptedTransfer {
       }
       fs.close();
       
-      GenericTestUtils.assertMatches(logs.getOutput(),
-          "Failed to read expected encryption handshake from client at");
+      if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
+        GenericTestUtils.assertMatches(logs.getOutput(),
+        "Failed to read expected encryption handshake from client at");
+      }
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -456,4 +484,16 @@ public class TestEncryptedTransfer {
     out.write(PLAIN_TEXT.getBytes());
     out.close();
   }
+  
+  static class TestTrustedChannelResolver extends TrustedChannelResolver {
+    
+    public boolean isTrusted(){
+      return true;
+    }
+
+    public boolean isTrusted(InetAddress peerAddress){
+      return true;
+    }
+  }
+  
 }

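The test change switches TestEncryptedTransfer to JUnit's Parameterized runner: each row returned by the @Parameters method becomes one constructor call, and the whole suite runs once per row (here, once with no resolver and once with TestTrustedChannelResolver). A stripped-down sketch of that mechanism, unrelated to HDFS:

    import static org.junit.Assert.assertNotNull;

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    // Hypothetical standalone test, not part of this commit.
    @RunWith(Parameterized.class)
    public class ParameterizedSketch {
      @Parameters
      public static Collection<Object[]> data() {
        // Each row becomes one constructor call and one full run of the tests.
        return Arrays.asList(new Object[][] { { "first" }, { "second" } });
      }

      private final String name;

      public ParameterizedSketch(String name) {
        this.name = name;
      }

      @Test
      public void nameIsPresent() {
        assertNotNull(name);
      }
    }
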

