From: wang@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Date: Tue, 15 Jul 2014 21:10:29 -0000
Subject: svn commit: r1610853 [1/3] - in /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/bkjournal/ src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ src/contrib/bkjournal/src/test/java/org/apache/had...
Message-Id: <20140715211033.2A94423889E1@eris.apache.org>

Author: wang
Date: Tue Jul 15 21:10:24 2014
New Revision: 1610853

URL: http://svn.apache.org/r1610853
Log:
Merge from trunk to branch

Added:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/
      - copied from r1610850, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
      - copied unchanged from r1610850, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
      - copied unchanged from r1610850, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/
      - copied from r1610850, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithSaslDataTransfer.java
      - copied unchanged from r1610850, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithSaslDataTransfer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestUpdatePipelineWithSnapshots.java
      - copied unchanged from r1610850, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestUpdatePipelineWithSnapshots.java

Removed:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java

Modified:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java

Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1608601-1610850

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Jul 15 21:10:24 2014
@@ -53,8 +53,6 @@ Trunk (Unreleased)
     HDFS-3030. Remove getProtocolVersion and getProtocolSignature from
     translators. (jitendra)
 
-    HDFS-2976. Remove unnecessary method (tokenRefetchNeeded) in DFSClient.
-
     HDFS-3111. Missing license headers in trunk. (umamahesh)
 
     HDFS-3091. Update the usage limitations of ReplaceDatanodeOnFailure policy in
@@ -95,8 +93,6 @@ Trunk (Unreleased)
     HDFS-3768. Exception in TestJettyHelper is incorrect. (Eli Reisman via
     jghoman)
 
-    HDFS-3851. DFSOutputStream class code cleanup. (Jing Zhao via suresh)
-
     HDFS-2580. NameNode#main(...) can make use of GenericOptionsParser. (harsh)
 
     HDFS-2127. Add a test that ensure AccessControlExceptions contain
@@ -129,6 +125,9 @@ Trunk (Unreleased)
 
     HDFS-6252. Phase out the old web UI in HDFS. (wheat9)
 
+    HDFS-6609. Use DirectorySnapshottableFeature to represent a snapshottable
+    directory. (Jing Zhao via wheat9)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -197,9 +196,6 @@ Trunk (Unreleased)
     HDFS-3834. Remove unused static fields NAME, DESCRIPTION and Usage from
     Command. (Jing Zhao via suresh)
 
-    HADOOP-8158. Interrupting hadoop fs -put from the command line
-    causes a LeaseExpiredException. (daryn via harsh)
-
     HDFS-2434. TestNameNodeMetrics.testCorruptBlock fails intermittently.
     (Jing Zhao via suresh)
 
@@ -266,6 +262,31 @@ Release 2.6.0 - UNRELEASED
     HDFS-6511. BlockManager#computeInvalidateWork() could do nothing.
     (Juan Yu via wang)
 
+    HDFS-6638. Shorten test run time with a smaller retry timeout setting.
+    (Liang Xie via cnauroth)
+
+    HDFS-6627. Rename DataNode#checkWriteAccess to checkReadAccess.
+    (Liang Xie via cnauroth)
+
+    HDFS-6645. Add test for successive Snapshots between XAttr modifications.
+    (Stephen Chu via jing9)
+
+    HDFS-6643. Refactor INodeWithAdditionalFields.PermissionStatusFormat and
+    INodeFile.HeaderFormat. (szetszwo)
+
+    HDFS-6640. Syntax for MKDIRS, CREATESYMLINK, and SETXATTR are given wrongly
+    in WebHdfs document (missed webhdfs/v1). (Stephen Chu via jing9)
+
+    HDFS-5202. Support Centralized Cache Management on Windows. (cnauroth)
+
+    HDFS-2976. Remove unnecessary method (tokenRefetchNeeded) in DFSClient.
+    (Uma Maheswara Rao G)
+
+    HDFS-3851. DFSOutputStream class code cleanup. (Jing Zhao via suresh)
+
+    HDFS-2856. Fix block protocol so that Datanodes don't require root or jsvc.
+    (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -273,6 +294,24 @@ Release 2.6.0 - UNRELEASED
     HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
     due to a long edit log sync op. (Liang Xie via cnauroth)
 
+    HDFS-6646. [ HDFS Rolling Upgrade - Shell ] shutdownDatanode and getDatanodeInfo
+    usage is missed ( Brahma Reddy Battula via vinayakumarb)
+
+    HDFS-6630. Unable to fetch the block information by Browsing the file system on
+    Namenode UI through IE9 ( Haohui Mai via vinayakumarb)
+
+    HADOOP-8158. Interrupting hadoop fs -put from the command line
+    causes a LeaseExpiredException. (daryn via harsh)
+
+    HDFS-6678. MiniDFSCluster may still be partially running after initialization
+    fails. (cnauroth)
+
+    HDFS-5809. BlockPoolSliceScanner and high speed hdfs appending make
+    datanode to drop into infinite loop (cmccabe)
+
+    HDFS-6456. NFS should throw error for invalid entry in
+    dfs.nfs.exports.allowed.hosts (Abhiraj Butala via brandonli)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -781,6 +820,23 @@ Release 2.5.0 - UNRELEASED
     HDFS-6604. The short-circuit cache doesn't correctly time out replicas that
     haven't been used in a while (cmccabe)
 
+    HDFS-4286. Changes from BOOKKEEPER-203 broken capability of including
+    bookkeeper-server jar in hidden package of BKJM (Rakesh R via umamahesh)
+
+    HDFS-4221. Remove the format limitation point from BKJM documentation as HDFS-3810
+    closed. (Rakesh R via umamahesh)
+
+    HDFS-5411. Update Bookkeeper dependency to 4.2.3. (Rakesh R via umamahesh)
+
+    HDFS-6631. TestPread#testHedgedReadLoopTooManyTimes fails intermittently.
+    (Liang Xie via cnauroth)
+
+    HDFS-6647. Edit log corruption when pipeline recovery occurs for deleted
+    file present in snapshot (kihwal)
+
+    HDFS-6378. NFS registration should timeout instead of hanging when
+    portmap/rpcbind is not available (Abhiraj Butala via brandonli)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
@@ -853,6 +909,12 @@ Release 2.5.0 - UNRELEASED
     HDFS-6312. WebHdfs HA failover is broken on secure clusters. (daryn via
     tucu)
 
+    HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes
+    from the tree and deleting them from the inode map (kihwal via cmccabe)
+
+    HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
+    via cmccabe)
+
 Release 2.4.1 - 2014-06-23
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml Tue Jul 15 21:10:24 2014
@@ -142,6 +142,11 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml Tue Jul 15 21:10:24 2014
@@ -163,38 +163,24 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-        <version>1.5</version>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <version>2.8</version>
         <executions>
           <execution>
+            <id>dist</id>
             <phase>package</phase>
             <goals>
-              <goal>shade</goal>
+              <goal>copy</goal>
             </goals>
             <configuration>
-              <createDependencyReducedPom>false</createDependencyReducedPom>
-              <artifactSet>
-                <includes>
-                  <include>org.apache.bookkeeper:bookkeeper-server</include>
-                  <include>org.apache.zookeeper:zookeeper</include>
-                  <include>org.jboss.netty:netty</include>
-                </includes>
-              </artifactSet>
-              <relocations>
-                <relocation>
-                  <pattern>org.apache.bookkeeper</pattern>
-                  <shadedPattern>hidden.bkjournal.org.apache.bookkeeper</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.apache.zookeeper</pattern>
-                  <shadedPattern>hidden.bkjournal.org.apache.zookeeper</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>org.jboss.netty</pattern>
-                  <shadedPattern>hidden.bkjournal.org.jboss.netty</shadedPattern>
-                </relocation>
-              </relocations>
+              <artifactItems>
+                <artifactItem>
+                  <groupId>org.apache.bookkeeper</groupId>
+                  <artifactId>bookkeeper-server</artifactId>
+                  <type>jar</type>
+                </artifactItem>
+              </artifactItems>
+              <outputDirectory>${project.build.directory}/lib</outputDirectory>
             </configuration>

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java Tue Jul 15 21:10:24 2014
@@ -237,7 +237,7 @@ public class BookKeeperJournalManager im
         zkPathLatch.countDown();
       }
     };
-    ZkUtils.createFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
+    ZkUtils.asyncCreateFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
         Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null);
 
     try {

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java Tue Jul 15 21:10:24 2014
@@ -149,13 +149,16 @@ class BKJMUtil {
   int checkBookiesUp(int count, int timeout) throws Exception {
     ZooKeeper zkc = connectZooKeeper();
     try {
-      boolean up = false;
       int mostRecentSize = 0;
       for (int i = 0; i < timeout; i++) {
         try {
          List<String> children = zkc.getChildren("/ledgers/available",
                                                   false);
           mostRecentSize = children.size();
+          // Skip 'readonly znode' which is used for keeping R-O bookie details
+          if (children.contains("readonly")) {
+            mostRecentSize = children.size() - 1;
+          }
           if (LOG.isDebugEnabled()) {
             LOG.debug("Found " + mostRecentSize + " bookies up, "
                       + "waiting for " + count);
@@ -166,7 +169,6 @@ class BKJMUtil {
           }
         }
         if (mostRecentSize == count) {
-          up = true;
           break;
         }
       } catch (KeeperException e) {

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd Tue Jul 15 21:10:24 2014
@@ -47,7 +47,7 @@ if "%1" == "--config" (
     goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -146,6 +146,10 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
   goto :eof
 
+:cacheadmin
+  set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (
@@ -193,6 +197,7 @@ goto :eof
   @echo                        current directory contents with a snapshot
   @echo   lsSnapshottableDir   list all snapshottable dirs owned by the current user
   @echo                        Use -help to see options
+  @echo   cacheadmin           configure the HDFS cache
   @echo.
   @echo Most commands print help when invoked w/o parameters.
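For reference, the new cacheadmin subcommand added above dispatches to org.apache.hadoop.hdfs.tools.CacheAdmin, the same class the Unix hdfs script uses. A minimal sketch of a session (illustrative only; the pool and path names here are invented):

  hdfs cacheadmin -addPool sales-pool
  hdfs cacheadmin -addDirective -path /datasets/sales -pool sales-pool
  hdfs cacheadmin -listPools
  hdfs cacheadmin -listDirectives -pool sales-pool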
Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1608601-1610850

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java Tue Jul 15 21:10:24 2014
@@ -744,7 +744,8 @@ public class BlockReaderFactory implemen
       }
     }
     try {
-      Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress);
+      Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress, token,
+        datanode);
       if (LOG.isTraceEnabled()) {
         LOG.trace("nextTcpPeer: created newConnectedPeer " + peer);
       }

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Tue Jul 15 21:10:24 2014
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
@@ -140,6 +142,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@@ -158,16 +161,19 @@ import org.apache.hadoop.hdfs.protocol.S
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -214,7 +220,8 @@ import com.google.common.net.InetAddress
  *
  ********************************************************/
 @InterfaceAudience.Private
-public class DFSClient implements java.io.Closeable, RemotePeerFactory {
+public class DFSClient implements java.io.Closeable, RemotePeerFactory,
+    DataEncryptionKeyFactory {
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
   public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
   static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
@@ -238,7 +245,7 @@ public class DFSClient implements java.i
   private final Random r = new Random();
   private SocketAddress[] localInterfaceAddrs;
   private DataEncryptionKey encryptionKey;
-  final TrustedChannelResolver trustedChannelResolver;
+  final SaslDataTransferClient saslClient;
   private final CachingStrategy defaultReadCachingStrategy;
   private final CachingStrategy defaultWriteCachingStrategy;
   private final ClientContext clientContext;
@@ -646,7 +653,12 @@ public class DFSClient implements java.i
     if (numThreads > 0) {
       this.initThreadsNumForHedgedReads(numThreads);
     }
-    this.trustedChannelResolver = TrustedChannelResolver.getInstance(getConfiguration());
+    this.saslClient = new SaslDataTransferClient(
+      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
+      TrustedChannelResolver.getInstance(conf),
+      conf.getBoolean(
+        IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
+        IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT));
   }
 
   /**
@@ -1864,23 +1876,6 @@ public class DFSClient implements java.i
           UnresolvedPathException.class);
     }
   }
-
-  /**
-   * Get the checksum of the whole file of a range of the file. Note that the
-   * range always starts from the beginning of the file.
-   * @param src The file path
-   * @param length The length of the range
-   * @return The checksum
-   * @see DistributedFileSystem#getFileChecksum(Path)
-   */
-  public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
-      throws IOException {
-    checkOpen();
-    Preconditions.checkArgument(length >= 0);
-    return getFileChecksum(src, length, clientName, namenode,
-        socketFactory, dfsClientConf.socketTimeout, getDataEncryptionKey(),
-        dfsClientConf.connectToDnViaHostname);
-  }
 
   @InterfaceAudience.Private
   public void clearDataEncryptionKey() {
@@ -1900,11 +1895,9 @@ public class DFSClient implements java.i
     return d == null ? false : d.getEncryptDataTransfer();
   }
 
-  @InterfaceAudience.Private
-  public DataEncryptionKey getDataEncryptionKey()
-      throws IOException {
-    if (shouldEncryptData() &&
-        !this.trustedChannelResolver.isTrusted()) {
+  @Override
+  public DataEncryptionKey newDataEncryptionKey() throws IOException {
+    if (shouldEncryptData()) {
       synchronized (this) {
         if (encryptionKey == null ||
            encryptionKey.expiryDate < Time.now()) {
@@ -1919,22 +1912,17 @@ public class DFSClient implements java.i
   }
 
   /**
-   * Get the checksum of the whole file or a range of the file.
+   * Get the checksum of the whole file of a range of the file. Note that the
+   * range always starts from the beginning of the file.
    * @param src The file path
    * @param length the length of the range, i.e., the range is [0, length]
-   * @param clientName the name of the client requesting the checksum.
-   * @param namenode the RPC proxy for the namenode
-   * @param socketFactory to create sockets to connect to DNs
-   * @param socketTimeout timeout to use when connecting and waiting for a response
-   * @param encryptionKey the key needed to communicate with DNs in this cluster
-   * @param connectToDnViaHostname whether the client should use hostnames instead of IPs
    * @return The checksum
+   * @see DistributedFileSystem#getFileChecksum(Path)
    */
-  private static MD5MD5CRC32FileChecksum getFileChecksum(String src,
-      long length, String clientName, ClientProtocol namenode,
-      SocketFactory socketFactory, int socketTimeout,
-      DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
+  public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
       throws IOException {
+    checkOpen();
+    Preconditions.checkArgument(length >= 0);
     //get block locations for the file range
     LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0,
         length);
@@ -1969,7 +1957,7 @@ public class DFSClient implements java.i
       final DatanodeInfo[] datanodes = lb.getLocations();
 
       //try each datanode location of the block
-      final int timeout = 3000 * datanodes.length + socketTimeout;
+      final int timeout = 3000 * datanodes.length + dfsClientConf.socketTimeout;
       boolean done = false;
       for(int j = 0; !done && j < datanodes.length; j++) {
         DataOutputStream out = null;
@@ -1977,8 +1965,7 @@ public class DFSClient implements java.i
 
         try {
           //connect to a datanode
-          IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
-              encryptionKey, datanodes[j], timeout);
+          IOStreamPair pair = connectToDN(datanodes[j], timeout, lb);
           out = new DataOutputStream(new BufferedOutputStream(pair.out,
               HdfsConstants.SMALL_BUFFER_SIZE));
           in = new DataInputStream(pair.in);
@@ -2034,9 +2021,7 @@ public class DFSClient implements java.i
           } else {
             LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
                       "inferring checksum by reading first byte");
-            ct = inferChecksumTypeByReading(
-                clientName, socketFactory, socketTimeout, lb, datanodes[j],
-                encryptionKey, connectToDnViaHostname);
+            ct = inferChecksumTypeByReading(lb, datanodes[j]);
           }
 
           if (i == 0) { // first block
@@ -2110,16 +2095,13 @@ public class DFSClient implements java.i
    * Connect to the given datanode's datantrasfer port, and return
    * the resulting IOStreamPair. This includes encryption wrapping, etc.
    */
-  private static IOStreamPair connectToDN(
-      SocketFactory socketFactory, boolean connectToDnViaHostname,
-      DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout)
-      throws IOException
-  {
+  private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
+      LocatedBlock lb) throws IOException {
     boolean success = false;
     Socket sock = null;
     try {
       sock = socketFactory.createSocket();
-      String dnAddr = dn.getXferAddr(connectToDnViaHostname);
+      String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Connecting to datanode " + dnAddr);
       }
@@ -2128,13 +2110,8 @@ public class DFSClient implements java.i
 
       OutputStream unbufOut = NetUtils.getOutputStream(sock);
       InputStream unbufIn = NetUtils.getInputStream(sock);
-      IOStreamPair ret;
-      if (encryptionKey != null) {
-        ret = DataTransferEncryptor.getEncryptedStreams(
-                unbufOut, unbufIn, encryptionKey);
-      } else {
-        ret = new IOStreamPair(unbufIn, unbufOut);
-      }
+      IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
+        lb.getBlockToken(), dn);
       success = true;
       return ret;
     } finally {
@@ -2150,21 +2127,14 @@ public class DFSClient implements java.i
    * with older HDFS versions which did not include the checksum type in
    * OpBlockChecksumResponseProto.
    *
-   * @param in input stream from datanode
-   * @param out output stream to datanode
    * @param lb the located block
-   * @param clientName the name of the DFSClient requesting the checksum
    * @param dn the connected datanode
    * @return the inferred checksum type
    * @throws IOException if an error occurs
    */
-  private static Type inferChecksumTypeByReading(
-      String clientName, SocketFactory socketFactory, int socketTimeout,
-      LocatedBlock lb, DatanodeInfo dn,
-      DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
+  private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
       throws IOException {
-    IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
-        encryptionKey, dn, socketTimeout);
+    IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);
 
     try {
       DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
@@ -2938,7 +2908,9 @@ public class DFSClient implements java.i
   }
 
   @Override // RemotePeerFactory
-  public Peer newConnectedPeer(InetSocketAddress addr) throws IOException {
+  public Peer newConnectedPeer(InetSocketAddress addr,
+      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
+      throws IOException {
     Peer peer = null;
     boolean success = false;
     Socket sock = null;
@@ -2947,8 +2919,8 @@ public class DFSClient implements java.i
       NetUtils.connect(sock, addr,
         getRandomLocalInterfaceAddr(),
         dfsClientConf.socketTimeout);
-      peer = TcpPeerServer.peerFromSocketAndKey(sock,
-          getDataEncryptionKey());
+      peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
+          blockToken, datanodeId);
       success = true;
       return peer;
     } finally {

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java Tue Jul 15 21:10:24 2014
@@ -52,4 +52,6 @@ public class DFSClientFaultInjector {
   public void startFetchFromDatanode() {}
 
   public void fetchFromDatanodeException() {}
+
+  public void readFromDatanodeDelay() {}
 }

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Tue Jul 15 21:10:24 2014
@@ -561,6 +561,8 @@ public class DFSConfigKeys extends Commo
   public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
   public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
   public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
+  public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
+  public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
   public static final String DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_KEY = "dfs.namenode.key.version.refresh.interval.ms";
   public static final int DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_DEFAULT = 5*60*1000;
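The two keys added above come from HDFS-2856, which lets DataNodes run without root or jsvc by negotiating SASL on DataTransferProtocol. A minimal hdfs-site.xml sketch, assuming the standard SASL quality-of-protection values (authentication, integrity, privacy):

  <property>
    <name>dfs.data.transfer.protection</name>
    <!-- one of: authentication, integrity, privacy -->
    <value>privacy</value>
  </property>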
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java Tue Jul 15 21:10:24 2014
@@ -1048,6 +1048,7 @@ implements ByteBufferReadable, CanSetDro
           throw new IOException("truncated return from reader.read(): " +
                                 "excpected " + len + ", got " + nread);
         }
+        DFSClientFaultInjector.get().readFromDatanodeDelay();
         return;
       } catch (ChecksumException e) {
         String msg = "fetchBlockByteRange(). Got a checksum exception for "

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Tue Jul 15 21:10:24 2014
@@ -63,7 +63,6 @@ import org.apache.hadoop.hdfs.protocol.N
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
@@ -1050,14 +1049,10 @@ public class DFSOutputStream extends FSO
 
           OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
           InputStream unbufIn = NetUtils.getInputStream(sock);
-          if (dfsClient.shouldEncryptData() &&
-              !dfsClient.trustedChannelResolver.isTrusted(sock.getInetAddress())) {
-            IOStreamPair encryptedStreams =
-                DataTransferEncryptor.getEncryptedStreams(
-                    unbufOut, unbufIn, dfsClient.getDataEncryptionKey());
-            unbufOut = encryptedStreams.out;
-            unbufIn = encryptedStreams.in;
-          }
+          IOStreamPair saslStreams = dfsClient.saslClient.socketSend(sock,
+            unbufOut, unbufIn, dfsClient, blockToken, src);
+          unbufOut = saslStreams.out;
+          unbufIn = saslStreams.in;
           out = new DataOutputStream(new BufferedOutputStream(unbufOut,
               HdfsConstants.SMALL_BUFFER_SIZE));
           in = new DataInputStream(unbufIn);
@@ -1328,14 +1323,10 @@ public class DFSOutputStream extends FSO
 
         OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
         InputStream unbufIn = NetUtils.getInputStream(s);
-        if (dfsClient.shouldEncryptData() &&
-            !dfsClient.trustedChannelResolver.isTrusted(s.getInetAddress())) {
-          IOStreamPair encryptedStreams =
-              DataTransferEncryptor.getEncryptedStreams(unbufOut,
-                  unbufIn, dfsClient.getDataEncryptionKey());
-          unbufOut = encryptedStreams.out;
-          unbufIn = encryptedStreams.in;
-        }
+        IOStreamPair saslStreams = dfsClient.saslClient.socketSend(s,
+          unbufOut, unbufIn, dfsClient, accessToken, nodes[0]);
+        unbufOut = saslStreams.out;
+        unbufIn = saslStreams.in;
         out = new DataOutputStream(new BufferedOutputStream(unbufOut,
             HdfsConstants.SMALL_BUFFER_SIZE));
         blockReplyStream = new DataInputStream(unbufIn);

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java Tue Jul 15 21:10:24 2014
@@ -21,15 +21,21 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.security.token.Token;
 
 public interface RemotePeerFactory {
   /**
    * @param addr The address to connect to.
-   *
+   * @param blockToken Token used during optional SASL negotiation
+   * @param datanodeId ID of destination DataNode
    * @return A new Peer connected to the address.
    *
    * @throws IOException If there was an error connecting or creating
    *                     the remote socket, encrypted stream, etc.
    */
-  Peer newConnectedPeer(InetSocketAddress addr) throws IOException;
+  Peer newConnectedPeer(InetSocketAddress addr,
+      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
+      throws IOException;
 }

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java Tue Jul 15 21:10:24 2014
@@ -19,9 +19,7 @@ package org.apache.hadoop.hdfs.net;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.net.unix.DomainSocket;
 
 import java.io.InputStream;
@@ -51,11 +49,8 @@ public class EncryptedPeer implements Pe
    */
   private final ReadableByteChannel channel;
 
-  public EncryptedPeer(Peer enclosedPeer, DataEncryptionKey key)
-      throws IOException {
+  public EncryptedPeer(Peer enclosedPeer, IOStreamPair ios) {
     this.enclosedPeer = enclosedPeer;
-    IOStreamPair ios = DataTransferEncryptor.getEncryptedStreams(
-        enclosedPeer.getOutputStream(), enclosedPeer.getInputStream(), key);
     this.in = ios.in;
    this.out = ios.out;
     this.channel = ios.in instanceof ReadableByteChannel ?
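With the widened RemotePeerFactory contract above, every implementation now threads the block token and target DatanodeID through to the SASL handshake. A minimal sketch of an implementation, modeled on the DFSClient.newConnectedPeer() hunk earlier in this commit (saslClient, keyFactory, socketFactory, and socketTimeout are assumed fields of the enclosing class, not part of the interface):

  @Override // RemotePeerFactory
  public Peer newConnectedPeer(InetSocketAddress addr,
      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
      throws IOException {
    Socket sock = socketFactory.createSocket();
    try {
      NetUtils.connect(sock, addr, socketTimeout);
      // peerFromSocketAndKey runs the (possibly no-op) SASL negotiation
      // and returns a Peer wrapping the negotiated streams.
      return TcpPeerServer.peerFromSocketAndKey(saslClient, sock, keyFactory,
          blockToken, datanodeId);
    } catch (IOException e) {
      IOUtils.closeSocket(sock); // don't leak the socket on handshake failure
      throw e;
    }
  }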
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java Tue Jul 15 21:10:24 2014
@@ -28,10 +28,14 @@ import java.nio.channels.SocketChannel;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.security.token.Token;
 
 @InterfaceAudience.Private
 public class TcpPeerServer implements PeerServer {
@@ -74,15 +78,16 @@ public class TcpPeerServer implements Pe
     }
   }
 
-  public static Peer peerFromSocketAndKey(Socket s,
-      DataEncryptionKey key) throws IOException {
+  public static Peer peerFromSocketAndKey(
+        SaslDataTransferClient saslClient, Socket s,
+        DataEncryptionKeyFactory keyFactory,
+        Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
+        throws IOException {
     Peer peer = null;
     boolean success = false;
     try {
-      peer = peerFromSocket(s);
-      if (key != null) {
-        peer = new EncryptedPeer(peer, key);
-      }
+      peer = peerFromSocket(s);
+      peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
       success = true;
       return peer;
     } finally {

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Tue Jul 15 21:10:24 2014
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import static com.google.common.base.Preconditions.checkArgument;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
 import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
 
 import java.io.BufferedInputStream;
@@ -62,9 +64,11 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -202,6 +206,7 @@ public class Balancer {
 
   private final NameNodeConnector nnc;
   private final BalancingPolicy policy;
+  private final SaslDataTransferClient saslClient;
   private final double threshold;
 
   // all data node lists
@@ -352,19 +357,18 @@ public class Balancer {
 
         OutputStream unbufOut = sock.getOutputStream();
         InputStream unbufIn = sock.getInputStream();
-        if (nnc.getDataEncryptionKey() != null) {
-          IOStreamPair encryptedStreams =
-              DataTransferEncryptor.getEncryptedStreams(
-                  unbufOut, unbufIn, nnc.getDataEncryptionKey());
-          unbufOut = encryptedStreams.out;
-          unbufIn = encryptedStreams.in;
-        }
+        ExtendedBlock eb = new ExtendedBlock(nnc.blockpoolID, block.getBlock());
+        Token<BlockTokenIdentifier> accessToken = nnc.getAccessToken(eb);
+        IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
+          unbufIn, nnc, accessToken, target.datanode);
+        unbufOut = saslStreams.out;
+        unbufIn = saslStreams.in;
         out = new DataOutputStream(new BufferedOutputStream(unbufOut,
             HdfsConstants.IO_FILE_BUFFER_SIZE));
         in = new DataInputStream(new BufferedInputStream(unbufIn,
             HdfsConstants.IO_FILE_BUFFER_SIZE));
 
-        sendRequest(out);
+        sendRequest(out, eb, accessToken);
         receiveResponse(in);
         bytesMoved.addAndGet(block.getNumBytes());
         LOG.info("Successfully moved " + this);
@@ -395,9 +399,8 @@ public class Balancer {
     }
 
     /* Send a block replace request to the output stream*/
-    private void sendRequest(DataOutputStream out) throws IOException {
-      final ExtendedBlock eb = new ExtendedBlock(nnc.blockpoolID, block.getBlock());
-      final Token<BlockTokenIdentifier> accessToken = nnc.getAccessToken(eb);
+    private void sendRequest(DataOutputStream out, ExtendedBlock eb,
+        Token<BlockTokenIdentifier> accessToken) throws IOException {
       new Sender(out).replaceBlock(eb, accessToken,
           source.getStorageID(), proxySource.getDatanode());
     }
@@ -876,6 +879,12 @@ public class Balancer {
     this.maxConcurrentMovesPerNode =
         conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
             DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
+    this.saslClient = new SaslDataTransferClient(
+      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
+      TrustedChannelResolver.getInstance(conf),
+      conf.getBoolean(
+        IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
+        IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT));
   }
 
   /* Given a data node set, build a network topology and decide
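The Balancer hunk above follows the same client-side pattern this commit installs everywhere: open the raw socket streams, pass them through SaslDataTransferClient.socketSend() together with a DataEncryptionKeyFactory, the block token, and the target datanode, then buffer whatever streams come back. Condensed from the diff (eb, sock, target, and nnc as in the surrounding code):

  OutputStream unbufOut = sock.getOutputStream();
  InputStream unbufIn = sock.getInputStream();
  Token<BlockTokenIdentifier> accessToken = nnc.getAccessToken(eb);
  // socketSend may hand back the original streams (trusted channel, no QOP
  // configured) or SASL-wrapped ones; callers no longer need to care which.
  IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut, unbufIn,
      nnc, accessToken, target.datanode);
  DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      saslStreams.out, HdfsConstants.IO_FILE_BUFFER_SIZE));
  DataInputStream in = new DataInputStream(new BufferedInputStream(
      saslStreams.in, HdfsConstants.IO_FILE_BUFFER_SIZE));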
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java Tue Jul 15 21:10:24 2014
@@ -34,8 +34,8 @@ import org.apache.hadoop.hdfs.NameNodePr
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
-import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
@@ -50,7 +50,7 @@ import org.apache.hadoop.util.Daemon;
  * The class provides utilities for {@link Balancer} to access a NameNode
  */
 @InterfaceAudience.Private
-class NameNodeConnector {
+class NameNodeConnector implements DataEncryptionKeyFactory {
   private static final Log LOG = Balancer.LOG;
   private static final Path BALANCER_ID_PATH = new Path("/system/balancer.id");
   private static final int MAX_NOT_CHANGED_ITERATIONS = 5;
@@ -72,7 +72,6 @@ class NameNodeConnector {
   private BlockTokenSecretManager blockTokenSecretManager;
   private Daemon keyupdaterthread; // AccessKeyUpdater thread
   private DataEncryptionKey encryptionKey;
-  private final TrustedChannelResolver trustedChannelResolver;
 
   NameNodeConnector(URI nameNodeUri,
       Configuration conf) throws IOException {
@@ -122,7 +121,6 @@ class NameNodeConnector {
     if (out == null) {
       throw new IOException("Another balancer is running");
     }
-    this.trustedChannelResolver = TrustedChannelResolver.getInstance(conf);
   }
 
   boolean shouldContinue(long dispatchBlockMoveBytes) {
@@ -154,10 +152,10 @@ class NameNodeConnector {
           BlockTokenSecretManager.AccessMode.COPY));
     }
   }
-
-  DataEncryptionKey getDataEncryptionKey()
-      throws IOException {
-    if (encryptDataTransfer && !this.trustedChannelResolver.isTrusted()) {
+
+  @Override
+  public DataEncryptionKey newDataEncryptionKey() {
+    if (encryptDataTransfer) {
       synchronized (this) {
         if (encryptionKey == null) {
          encryptionKey = blockTokenSecretManager.generateDataEncryptionKey();

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Tue Jul 15 21:10:24 2014
@@ -310,18 +310,11 @@ class BlockPoolSliceScanner {
     }
   }
 
-  private synchronized void updateScanStatus(Block block,
+  private synchronized void updateScanStatus(BlockScanInfo info,
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Tue Jul 15 21:10:24 2014
@@ -310,18 +310,11 @@ class BlockPoolSliceScanner {
     }
   }
   
-  private synchronized void updateScanStatus(Block block,
+  private synchronized void updateScanStatus(BlockScanInfo info,
                                              ScanType type,
                                              boolean scanOk) {
-    BlockScanInfo info = blockMap.get(block);
-    
-    if ( info != null ) {
-      delBlockInfo(info);
-    } else {
-      // It might already be removed. Thats ok, it will be caught next time.
-      info = new BlockScanInfo(block);
-    }
-    
+    delBlockInfo(info);
+    
     long now = Time.monotonicNow();
     info.lastScanType = type;
     info.lastScanTime = now;
@@ -334,8 +327,8 @@ class BlockPoolSliceScanner {
     }
     
     if (verificationLog != null) {
-      verificationLog.append(now, block.getGenerationStamp(),
-          block.getBlockId());
+      verificationLog.append(now, info.getGenerationStamp(),
+          info.getBlockId());
     }
   }
   
@@ -434,11 +427,13 @@ class BlockPoolSliceScanner {
         totalTransientErrors++;
       }
       
-      updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, true);
+      updateScanStatus((BlockScanInfo)block.getLocalBlock(),
+          ScanType.VERIFICATION_SCAN, true);

       return;
     } catch (IOException e) {
-      updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, false);
+      updateScanStatus((BlockScanInfo)block.getLocalBlock(),
+          ScanType.VERIFICATION_SCAN, false);

       // If the block does not exists anymore, then its not an error
       if (!dataset.contains(block)) {
@@ -497,7 +492,7 @@ class BlockPoolSliceScanner {
   
   // Picks one block and verifies it
   private void verifyFirstBlock() {
-    Block block = null;
+    BlockScanInfo block = null;
    synchronized (this) {
      if (!blockInfoSet.isEmpty()) {
        block = blockInfoSet.first();
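The scanner change is driven by a sorted-set invariant: blockInfoSet orders entries by last scan time, so an entry must be removed before its lastScanTime is mutated and re-added afterwards. That is why updateScanStatus now takes the BlockScanInfo itself and calls delBlockInfo unconditionally. A self-contained sketch of the invariant, using hypothetical ScanEntry/ScanQueue names:

    import java.util.Comparator;
    import java.util.TreeSet;

    final class ScanEntry {
      final long blockId;
      long lastScanTime; // part of the sort key: mutate only while removed

      ScanEntry(long blockId) { this.blockId = blockId; }
    }

    final class ScanQueue {
      // Oldest scan first; ties broken by block id to keep entries distinct.
      private final TreeSet<ScanEntry> entries = new TreeSet<>(
          Comparator.comparingLong((ScanEntry e) -> e.lastScanTime)
              .thenComparingLong(e -> e.blockId));

      void add(ScanEntry e) { entries.add(e); }

      /** Remove, update the sort key, then re-insert so ordering stays valid. */
      void markScanned(ScanEntry e, long now) {
        entries.remove(e);   // must happen before lastScanTime changes
        e.lastScanTime = now;
        entries.add(e);      // re-inserted at its new position
      }

      ScanEntry oldest() { return entries.isEmpty() ? null : entries.first(); }
    }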
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java Tue Jul 15 21:10:24 2014
@@ -52,7 +52,9 @@ import static org.apache.hadoop.hdfs.DFS
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.security.SaslPropertiesResolver;

 /**
  * Simple class encapsulating all of the configuration that the DataNode
@@ -86,6 +88,7 @@ public class DNConf {
   final String minimumNameNodeVersion;
   final String encryptionAlgorithm;
+  final SaslPropertiesResolver saslPropsResolver;
   final TrustedChannelResolver trustedChannelResolver;
   
   final long xceiverStopTimeout;
@@ -168,6 +171,8 @@ public class DNConf {
         DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
     this.encryptionAlgorithm = conf.get(DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
     this.trustedChannelResolver = TrustedChannelResolver.getInstance(conf);
+    this.saslPropsResolver = DataTransferSaslUtil.getSaslPropertiesResolver(
+      conf);
     
     this.xceiverStopTimeout = conf.getLong(
         DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
@@ -186,7 +191,26 @@ public class DNConf {
   String getMinimumNameNodeVersion() {
     return this.minimumNameNodeVersion;
   }
-  
+
+  /**
+   * Returns true if encryption is enabled for DataTransferProtocol.
+   *
+   * @return boolean true if encryption is enabled for DataTransferProtocol
+   */
+  public boolean getEncryptDataTransfer() {
+    return encryptDataTransfer;
+  }
+
+  /**
+   * Returns the encryption algorithm configured for DataTransferProtocol, or
+   * null if not configured.
+   *
+   * @return encryption algorithm configured for DataTransferProtocol
+   */
+  public String getEncryptionAlgorithm() {
+    return encryptionAlgorithm;
+  }
+
   public long getXceiverStopTimeout() {
     return xceiverStopTimeout;
   }
@@ -194,4 +218,24 @@ public class DNConf {
   public long getMaxLockedMemory() {
     return maxLockedMemory;
   }
+
+  /**
+   * Returns the SaslPropertiesResolver configured for use with
+   * DataTransferProtocol, or null if not configured.
+   *
+   * @return SaslPropertiesResolver configured for use with DataTransferProtocol
+   */
+  public SaslPropertiesResolver getSaslPropsResolver() {
+    return saslPropsResolver;
+  }
+
+  /**
+   * Returns the TrustedChannelResolver configured for use with
+   * DataTransferProtocol, or null if not configured.
+   *
+   * @return TrustedChannelResolver configured for use with DataTransferProtocol
+   */
+  public TrustedChannelResolver getTrustedChannelResolver() {
+    return trustedChannelResolver;
+  }
 }
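With these getters, a caller can distinguish the new SASL path (a resolver is present when dfs.data.transfer.protection is set) from the legacy dfs.encrypt.data.transfer path. The sketch below illustrates that three-way choice only; the real selection happens inside SaslDataTransferClient/Server and is more involved, so treat this as an assumption-laden outline:

    /**
     * Hypothetical illustration: chooses a wire-protection mode from settings
     * shaped like DNConf's getters. Not the actual HDFS decision logic.
     */
    enum WireProtection { SASL, LEGACY_ENCRYPTION, NONE }

    final class ProtectionChooser {
      static WireProtection choose(boolean encryptDataTransfer,
          Object saslPropsResolver /* null when no protection is configured */) {
        if (saslPropsResolver != null) {
          return WireProtection.SASL;              // new SASL handshake path
        }
        if (encryptDataTransfer) {
          return WireProtection.LEGACY_ENCRYPTION; // dfs.encrypt.data.transfer
        }
        return WireProtection.NONE;                // unprotected channel
      }
    }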
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Jul 15 21:10:24 2014
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -40,6 +43,9 @@ import org.apache.hadoop.hdfs.net.Domain
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.datatransfer.*;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
@@ -224,6 +230,8 @@ public class DataNode extends Configured
   private final List<String> usersWithLocalPathAccess;
   private final boolean connectToDnViaHostname;
   ReadaheadPool readaheadPool;
+  SaslDataTransferClient saslClient;
+  SaslDataTransferServer saslServer;
   private final boolean getHdfsBlockLocationsEnabled;
   private ObjectName dataNodeInfoBeanName;
   private Thread checkDiskErrorThread = null;
@@ -722,15 +730,10 @@ public class DataNode extends Configured
    */
   void startDataNode(Configuration conf, 
                      List<StorageLocation> dataDirs,
-                    // DatanodeProtocol namenode,
                      SecureResources resources
                      ) throws IOException {
-    if(UserGroupInformation.isSecurityEnabled() && resources == null) {
-      if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
-        throw new RuntimeException("Cannot start secure cluster without " + "privileged resources.");
-      }
-    }
+
+    checkSecureConfig(conf, resources);
 
     // settings global for all BPs in the Data Node
     this.secureResources = resources;
@@ -745,15 +748,19 @@ public class DataNode extends Configured
             " size (%s) is greater than zero and native code is not available.",
             DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
       }
-      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
-      if (dnConf.maxLockedMemory > ulimit) {
-        throw new RuntimeException(String.format(
-          "Cannot start datanode because the configured max locked memory" +
-          " size (%s) of %d bytes is more than the datanode's available" +
-          " RLIMIT_MEMLOCK ulimit of %d bytes.",
-          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-          dnConf.maxLockedMemory,
-          ulimit));
+      if (Path.WINDOWS) {
+        NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
+      } else {
+        long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
+        if (dnConf.maxLockedMemory > ulimit) {
+          throw new RuntimeException(String.format(
+            "Cannot start datanode because the configured max locked memory" +
+            " size (%s) of %d bytes is more than the datanode's available" +
+            " RLIMIT_MEMLOCK ulimit of %d bytes.",
+            DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+            dnConf.maxLockedMemory,
+            ulimit));
+        }
       }
     }
     LOG.info("Starting DataNode with maxLockedMemory = " +
@@ -786,6 +793,55 @@ public class DataNode extends Configured
     // Create the ReadaheadPool from the DataNode context so we can
     // exit without having to explicitly shutdown its thread pool.
     readaheadPool = ReadaheadPool.getInstance();
+    saslClient = new SaslDataTransferClient(dnConf.saslPropsResolver,
+      dnConf.trustedChannelResolver,
+      conf.getBoolean(
+        IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
+        IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT));
+    saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
+  }
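Startup now branches on platform before enforcing the locked-memory setting: POSIX systems compare against RLIMIT_MEMLOCK, while Windows grows the process working set instead. A hedged sketch of that validate-or-adjust shape; the helpers here stand in for Path.WINDOWS and the NativeIO calls and are assumptions of this sketch:

    /**
     * Hypothetical sketch of the platform-conditional check above. The
     * helpers osIsWindows(), memlockLimitBytes(), and extendWorkingSet()
     * are stand-ins, not the real NativeIO API.
     */
    final class LockedMemoryCheck {
      static void validate(long configuredBytes) {
        if (configuredBytes <= 0) {
          return; // feature disabled, nothing to verify
        }
        if (osIsWindows()) {
          // Windows has no RLIMIT_MEMLOCK; enlarge the working set instead.
          extendWorkingSet(configuredBytes);
        } else {
          long ulimit = memlockLimitBytes();
          if (configuredBytes > ulimit) {
            throw new IllegalStateException("configured " + configuredBytes
                + " bytes exceeds RLIMIT_MEMLOCK of " + ulimit + " bytes");
          }
        }
      }

      private static boolean osIsWindows() {
        return System.getProperty("os.name").startsWith("Windows");
      }

      private static long memlockLimitBytes() { return Long.MAX_VALUE; } // stub
      private static void extendWorkingSet(long bytes) { /* native call stub */ }
    }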
+
+  /**
+   * Checks if the DataNode has a secure configuration if security is enabled.
+   * There are two possible configurations that are considered secure:
+   * 1. The server has bound to privileged ports for RPC and HTTP via
+   *   SecureDataNodeStarter.
+   * 2. The configuration enables SASL on DataTransferProtocol and HTTPS (no
+   *   plain HTTP) for the HTTP server.  The SASL handshake guarantees
+   *   authentication of the RPC server before a client transmits a secret,
+   *   such as a block access token.  Similarly, SSL guarantees authentication
+   *   of the HTTP server before a client transmits a secret, such as a
+   *   delegation token.
+   * It is not possible to run with both privileged ports and SASL on
+   * DataTransferProtocol.  For backwards-compatibility, the connection logic
+   * must check if the target port is a privileged port, and if so, skip the
+   * SASL handshake.
+   *
+   * @param conf Configuration to check
+   * @param resources SecureResources obtained for DataNode
+   * @throws RuntimeException if security is enabled, but the configuration is
+   *   insecure
+   */
+  private static void checkSecureConfig(Configuration conf,
+      SecureResources resources) throws RuntimeException {
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      return;
+    }
+    String dataTransferProtection = conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY);
+    if (resources != null && dataTransferProtection == null) {
+      return;
+    }
+    if (conf.getBoolean("ignore.secure.ports.for.testing", false)) {
+      return;
+    }
+    if (dataTransferProtection != null &&
+        DFSUtil.getHttpPolicy(conf) == HttpConfig.Policy.HTTPS_ONLY &&
+        resources == null) {
+      return;
+    }
+    throw new RuntimeException("Cannot start secure DataNode without " +
+      "configuring either privileged resources or SASL RPC data transfer " +
+      "protection and SSL for HTTP.  Using privileged resources in " +
+      "combination with SASL RPC data transfer protection is not supported.");
+  }
 
   public static String generateUuid() {
@@ -1619,20 +1675,25 @@ public class DataNode extends Configured
           NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
           sock.setSoTimeout(targets.length * dnConf.socketTimeout);
 
+          //
+          // Header info
+          //
+          Token<BlockTokenIdentifier> accessToken = BlockTokenSecretManager.DUMMY_TOKEN;
+          if (isBlockTokenEnabled) {
+            accessToken = blockPoolTokenSecretManager.generateToken(b,
+                EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE));
+          }
+
           long writeTimeout = dnConf.socketWriteTimeout + 
                               HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
           OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
           InputStream unbufIn = NetUtils.getInputStream(sock);
-          if (dnConf.encryptDataTransfer && 
-              !dnConf.trustedChannelResolver.isTrusted(sock.getInetAddress())) {
-            IOStreamPair encryptedStreams =
-                DataTransferEncryptor.getEncryptedStreams(
-                    unbufOut, unbufIn,
-                    blockPoolTokenSecretManager.generateDataEncryptionKey(
-                        b.getBlockPoolId()));
-            unbufOut = encryptedStreams.out;
-            unbufIn = encryptedStreams.in;
-          }
+          DataEncryptionKeyFactory keyFactory =
+            getDataEncryptionKeyFactoryForBlock(b);
+          IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
+            unbufIn, keyFactory, accessToken, bpReg);
+          unbufOut = saslStreams.out;
+          unbufIn = saslStreams.in;
 
           out = new DataOutputStream(new BufferedOutputStream(unbufOut,
               HdfsConstants.SMALL_BUFFER_SIZE));
@@ -1641,15 +1702,6 @@ public class DataNode extends Configured
               false, false, true, DataNode.this, null, cachingStrategy);
           DatanodeInfo srcNode = new DatanodeInfo(bpReg);
 
-          //
-          // Header info
-          //
-          Token<BlockTokenIdentifier> accessToken = BlockTokenSecretManager.DUMMY_TOKEN;
-          if (isBlockTokenEnabled) {
-            accessToken = blockPoolTokenSecretManager.generateToken(b, 
-                EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE));
-          }
-
           new Sender(out).writeBlock(b, accessToken, clientname, targets, srcNode,
               stage, 0, 0, 0, 0, blockSender.getChecksum(), cachingStrategy);
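Note the "Header info" block moved ahead of stream setup: the SASL handshake consumes the block access token as credentials, so the token must exist before socketSend runs, whereas the old encryptor needed only a key. An outline of the resulting order with stand-in helpers; generateToken, negotiateSasl, and sendOp are not HDFS APIs:

    import java.io.*;
    import java.net.Socket;

    /**
     * Hypothetical outline of the new connection order: credentials first,
     * then the security handshake (which consumes them), then buffering and
     * finally the operation itself.
     */
    final class TransferOrder {
      static void transfer(Socket sock) throws IOException {
        byte[] accessToken = generateToken();            // 1. credentials
        OutputStream unbufOut = sock.getOutputStream();
        unbufOut = negotiateSasl(unbufOut, accessToken); // 2. handshake uses them
        DataOutputStream out = new DataOutputStream(
            new BufferedOutputStream(unbufOut));         // 3. buffering on top
        sendOp(out);                                     // 4. the actual request
      }

      private static byte[] generateToken() { return new byte[0]; }        // stub
      private static OutputStream negotiateSasl(OutputStream out,
          byte[] token) { return out; }                                    // stub
      private static void sendOp(DataOutputStream out) { }                 // stub
    }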
@@ -1692,7 +1744,26 @@ public class DataNode extends Configured
         }
       }
     }
-  
+
+  /**
+   * Returns a new DataEncryptionKeyFactory that generates a key from the
+   * BlockPoolTokenSecretManager, using the block pool ID of the given block.
+   *
+   * @param block for which the factory needs to create a key
+   * @return DataEncryptionKeyFactory for the block's block pool ID
+   */
+  DataEncryptionKeyFactory getDataEncryptionKeyFactoryForBlock(
+      final ExtendedBlock block) {
+    return new DataEncryptionKeyFactory() {
+      @Override
+      public DataEncryptionKey newDataEncryptionKey() {
+        return dnConf.encryptDataTransfer ?
+          blockPoolTokenSecretManager.generateDataEncryptionKey(
+            block.getBlockPoolId()) : null;
+      }
+    };
+  }
+
   /**
    * After a block becomes finalized, a datanode increases metric counter,
    * notifies namenode, and adds it to the block scanner
@@ -2299,11 +2370,11 @@ public class DataNode extends Configured
   @Override // ClientDataNodeProtocol
   public long getReplicaVisibleLength(final ExtendedBlock block)
   throws IOException {
-    checkWriteAccess(block);
+    checkReadAccess(block);
     return data.getReplicaVisibleLength(block);
   }
 
-  private void checkWriteAccess(final ExtendedBlock block) throws IOException {
+  private void checkReadAccess(final ExtendedBlock block) throws IOException {
     if (isBlockTokenEnabled) {
       Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
         .getTokenIdentifiers();
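The anonymous class closes over the block's pool ID, deferring key generation until the SASL layer asks for it and returning null when encryption is off, so no key is minted on unencrypted clusters. A small illustration of the same capture-and-defer shape; SecretFactory and Pools are hypothetical names, not HDFS types:

    /** Sketch only: mirrors the capture-and-defer shape of the factory above. */
    interface SecretFactory {
      byte[] newSecret(); // returns null when no protection is required
    }

    final class Pools {
      private final boolean encrypt;

      Pools(boolean encrypt) { this.encrypt = encrypt; }

      /** The returned factory closes over poolId; nothing is generated yet. */
      SecretFactory factoryFor(final String poolId) {
        return new SecretFactory() {
          @Override
          public byte[] newSecret() {
            return encrypt ? generateFor(poolId) : null;
          }
        };
      }

      private static byte[] generateFor(String poolId) {
        // Placeholder for the secret-manager call in the real code.
        return poolId.getBytes(java.nio.charset.StandardCharsets.UTF_8);
      }
    }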
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Tue Jul 15 21:10:24 2014
@@ -36,11 +36,9 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InterruptedIOException;
 import java.io.OutputStream;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.net.SocketException;
-import java.net.UnknownHostException;
 import java.nio.channels.ClosedChannelException;
 import java.security.MessageDigest;
 import java.util.Arrays;
@@ -52,13 +50,12 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor.InvalidMagicNumberException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
@@ -85,7 +82,6 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.util.DataChecksum;
 
 import com.google.common.base.Preconditions;
-import com.google.common.net.InetAddresses;
 import com.google.protobuf.ByteString;
 
@@ -174,24 +170,11 @@ class DataXceiver extends Receiver imple
       dataXceiverServer.addPeer(peer, Thread.currentThread());
       peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
       InputStream input = socketIn;
-      if ((!peer.hasSecureChannel()) && dnConf.encryptDataTransfer &&
-          !dnConf.trustedChannelResolver.isTrusted(getClientAddress(peer))){
-        IOStreamPair encryptedStreams = null;
-        try {
-          encryptedStreams = DataTransferEncryptor.getEncryptedStreams(socketOut,
-              socketIn, datanode.blockPoolTokenSecretManager,
-              dnConf.encryptionAlgorithm);
-        } catch (InvalidMagicNumberException imne) {
-          LOG.info("Failed to read expected encryption handshake from client " +
-              "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
-              "is running an older version of Hadoop which does not support " +
-              "encryption");
-          return;
-        }
-        input = encryptedStreams.in;
-        socketOut = encryptedStreams.out;
-      }
-      input = new BufferedInputStream(input, HdfsConstants.SMALL_BUFFER_SIZE);
+      IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
+        socketIn, datanode.getDatanodeId());
+      input = new BufferedInputStream(saslStreams.in,
+        HdfsConstants.SMALL_BUFFER_SIZE);
+      socketOut = saslStreams.out;
       
       super.initialize(new DataInputStream(input));
       
@@ -263,19 +246,6 @@ class DataXceiver extends Receiver imple
       }
     }
   }
-
-  /**
-   * Returns InetAddress from peer
-   * The getRemoteAddressString is the form /ip-address:port
-   * The ip-address is extracted from peer and InetAddress is formed
-   * @param peer
-   * @return
-   * @throws UnknownHostException
-   */
-  private static InetAddress getClientAddress(Peer peer) {
-    return InetAddresses.forString(
-        peer.getRemoteAddressString().split(":")[0].substring(1));
-  }
 
   @Override
   public void requestShortCircuitFds(final ExtendedBlock blk,
@@ -656,17 +626,12 @@ class DataXceiver extends Receiver imple
           OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock,
               writeTimeout);
           InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);
-          if (dnConf.encryptDataTransfer &&
-              !dnConf.trustedChannelResolver.isTrusted(mirrorSock.getInetAddress())) {
-            IOStreamPair encryptedStreams =
-                DataTransferEncryptor.getEncryptedStreams(
-                    unbufMirrorOut, unbufMirrorIn,
-                    datanode.blockPoolTokenSecretManager
-                        .generateDataEncryptionKey(block.getBlockPoolId()));
-
-            unbufMirrorOut = encryptedStreams.out;
-            unbufMirrorIn = encryptedStreams.in;
-          }
+          DataEncryptionKeyFactory keyFactory =
+            datanode.getDataEncryptionKeyFactoryForBlock(block);
+          IOStreamPair saslStreams = datanode.saslClient.socketSend(mirrorSock,
+            unbufMirrorOut, unbufMirrorIn, keyFactory, blockToken, targets[0]);
+          unbufMirrorOut = saslStreams.out;
+          unbufMirrorIn = saslStreams.in;
           mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut,
               HdfsConstants.SMALL_BUFFER_SIZE));
           mirrorIn = new DataInputStream(unbufMirrorIn);
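On the receiving side, the conditional handshake, client-address parsing, and InvalidMagicNumberException handling all collapse into one unconditional saslServer.receive call; the policy of whether to negotiate at all now lives behind that entry point. A compact sketch of the delegate-unconditionally shape, where HandshakeServer, Streams, and Acceptor are assumptions of this sketch:

    import java.io.*;

    /** Sketch: the handshake policy lives behind one entry point. */
    interface HandshakeServer {
      /** May negotiate, or may pass the streams through untouched. */
      Streams receive(InputStream in, OutputStream out) throws IOException;
    }

    final class Streams {
      final InputStream in;
      final OutputStream out;
      Streams(InputStream in, OutputStream out) { this.in = in; this.out = out; }
    }

    final class Acceptor {
      private final HandshakeServer handshake;

      Acceptor(HandshakeServer handshake) { this.handshake = handshake; }

      DataInputStream accept(InputStream socketIn, OutputStream socketOut)
          throws IOException {
        // Unconditional delegation: no peer-address parsing or trust checks here.
        Streams s = handshake.receive(socketIn, socketOut);
        return new DataInputStream(new BufferedInputStream(s.in, 512));
      }
    }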
@@ -1026,17 +991,12 @@ class DataXceiver extends Receiver imple
       OutputStream unbufProxyOut = NetUtils.getOutputStream(proxySock,
           dnConf.socketWriteTimeout);
       InputStream unbufProxyIn = NetUtils.getInputStream(proxySock);
-      if (dnConf.encryptDataTransfer &&
-          !dnConf.trustedChannelResolver.isTrusted(
-              proxySock.getInetAddress())) {
-        IOStreamPair encryptedStreams =
-            DataTransferEncryptor.getEncryptedStreams(
-                unbufProxyOut, unbufProxyIn,
-                datanode.blockPoolTokenSecretManager
-                    .generateDataEncryptionKey(block.getBlockPoolId()));
-        unbufProxyOut = encryptedStreams.out;
-        unbufProxyIn = encryptedStreams.in;
-      }
+      DataEncryptionKeyFactory keyFactory =
+        datanode.getDataEncryptionKeyFactoryForBlock(block);
+      IOStreamPair saslStreams = datanode.saslClient.socketSend(proxySock,
+        unbufProxyOut, unbufProxyIn, keyFactory, blockToken, proxySource);
+      unbufProxyOut = saslStreams.out;
+      unbufProxyIn = saslStreams.in;
       
       proxyOut = new DataOutputStream(new BufferedOutputStream(unbufProxyOut,
           HdfsConstants.SMALL_BUFFER_SIZE));