Subject: svn commit: r1419193 [1/3] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/ hadoop-hdfs/ hadoop-hdfs/src/main/java/ had...
Date: Mon, 10 Dec 2012 03:37:46 -0000
To: hdfs-commits@hadoop.apache.org
From: szetszwo@apache.org

Author: szetszwo
Date: Mon Dec 10 03:37:32 2012
New Revision: 1419193

URL: http://svn.apache.org/viewvc?rev=1419193&view=rev
Log:
Merge r1415804 through r1419190 from trunk.
Added:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AuditLogger.java
      - copied unchanged from r1419190, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AuditLogger.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
      - copied unchanged from r1419190, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java
      - copied unchanged from r1419190, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
      - copied unchanged from r1419190, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1415804-1419190

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java Mon Dec 10 03:37:32 2012
@@ -53,10 +53,9 @@ public class UserProvider extends Abstra
     public String parseParam(String str) {
       if (str != null) {
         int len = str.length();
-        if (len < 1 || len > 31) {
+        if (len < 1) {
           throw new IllegalArgumentException(MessageFormat.format(
-            "Parameter [{0}], invalid value [{1}], it's length must be between 1 and 31",
-            getName(), str));
+            "Parameter [{0}], it's length must be at least 1", getName()));
         }
       }
       return super.parseParam(str);
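The hunk above removes the artificial upper bound on user-name length (HDFS-4236 in the CHANGES.txt section below); after the change only an empty name is rejected. A minimal JUnit-style sketch of the new behaviour, reusing the 32-character name from the userNameTooLong test deleted in the next diff (the test scaffolding here is illustrative, not part of the commit):

  import static org.junit.Assert.assertNotNull;
  import org.apache.hadoop.lib.wsrs.UserProvider;
  import org.junit.Test;

  public class UserNameLengthSketch {
    @Test
    public void longUserNameNowAccepted() {
      // This 32-character name used to trigger IllegalArgumentException;
      // with the relaxed check it now parses successfully.
      UserProvider.UserParam param = new UserProvider.UserParam("username");
      assertNotNull(param.parseParam("a123456789012345678901234567890x"));
    }
  }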
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java Mon Dec 10 03:37:32 2012
@@ -110,13 +110,6 @@ public class TestUserProvider {
 
   @Test
   @TestException(exception = IllegalArgumentException.class)
-  public void userNameTooLong() {
-    UserProvider.UserParam userParam = new UserProvider.UserParam("username");
-    userParam.parseParam("a123456789012345678901234567890x");
-  }
-
-  @Test
-  @TestException(exception = IllegalArgumentException.class)
   public void userNameInvalidStart() {
     UserProvider.UserParam userParam = new UserProvider.UserParam("username");
     userParam.parseParam("1x");
@@ -136,12 +129,6 @@ public class TestUserProvider {
   }
 
   @Test
-  public void userNameMaxLength() {
-    UserProvider.UserParam userParam = new UserProvider.UserParam("username");
-    assertNotNull(userParam.parseParam("a123456789012345678901234567890"));
-  }
-
-  @Test
   public void userNameValidDollarSign() {
     UserProvider.UserParam userParam = new UserProvider.UserParam("username");
     assertNotNull(userParam.parseParam("a$"));

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Dec 10 03:37:32 2012
@@ -92,17 +92,12 @@ Trunk (Unreleased)
     HDFS-3040. TestMulitipleNNDataBlockScanner is misspelled.
     (Madhukara Phatak via atm)
 
-    HDFS-3049. During the normal NN startup process, fall back on a different
-    edit log if we see one that is corrupt (Colin Patrick McCabe via todd)
-
     HDFS-3478. Test quotas with Long.Max_Value. (Sujay Rau via eli)
 
     HDFS-3498. Support replica removal in BlockPlacementPolicy and make
     BlockPlacementPolicyDefault extensible for reusing code in subclasses.
     (Junping Du via szetszwo)
 
-    HDFS-3571. Allow EditLogFileInputStream to read from a remote URL (todd)
-
     HDFS-3510. Editlog pre-allocation is performed prior to writing edits
     to avoid partial edits case disk out of space. (Colin McCabe via suresh)
@@ -146,8 +141,6 @@ Trunk (Unreleased)
     HDFS-4052. BlockManager#invalidateWork should print log outside the lock.
     (Jing Zhao via suresh)
 
-    HDFS-4110. Refine a log printed in JNStorage. (Liang Xie via suresh)
-
     HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable
     returning more than INode array. (Jing Zhao via suresh)
@@ -160,10 +153,6 @@ Trunk (Unreleased)
     HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in
     INode.collectSubtreeBlocksAndClear(..). (Jing Zhao via szetszwo)
 
-    HDFS-4153. Add START_MSG/SHUTDOWN_MSG for JournalNode. (liang xie via atm)
-
-    HDFS-3935. Add JournalNode to the start/stop scripts (Andy Isaacson via todd)
-
     HDFS-4206. Change the fields in INode and its subclasses to private.
     (szetszwo)
@@ -176,6 +165,11 @@ Trunk (Unreleased)
     HDFS-4209. Clean up the addNode/addChild/addChildNoQuotaCheck methods in
     FSDirectory and INodeDirectory. (szetszwo)
 
+    HDFS-3358. Specify explicitly that the NN UI status total is talking
+    of persistent objects on heap. (harsh)
+
+    HDFS-4234. Use generic code for choosing datanode in Balancer. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -268,107 +262,12 @@ Trunk (Unreleased)
     HDFS-4105. The SPNEGO user for secondary namenode should use the web
     keytab. (Arpit Gupta via jitendra)
 
-    BREAKDOWN OF HDFS-3077 SUBTASKS
+    HDFS-4240. For nodegroup-aware block placement, when a node is excluded,
+    the nodes in the same nodegroup should also be excluded. (Junping Du
+    via szetszwo)
 
-    HDFS-3077. Quorum-based protocol for reading and writing edit logs.
-    (todd, Brandon Li, and Hari Mankude via todd)
-
-    HDFS-3694. Fix getEditLogManifest to fetch httpPort if necessary (todd)
-
-    HDFS-3692. Support purgeEditLogs() call to remotely purge logs on JNs
-    (todd)
-
-    HDFS-3693. JNStorage should read its storage info even before a writer
-    becomes active (todd)
-
-    HDFS-3725. Fix QJM startup when individual JNs have gaps (todd)
-
-    HDFS-3741. Exhaustive failure injection test for skipped RPCs (todd)
-
-    HDFS-3773. TestNNWithQJM fails after HDFS-3741. (atm)
-
-    HDFS-3793. Implement genericized format() in QJM (todd)
-
-    HDFS-3795. QJM: validate journal dir at startup (todd)
-
-    HDFS-3798. Avoid throwing NPE when finalizeSegment() is called on invalid
-    segment (todd)
-
-    HDFS-3799. QJM: handle empty log segments during recovery (todd)
-
-    HDFS-3797. QJM: add segment txid as a parameter to journal() RPC (todd)
-
-    HDFS-3800. improvements to QJM fault testing (todd)
-
-    HDFS-3823. QJM: TestQJMWithFaults fails occasionally because of missed
-    setting of HTTP port. (todd and atm)
-
-    HDFS-3826. QJM: Some trivial logging / exception text improvements. (todd
-    and atm)
-
-    HDFS-3839. QJM: hadoop-daemon.sh should be updated to accept "journalnode"
-    (eli)
-
-    HDFS-3845. Fixes for edge cases in QJM recovery protocol (todd)
-
-    HDFS-3877. QJM: Provide defaults for dfs.journalnode.*address (eli)
-
-    HDFS-3863. Track last "committed" txid in QJM (todd)
-
-    HDFS-3869. Expose non-file journal manager details in web UI (todd)
-
-    HDFS-3884. Journal format() should reset cached values (todd)
-
-    HDFS-3870. Add metrics to JournalNode (todd)
-
-    HDFS-3891. Make selectInputStreams throw IOE instead of RTE (todd)
-
-    HDFS-3726. If a logger misses an RPC, don't retry that logger until next
-    segment (todd)
-
-    HDFS-3893. QJM: Make QJM work with security enabled. (atm)
-
-    HDFS-3897. QJM: TestBlockToken fails after HDFS-3893. (atm)
-
-    HDFS-3898. QJM: enable TCP_NODELAY for IPC (todd)
-
-    HDFS-3885. QJM: optimize log sync when JN is lagging behind (todd)
-
-    HDFS-3900. QJM: avoid validating log segments on log rolls (todd)
-
-    HDFS-3901. QJM: send 'heartbeat' messages to JNs even when they are
-    out-of-sync (todd)
-
-    HDFS-3899. QJM: Add client-side metrics (todd)
-
-    HDFS-3914. QJM: acceptRecovery should abort current segment (todd)
-
-    HDFS-3915. QJM: Failover fails with auth error in secure cluster (todd)
-
-    HDFS-3906. QJM: quorum timeout on failover with large log segment (todd)
-
-    HDFS-3840. JournalNodes log JournalNotFormattedException backtrace error
-    before being formatted (todd)
-
-    HDFS-3894. QJM: testRecoverAfterDoubleFailures can be flaky due to IPC
-    client caching (todd)
-
-    HDFS-3926. QJM: Add user documentation for QJM. (atm)
-
-    HDFS-3943. QJM: remove currently-unused md5sum field (todd)
-
-    HDFS-3950. QJM: misc TODO cleanup, improved log messages, etc. (todd)
-
-    HDFS-3955. QJM: Make acceptRecovery() atomic. (todd)
-
-    HDFS-3956. QJM: purge temporary files when no longer within retention
-    period (todd)
-
-    HDFS-4004. TestJournalNode#testJournal fails because of test case execution
-    order (Chao Shi via todd)
-
-    HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
-    (Chao Shi via todd)
 
+    HDFS-4260. Fix HDFS tests to set test dir to a valid HDFS path as opposed
+    to the local build path (Chris Nauroth via Sanjay)
 
 Release 2.0.3-alpha - Unreleased
 
@@ -493,6 +392,24 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4214. OfflineEditsViewer should print out the offset at which it
     encountered an error. (Colin Patrick McCabe via atm)
 
+    HDFS-4199. Provide test for HdfsVolumeId. (Ivan A. Veselovsky via atm)
+
+    HDFS-3049. During the normal NN startup process, fall back on a different
+    edit log if we see one that is corrupt (Colin Patrick McCabe via todd)
+
+    HDFS-3571. Allow EditLogFileInputStream to read from a remote URL (todd)
+
+    HDFS-4110. Refine a log printed in JNStorage. (Liang Xie via suresh)
+
+    HDFS-4153. Add START_MSG/SHUTDOWN_MSG for JournalNode. (liang xie via atm)
+
+    HDFS-3935. Add JournalNode to the start/stop scripts (Andy Isaacson via todd)
+
+    HDFS-4268. Remove redundant enum NNHAStatusHeartbeat.State. (shv)
+
+    HDFS-3680. Allow customized audit logging in HDFS FSNamesystem. (Marcelo
+    Vanzin via atm)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -646,6 +563,127 @@ Release 2.0.3-alpha - Unreleased
     of it is undefined after the iteration or modifications of the map.
     (szetszwo)
 
+    HDFS-4231. BackupNode: Introduce BackupState. (shv)
+
+    HDFS-4243. When replacing an INodeDirectory, the parent pointers of the
+    children of the child have to be updated to the new child. (Jing Zhao
+    via szetszwo)
+
+    HDFS-4238. Standby namenode should not do purging of shared
+    storage edits. (todd)
+
+    HDFS-4282. TestEditLog.testFuzzSequences FAILED in all pre-commit test
+    (todd)
+
+    HDFS-4236. Remove artificial limit on username length introduced in
+    HDFS-4171. (tucu via suresh)
+
+    HDFS-4279. NameNode does not initialize generic conf keys when started
+    with -recover. (Colin Patrick McCabe via atm)
+
+    BREAKDOWN OF HDFS-3077 SUBTASKS
+
+    HDFS-3077. Quorum-based protocol for reading and writing edit logs.
+    (todd, Brandon Li, and Hari Mankude via todd)
+
+    HDFS-3694. Fix getEditLogManifest to fetch httpPort if necessary (todd)
+
+    HDFS-3692. Support purgeEditLogs() call to remotely purge logs on JNs
+    (todd)
+
+    HDFS-3693. JNStorage should read its storage info even before a writer
+    becomes active (todd)
+
+    HDFS-3725. Fix QJM startup when individual JNs have gaps (todd)
+
+    HDFS-3741. Exhaustive failure injection test for skipped RPCs (todd)
+
+    HDFS-3773. TestNNWithQJM fails after HDFS-3741. (atm)
+
+    HDFS-3793. Implement genericized format() in QJM (todd)
+
+    HDFS-3795. QJM: validate journal dir at startup (todd)
+
+    HDFS-3798. Avoid throwing NPE when finalizeSegment() is called on invalid
+    segment (todd)
+
+    HDFS-3799. QJM: handle empty log segments during recovery (todd)
+
+    HDFS-3797. QJM: add segment txid as a parameter to journal() RPC (todd)
+
+    HDFS-3800. improvements to QJM fault testing (todd)
+
+    HDFS-3823. QJM: TestQJMWithFaults fails occasionally because of missed
+    setting of HTTP port. (todd and atm)
+
+    HDFS-3826. QJM: Some trivial logging / exception text improvements. (todd
+    and atm)
+
+    HDFS-3839. QJM: hadoop-daemon.sh should be updated to accept "journalnode"
+    (eli)
+
+    HDFS-3845. Fixes for edge cases in QJM recovery protocol (todd)
+
+    HDFS-3877. QJM: Provide defaults for dfs.journalnode.*address (eli)
+
+    HDFS-3863. Track last "committed" txid in QJM (todd)
+
+    HDFS-3869. Expose non-file journal manager details in web UI (todd)
+
+    HDFS-3884. Journal format() should reset cached values (todd)
+    HDFS-3870. Add metrics to JournalNode (todd)
+
+    HDFS-3891. Make selectInputStreams throw IOE instead of RTE (todd)
+
+    HDFS-3726. If a logger misses an RPC, don't retry that logger until next
+    segment (todd)
+
+    HDFS-3893. QJM: Make QJM work with security enabled. (atm)
+
+    HDFS-3897. QJM: TestBlockToken fails after HDFS-3893. (atm)
+
+    HDFS-3898. QJM: enable TCP_NODELAY for IPC (todd)
+
+    HDFS-3885. QJM: optimize log sync when JN is lagging behind (todd)
+
+    HDFS-3900. QJM: avoid validating log segments on log rolls (todd)
+
+    HDFS-3901. QJM: send 'heartbeat' messages to JNs even when they are
+    out-of-sync (todd)
+
+    HDFS-3899. QJM: Add client-side metrics (todd)
+
+    HDFS-3914. QJM: acceptRecovery should abort current segment (todd)
+
+    HDFS-3915. QJM: Failover fails with auth error in secure cluster (todd)
+
+    HDFS-3906. QJM: quorum timeout on failover with large log segment (todd)
+
+    HDFS-3840. JournalNodes log JournalNotFormattedException backtrace error
+    before being formatted (todd)
+
+    HDFS-3894. QJM: testRecoverAfterDoubleFailures can be flaky due to IPC
+    client caching (todd)
+
+    HDFS-3926. QJM: Add user documentation for QJM. (atm)
+
+    HDFS-3943. QJM: remove currently-unused md5sum field (todd)
+
+    HDFS-3950. QJM: misc TODO cleanup, improved log messages, etc. (todd)
+
+    HDFS-3955. QJM: Make acceptRecovery() atomic. (todd)
+
+    HDFS-3956. QJM: purge temporary files when no longer within retention
+    period (todd)
+
+    HDFS-4004. TestJournalNode#testJournal fails because of test case execution
+    order (Chao Shi via todd)
+
+    HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
+    (Chao Shi via todd)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES
@@ -2035,6 +2073,11 @@ Release 0.23.6 - UNRELEASED
 
   BUG FIXES
 
+    HDFS-4247. saveNamespace should be tolerant of dangling lease (daryn)
+
+    HDFS-4248. Renaming directories may incorrectly remove the paths in leases
+    under the tree. (daryn via szetszwo)
+
 Release 0.23.5 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1415804-1419190

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java Mon Dec 10 03:37:32 2012
@@ -27,26 +27,38 @@ import org.apache.hadoop.classification.
  * HDFS-specific volume identifier which implements {@link VolumeId}. Can be
  * used to differentiate between the data directories on a single datanode. This
  * identifier is only unique on a per-datanode basis.
+ *
+ * Note that invalid IDs are represented by {@link VolumeId#INVALID_VOLUME_ID}.
  */
 @InterfaceStability.Unstable
 @InterfaceAudience.Public
 public class HdfsVolumeId implements VolumeId {
-
+
   private final byte[] id;
-  private final boolean isValid;
 
-  public HdfsVolumeId(byte[] id, boolean isValid) {
+  public HdfsVolumeId(byte[] id) {
+    if (id == null) {
+      throw new NullPointerException("A valid Id can only be constructed " +
+          "with a non-null byte array.");
+    }
     this.id = id;
-    this.isValid = isValid;
   }
 
   @Override
-  public boolean isValid() {
-    return isValid;
+  public final boolean isValid() {
+    return true;
   }
 
   @Override
   public int compareTo(VolumeId arg0) {
+    if (arg0 == null) {
+      return 1;
+    }
+    if (!arg0.isValid()) {
+      // any valid ID is greater
+      // than any invalid ID:
+      return 1;
+    }
     return hashCode() - arg0.hashCode();
   }
 
@@ -63,8 +75,10 @@ public class HdfsVolumeId implements Vol
     if (obj == this) {
       return true;
     }
-    HdfsVolumeId that = (HdfsVolumeId) obj;
+    // NB: if (!obj.isValid()) { return false; } check is not necessary
+    // because we have class identity checking above, and for this class
+    // isValid() is always true.
     return new EqualsBuilder().append(this.id, that.id).isEquals();
   }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/VolumeId.java Mon Dec 10 03:37:32 2012
@@ -29,6 +29,48 @@ import org.apache.hadoop.classification.
 public interface VolumeId extends Comparable<VolumeId> {
 
   /**
+   * Represents an invalid Volume ID (ID for unknown content).
+   */
+  public static final VolumeId INVALID_VOLUME_ID = new VolumeId() {
+
+    @Override
+    public int compareTo(VolumeId arg0) {
+      // This object is equal only to itself;
+      // It is greater than null, and
+      // is always less than any other VolumeId:
+      if (arg0 == null) {
+        return 1;
+      }
+      if (arg0 == this) {
+        return 0;
+      } else {
+        return -1;
+      }
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      // this object is equal only to itself:
+      return (obj == this);
+    }
+
+    @Override
+    public int hashCode() {
+      return Integer.MIN_VALUE;
+    }
+
+    @Override
+    public boolean isValid() {
+      return false;
+    }
+
+    @Override
+    public String toString() {
+      return "Invalid VolumeId";
+    }
+  };
+
+  /**
    * Indicates if the disk identifier is valid. Invalid identifiers indicate
    * that the block was not present, or the location could otherwise not be
    * determined.
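Taken together, the two diffs above replace the (id, isValid) flag pair with a null-object pattern: HdfsVolumeId is always valid, and the singleton VolumeId.INVALID_VOLUME_ID stands in for unknown volumes, collating below every valid ID. A small self-contained sketch of the resulting comparison semantics (class names are from the diffs above; the main method is illustrative):

  import java.util.Arrays;
  import java.util.Collections;
  import java.util.List;
  import org.apache.hadoop.fs.HdfsVolumeId;
  import org.apache.hadoop.fs.VolumeId;

  public class VolumeIdSketch {
    public static void main(String[] args) {
      VolumeId known = new HdfsVolumeId(new byte[] { 0, 1 });
      VolumeId unknown = VolumeId.INVALID_VOLUME_ID;

      // Natural ordering needs no null checks: any invalid ID sorts
      // before any valid one, and is equal only to itself.
      List<VolumeId> ids = Arrays.asList(known, unknown);
      Collections.sort(ids);
      System.out.println(ids.get(0).isValid());  // false: the invalid ID
      System.out.println(ids.get(1).isValid());  // true
    }
  }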
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java Mon Dec 10 03:37:32 2012
@@ -202,7 +202,7 @@ class BlockStorageLocationUtil {
       ArrayList<VolumeId> l = new ArrayList<VolumeId>(b.getLocations().length);
       // Start off all IDs as invalid, fill it in later with results from RPCs
       for (int i = 0; i < b.getLocations().length; i++) {
-        l.add(new HdfsVolumeId(null, false));
+        l.add(VolumeId.INVALID_VOLUME_ID);
       }
       blockVolumeIds.put(b, l);
     }
@@ -236,7 +236,7 @@ class BlockStorageLocationUtil {
       // Get the VolumeId by indexing into the list of VolumeIds
       // provided by the datanode
       byte[] volumeId = metaVolumeIds.get(volumeIndex);
-      HdfsVolumeId id = new HdfsVolumeId(volumeId, true);
+      HdfsVolumeId id = new HdfsVolumeId(volumeId);
       // Find out which index we are in the LocatedBlock's replicas
       LocatedBlock locBlock = extBlockToLocBlock.get(extBlock);
       DatanodeInfo[] dnInfos = locBlock.getLocations();
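The DFSConfigKeys hunk just below introduces the configuration surface for HDFS-3680 (pluggable audit logging; the AuditLogger interface itself is in AuditLogger.java in the Added list and is not shown in this part of the mail). A hedged wiring sketch that uses only the two new keys; the custom logger class name is a hypothetical placeholder:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSConfigKeys;

  public class AuditLoggerConfigSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Keep the built-in "default" audit logger and register a custom
      // implementation by class name (com.example.MyAuditLogger is
      // hypothetical; it would implement the new AuditLogger interface).
      conf.set(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY,
          DFSConfigKeys.DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME
              + ",com.example.MyAuditLogger");
      System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY));
    }
  }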
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Mon Dec 10 03:37:32 2012
@@ -246,6 +246,8 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_HOSTS = "dfs.hosts";
   public static final String  DFS_HOSTS_EXCLUDE = "dfs.hosts.exclude";
   public static final String  DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
+  public static final String  DFS_NAMENODE_AUDIT_LOGGERS_KEY = "dfs.namenode.audit.loggers";
+  public static final String  DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME = "default";
 
   // Much code in hdfs is not yet updated to use these keys.
   public static final String DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Mon Dec 10 03:37:32 2012
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.ContentSumma
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -1232,9 +1233,9 @@ public class PBHelper {
     if (s == null) return null;
     switch (s.getState()) {
     case ACTIVE:
-      return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.ACTIVE, s.getTxid());
+      return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
     case STANDBY:
-      return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.STANDBY, s.getTxid());
+      return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
     default:
       throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" + s.getState());
     }
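The PBHelper hunk above is part of HDFS-4268 (listed in CHANGES.txt): the heartbeat now carries the shared HAServiceState instead of a private NNHAStatusHeartbeat.State enum, so client code such as the BPOfferService diff further below compares against a single type. A short sketch of the resulting call pattern (constructor and getState() exactly as used in the diffs; the txid value is illustrative):

  import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
  import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;

  public class HeartbeatStateSketch {
    public static void main(String[] args) {
      long txid = 42L;  // illustrative transaction id
      NNHAStatusHeartbeat status =
          new NNHAStatusHeartbeat(HAServiceState.ACTIVE, txid);
      // One shared enum, no redundant mapping between two State types:
      boolean nnClaimsActive = status.getState() == HAServiceState.ACTIVE;
      System.out.println("active=" + nnClaimsActive);
    }
  }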
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Mon Dec 10 03:37:32 2012
@@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -557,7 +558,7 @@ public class Balancer {
     }
 
     /** Decide if still need to move more bytes */
-    protected boolean isMoveQuotaFull() {
+    protected boolean hasSpaceForScheduling() {
       return scheduledSize < maxSize2Move;
     }
 
-  /* Decide all <source, target> pairs and
+  /** A matcher interface for matching nodes. */
+  private interface Matcher {
+    /** Given the cluster topology, does the left node match the right node? */
+    boolean match(NetworkTopology cluster, Node left, Node right);
+  }
+
+  /** Match datanodes in the same node group. */
+  static final Matcher SAME_NODE_GROUP = new Matcher() {
+    @Override
+    public boolean match(NetworkTopology cluster, Node left, Node right) {
+      return cluster.isOnSameNodeGroup(left, right);
+    }
+  };
+
+  /** Match datanodes in the same rack. */
+  static final Matcher SAME_RACK = new Matcher() {
+    @Override
+    public boolean match(NetworkTopology cluster, Node left, Node right) {
+      return cluster.isOnSameRack(left, right);
+    }
+  };
+
+  /** Match any datanode with any other datanode. */
+  static final Matcher ANY_OTHER = new Matcher() {
+    @Override
+    public boolean match(NetworkTopology cluster, Node left, Node right) {
+      return left != right;
+    }
+  };
+
+  /**
+   * Decide all <source, target> pairs and
    * the number of bytes to move from a source to a target
    * Maximum bytes to be moved per node is
    * Min(1 Band worth of bytes,  MAX_SIZE_TO_MOVE).
    * Return total number of bytes to move in this iteration
    */
   private long chooseNodes() {
-    // First, match nodes on the same node group if cluster has nodegroup
-    // awareness
+    // First, match nodes on the same node group if cluster is node group aware
     if (cluster.isNodeGroupAware()) {
-      chooseNodesOnSameNodeGroup();
+      chooseNodes(SAME_NODE_GROUP);
     }
 
     // Then, match nodes on the same rack
-    chooseNodes(true);
-    // At last, match nodes on different racks
-    chooseNodes(false);
+    chooseNodes(SAME_RACK);
+    // At last, match all remaining nodes
+    chooseNodes(ANY_OTHER);
 
     assert (datanodes.size() >= sources.size()+targets.size())
       : "Mismatched number of datanodes (" +
@@ -952,57 +983,55 @@
     }
     return bytesToMove;
   }
-
-  /**
-   * Decide all <source, target> pairs where source and target are
-   * on the same NodeGroup
-   */
-  private void chooseNodesOnSameNodeGroup() {
+  /** Decide all <source, target> pairs according to the matcher. */
+  private void chooseNodes(final Matcher matcher) {
     /* first step: match each overUtilized datanode (source) to
-     * one or more underUtilized datanodes within same NodeGroup(targets).
+     * one or more underUtilized datanodes (targets).
      */
-    chooseOnSameNodeGroup(overUtilizedDatanodes, underUtilizedDatanodes);
-
-    /* match each remaining overutilized datanode (source) to below average
-     * utilized datanodes within the same NodeGroup(targets).
+    chooseDatanodes(overUtilizedDatanodes, underUtilizedDatanodes, matcher);
+
+    /* match each remaining overutilized datanode (source) to
+     * below average utilized datanodes (targets).
      * Note only overutilized datanodes that haven't had that max bytes to move
     * satisfied in step 1 are selected
     */
-    chooseOnSameNodeGroup(overUtilizedDatanodes, belowAvgUtilizedDatanodes);
+    chooseDatanodes(overUtilizedDatanodes, belowAvgUtilizedDatanodes, matcher);
 
-    /* match each remaining underutilized datanode to above average utilized
-     * datanodes within the same NodeGroup.
+    /* match each remaining underutilized datanode (target) to
+     * above average utilized datanodes (source).
      * Note only underutilized datanodes that have not had that max bytes to
     * move satisfied in step 1 are selected.
     */
-    chooseOnSameNodeGroup(underUtilizedDatanodes, aboveAvgUtilizedDatanodes);
   }
-
+    chooseDatanodes(underUtilizedDatanodes, aboveAvgUtilizedDatanodes, matcher);
+  }
+
   /**
-   * Match two sets of nodes within the same NodeGroup, one should be source
-   * nodes (utilization > Avg), and the other should be destination nodes
-   * (utilization < Avg).
-   * @param datanodes
-   * @param candidates
+   * For each datanode, choose matching nodes from the candidates. Either the
+   * datanodes or the candidates are source nodes with (utilization > Avg), and
+   * the others are target nodes with (utilization < Avg).
    */
   private <D extends BalancerDatanode, C extends BalancerDatanode> void
-      chooseOnSameNodeGroup(Collection<D> datanodes, Collection<C> candidates) {
+      chooseDatanodes(Collection<D> datanodes, Collection<C> candidates,
+          Matcher matcher) {
     for (Iterator<D> i = datanodes.iterator(); i.hasNext();) {
       final D datanode = i.next();
-      for(; chooseOnSameNodeGroup(datanode, candidates.iterator()); );
-      if (!datanode.isMoveQuotaFull()) {
+      for(; chooseForOneDatanode(datanode, candidates, matcher); );
+      if (!datanode.hasSpaceForScheduling()) {
         i.remove();
       }
     }
   }
-
+
   /**
-   * Match one datanode with a set of candidates nodes within the same NodeGroup.
+   * For the given datanode, choose a candidate and then schedule it.
+   * @return true if a candidate is chosen; false if no candidate is chosen.
    */
-  private <T extends BalancerDatanode> boolean chooseOnSameNodeGroup(
-      BalancerDatanode dn, Iterator<T> candidates) {
-    final T chosen = chooseCandidateOnSameNodeGroup(dn, candidates);
+  private <C extends BalancerDatanode> boolean chooseForOneDatanode(
+      BalancerDatanode dn, Collection<C> candidates, Matcher matcher) {
+    final Iterator<C> i = candidates.iterator();
+    final C chosen = chooseCandidate(dn, i, matcher);
+
     if (chosen == null) {
       return false;
     }
@@ -1011,8 +1040,8 @@
     } else {
       matchSourceWithTargetToMove((Source)chosen, dn);
     }
-    if (!chosen.isMoveQuotaFull()) {
-      candidates.remove();
+    if (!chosen.hasSpaceForScheduling()) {
+      i.remove();
     }
     return true;
   }
@@ -1029,19 +1058,15 @@
         +source.datanode.getName() + " to " + target.datanode.getName());
   }
-
-  /** choose a datanode from candidates within the same NodeGroup
-   * of dn.
-   */
-  private <T extends BalancerDatanode> T chooseCandidateOnSameNodeGroup(
-      BalancerDatanode dn, Iterator<T> candidates) {
-    if (dn.isMoveQuotaFull()) {
+  /** Choose a candidate for the given datanode. */
+  private <D extends BalancerDatanode, C extends BalancerDatanode>
+      C chooseCandidate(D dn, Iterator<C> candidates, Matcher matcher) {
+    if (dn.hasSpaceForScheduling()) {
       for(; candidates.hasNext(); ) {
-        final T c = candidates.next();
-        if (!c.isMoveQuotaFull()) {
+        final C c = candidates.next();
+        if (!c.hasSpaceForScheduling()) {
           candidates.remove();
-          continue;
-        }
-        if (cluster.isOnSameNodeGroup(dn.getDatanode(), c.getDatanode())) {
+        } else if (matcher.match(cluster, dn.getDatanode(), c.getDatanode())) {
          return c;
         }
       }
@@ -1049,148 +1074,6 @@
     return null;
   }
 
-  /* if onRack is true, decide all <source, target> pairs
-   * where source and target are on the same rack; Otherwise
-   * decide all <source, target> pairs where source and target are
-   * on different racks
-   */
-  private void chooseNodes(boolean onRack) {
-    /* first step: match each overUtilized datanode (source) to
-     * one or more underUtilized datanodes (targets).
-     */
-    chooseTargets(underUtilizedDatanodes, onRack);
-
-    /* match each remaining overutilized datanode (source) to
-     * below average utilized datanodes (targets).
-     * Note only overutilized datanodes that haven't had that max bytes to move
-     * satisfied in step 1 are selected
-     */
-    chooseTargets(belowAvgUtilizedDatanodes, onRack);
-
-    /* match each remaining underutilized datanode (target) to
-     * above average utilized datanodes (source).
-     * Note only underutilized datanodes that have not had that max bytes to
-     * move satisfied in step 1 are selected.
-     */
-    chooseSources(aboveAvgUtilizedDatanodes, onRack);
-  }
-
-  /* choose targets from the target candidate list for each over utilized
-   * source datanode.
-   * OnRackTarget determines if the chosen target
-   * should be on the same rack as the source
-   */
-  private void chooseTargets(
-      Collection<BalancerDatanode> targetCandidates, boolean onRackTarget ) {
-    for (Iterator<Source> srcIterator = overUtilizedDatanodes.iterator();
-        srcIterator.hasNext();) {
-      Source source = srcIterator.next();
-      while (chooseTarget(source, targetCandidates.iterator(), onRackTarget)) {
-      }
-      if (!source.isMoveQuotaFull()) {
-        srcIterator.remove();
-      }
-    }
-    return;
-  }
-
-  /* choose sources from the source candidate list for each under utilized
-   * target datanode. onRackSource determines if the chosen source
-   * should be on the same rack as the target
-   */
-  private void chooseSources(
-      Collection<Source> sourceCandidates, boolean onRackSource) {
-    for (Iterator<BalancerDatanode> targetIterator =
-        underUtilizedDatanodes.iterator(); targetIterator.hasNext();) {
-      BalancerDatanode target = targetIterator.next();
-      while (chooseSource(target, sourceCandidates.iterator(), onRackSource)) {
-      }
-      if (!target.isMoveQuotaFull()) {
-        targetIterator.remove();
-      }
-    }
-    return;
-  }
-
-  /* For the given source, choose targets from the target candidate list.
-   * OnRackTarget determines if the chosen target
-   * should be on the same rack as the source
-   */
-  private boolean chooseTarget(Source source,
-      Iterator<BalancerDatanode> targetCandidates, boolean onRackTarget) {
-    if (!source.isMoveQuotaFull()) {
-      return false;
-    }
-    boolean foundTarget = false;
-    BalancerDatanode target = null;
-    while (!foundTarget && targetCandidates.hasNext()) {
-      target = targetCandidates.next();
-      if (!target.isMoveQuotaFull()) {
-        targetCandidates.remove();
-        continue;
-      }
-      if (onRackTarget) {
-        // choose from on-rack nodes
-        if (cluster.isOnSameRack(source.datanode, target.datanode)) {
-          foundTarget = true;
-        }
-      } else {
-        // choose from off-rack nodes
-        if (!cluster.isOnSameRack(source.datanode, target.datanode)) {
-          foundTarget = true;
-        }
-      }
-    }
-    if (foundTarget) {
-      assert(target != null):"Choose a null target";
-      matchSourceWithTargetToMove(source, target);
-      if (!target.isMoveQuotaFull()) {
-        targetCandidates.remove();
-      }
-      return true;
-    }
-    return false;
-  }
-
-  /* For the given target, choose sources from the source candidate list.
-   * OnRackSource determines if the chosen source
-   * should be on the same rack as the target
-   */
-  private boolean chooseSource(BalancerDatanode target,
-      Iterator<Source> sourceCandidates, boolean onRackSource) {
-    if (!target.isMoveQuotaFull()) {
-      return false;
-    }
-    boolean foundSource = false;
-    Source source = null;
-    while (!foundSource && sourceCandidates.hasNext()) {
-      source = sourceCandidates.next();
-      if (!source.isMoveQuotaFull()) {
-        sourceCandidates.remove();
-        continue;
-      }
-      if (onRackSource) {
-        // choose from on-rack nodes
-        if ( cluster.isOnSameRack(source.getDatanode(), target.getDatanode())) {
-          foundSource = true;
-        }
-      } else {
-        // choose from off-rack nodes
-        if (!cluster.isOnSameRack(source.datanode, target.datanode)) {
-          foundSource = true;
-        }
-      }
-    }
-    if (foundSource) {
-      assert(source != null):"Choose a null source";
-      matchSourceWithTargetToMove(source, target);
-      if ( !source.isMoveQuotaFull()) {
-        sourceCandidates.remove();
-      }
-      return true;
-    }
-    return false;
-  }
-
   private static class BytesMoved {
     private long bytesMoved = 0L;;
     private synchronized void inc( long bytes ) {
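The Balancer refactoring above (HDFS-4234) replaces the boolean onRack flag and the node-group-specific copies with one strategy interface, so the node-group, same-rack, and any-other passes share a single matching loop. A self-contained sketch of the same pattern, with the topology reduced to rack-prefixed strings; every name here is illustrative rather than the Balancer's real API:

  import java.util.ArrayList;
  import java.util.Iterator;
  import java.util.List;

  public class MatcherSketch {
    /** The strategy: does candidate c match datanode d? */
    interface Matcher {
      boolean match(String d, String c);
    }

    static final Matcher SAME_RACK = (d, c) -> rackOf(d).equals(rackOf(c));
    static final Matcher ANY_OTHER = (d, c) -> !d.equals(c);

    static String rackOf(String node) {
      return node.substring(0, node.indexOf('/'));
    }

    /** One generic pass replaces a pair of near-duplicate methods. */
    static void choose(List<String> datanodes, List<String> candidates, Matcher m) {
      for (Iterator<String> di = datanodes.iterator(); di.hasNext();) {
        String d = di.next();
        for (Iterator<String> ci = candidates.iterator(); ci.hasNext();) {
          String c = ci.next();
          if (m.match(d, c)) {
            System.out.println(d + " -> " + c);
            ci.remove();
            di.remove();
            break;
          }
        }
      }
    }

    public static void main(String[] args) {
      List<String> sources = new ArrayList<>(List.of("r1/dn1", "r2/dn2"));
      List<String> targets = new ArrayList<>(List.of("r1/dn3", "r3/dn4"));
      choose(sources, targets, SAME_RACK);  // pass 1: prefer same-rack pairs
      choose(sources, targets, ANY_OTHER);  // pass 2: match whatever remains
    }
  }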
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Mon Dec 10 03:37:32 2012
@@ -152,8 +152,9 @@ public class BlockPlacementPolicyDefault
     List<DatanodeDescriptor> results = new ArrayList<DatanodeDescriptor>(chosenNodes);
-    for (Node node:chosenNodes) {
-      excludedNodes.put(node, node);
+    for (DatanodeDescriptor node:chosenNodes) {
+      // add localMachine and related nodes to excludedNodes
+      addToExcludedNodes(node, excludedNodes);
       adjustExcludedNodes(excludedNodes, node);
     }
@@ -235,7 +236,7 @@ public class BlockPlacementPolicyDefault
             + totalReplicasExpected + "\n"
             + e.getMessage());
         if (avoidStaleNodes) {
-          // ecxludedNodes now has - initial excludedNodes, any nodes that were
+          // excludedNodes now has - initial excludedNodes, any nodes that were
           // chosen and nodes that were tried but were not chosen because they
           // were stale, decommissioned or for any other reason a node is not
           // chosen for write. Retry again now not avoiding stale node
@@ -273,6 +274,8 @@ public class BlockPlacementPolicyDefault
       if (isGoodTarget(localMachine, blocksize, maxNodesPerRack, false,
           results, avoidStaleNodes)) {
         results.add(localMachine);
+        // add localMachine and related nodes to excludedNode
+        addToExcludedNodes(localMachine, excludedNodes);
         return localMachine;
       }
     }
@@ -281,7 +284,19 @@ public class BlockPlacementPolicyDefault
     return chooseLocalRack(localMachine, excludedNodes, blocksize,
         maxNodesPerRack, results, avoidStaleNodes);
   }
-
+
+  /**
+   * Add localMachine and related nodes to excludedNodes
+   * for next replica choosing. In a subclass, we can add more nodes within
+   * the same failure domain of localMachine
+   * @return number of new excluded nodes
+   */
+  protected int addToExcludedNodes(DatanodeDescriptor localMachine,
+      HashMap<Node, Node> excludedNodes) {
+    Node node = excludedNodes.put(localMachine, localMachine);
+    return node == null?1:0;
+  }
+
   /* choose one node from the rack that localMachine is on.
    * if no such node is available, choose one node from the rack where
    * a second replica is on.
@@ -392,6 +407,8 @@ public class BlockPlacementPolicyDefault
       if (isGoodTarget(chosenNode, blocksize, maxNodesPerRack,
          results, avoidStaleNodes)) {
         results.add(chosenNode);
+        // add chosenNode and related nodes to excludedNode
+        addToExcludedNodes(chosenNode, excludedNodes);
         adjustExcludedNodes(excludedNodes, chosenNode);
         return chosenNode;
       } else {
@@ -441,6 +458,9 @@ public class BlockPlacementPolicyDefault
           maxNodesPerRack, results, avoidStaleNodes)) {
         numOfReplicas--;
         results.add(chosenNode);
+        // add chosenNode and related nodes to excludedNode
+        int newExcludedNodes = addToExcludedNodes(chosenNode, excludedNodes);
+        numOfAvailableNodes -= newExcludedNodes;
         adjustExcludedNodes(excludedNodes, chosenNode);
       } else {
         badTarget = true;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java Mon Dec 10 03:37:32 2012
@@ -240,6 +240,27 @@ public class BlockPlacementPolicyWithNod
     String nodeGroupString = cur.getNetworkLocation();
     return NetworkTopology.getFirstHalf(nodeGroupString);
   }
+
+  /**
+   * Find other nodes in the same nodegroup of localMachine and add them
+   * into excludeNodes as replica should not be duplicated for nodes
+   * within the same nodegroup
+   * @return number of new excluded nodes
+   */
+  protected int addToExcludedNodes(DatanodeDescriptor localMachine,
+      HashMap<Node, Node> excludedNodes) {
+    int countOfExcludedNodes = 0;
+    String nodeGroupScope = localMachine.getNetworkLocation();
+    List<Node> leafNodes = clusterMap.getLeaves(nodeGroupScope);
+    for (Node leafNode : leafNodes) {
+      Node node = excludedNodes.put(leafNode, leafNode);
+      if (node == null) {
+        // not an existing node in excludedNodes
+        countOfExcludedNodes++;
+      }
+    }
+    return countOfExcludedNodes;
+  }
 
   /**
    * Pick up replica node set for deleting replica as over-replicated.
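The override above widens exclusion from a single node to its whole node group, so two replicas can never land in one virtualized failure domain. A toy sketch of that bookkeeping, with the topology flattened to a map (the helper below is illustrative, not the real NetworkTopology API):

  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  public class NodeGroupExclusionSketch {
    /** Exclude every leaf in the chosen node's group; count only new entries. */
    static int addToExcludedNodes(String chosen, Map<String, String> excluded,
                                  Map<String, List<String>> leavesByGroup) {
      int added = 0;
      String group = chosen.substring(0, chosen.lastIndexOf('/'));
      for (String peer : leavesByGroup.get(group)) {
        if (excluded.put(peer, peer) == null) {
          added++;  // mirrors the null check on HashMap.put in the diff
        }
      }
      return added;
    }

    public static void main(String[] args) {
      Map<String, List<String>> groups = new HashMap<>();
      groups.put("/d1/r1/n1", List.of("/d1/r1/n1/dn1", "/d1/r1/n1/dn2"));
      Map<String, String> excluded = new HashMap<>();
      int n = addToExcludedNodes("/d1/r1/n1/dn1", excluded, groups);
      System.out.println(n + " newly excluded: " + excluded.keySet());
    }
  }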
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java Mon Dec 10 03:37:32 2012
@@ -26,6 +26,7 @@ import java.util.concurrent.CopyOnWriteA
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -411,7 +412,7 @@ class BPOfferService {
     final long txid = nnHaState.getTxId();
 
     final boolean nnClaimsActive =
-      nnHaState.getState() == NNHAStatusHeartbeat.State.ACTIVE;
+      nnHaState.getState() == HAServiceState.ACTIVE;
     final boolean bposThinksActive = bpServiceToActive == actor;
     final boolean isMoreRecentClaim = txid > lastActiveClaimTxId;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Mon Dec 10 03:37:32 2012
@@ -24,6 +24,7 @@ import java.net.SocketTimeoutException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hdfs.protocolPB
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
@@ -414,14 +416,23 @@ public class BackupNode extends NameNode
         + HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
     return nsInfo;
   }
-
+
+  @Override
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getBackupNameServiceId(conf);
+  }
+
+  protected HAState createHAState() {
+    return new BackupState();
+  }
+
+  @Override // NameNode
   protected NameNodeHAContext createHAContext() {
     return new BNHAContext();
   }
-
+
   private class BNHAContext extends NameNodeHAContext {
-    @Override // NameNode
+    @Override // NameNodeHAContext
     public void checkOperation(OperationCategory op)
         throws StandbyException {
       if (op == OperationCategory.UNCHECKED ||
@@ -435,10 +446,42 @@ public class BackupNode extends NameNode
         throw new StandbyException(msg);
       }
     }
-  }
-
-  @Override
-  protected String getNameServiceId(Configuration conf) {
-    return DFSUtil.getBackupNameServiceId(conf);
+
+    @Override // NameNodeHAContext
+    public void prepareToStopStandbyServices() throws ServiceFailedException {
+    }
+
+    /**
+     * Start services for BackupNode.
+     * <p>
+     * The following services should be muted
+     * (not run or not pass any control commands to DataNodes)
+     * on BackupNode:
+     * {@link LeaseManager.Monitor} protected by SafeMode.
+     * {@link BlockManager.ReplicationMonitor} protected by SafeMode.
+     * {@link HeartbeatManager.Monitor} protected by SafeMode.
+     * {@link DecommissionManager.Monitor} need to prohibit refreshNodes().
+     * {@link PendingReplicationBlocks.PendingReplicationMonitor} harmless,
+     * because ReplicationMonitor is muted.
+     */
+    @Override
+    public void startActiveServices() throws IOException {
+      try {
+        namesystem.startActiveServices();
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
+    }
+
+    @Override
+    public void stopActiveServices() throws IOException {
+      try {
+        if (namesystem != null) {
+          namesystem.stopActiveServices();
+        }
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
+    }
   }
 }
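Both overrides above wrap the namesystem call so that any Throwable leads to doImmediateShutdown rather than a half-started BackupNode. A generic, self-contained sketch of that fail-fast idiom (all names illustrative):

  public class FailFastSketch {
    interface Service {
      void start() throws Exception;
    }

    /** Run a critical startup step; abort the process on any failure. */
    static void startOrDie(Service s) {
      try {
        s.start();
      } catch (Throwable t) {
        // Analogous to doImmediateShutdown: never continue half-initialized.
        System.err.println("Fatal error during startup: " + t);
        System.exit(1);
      }
    }

    public static void main(String[] args) {
      startOrDie(() -> { /* e.g. namesystem.startActiveServices() */ });
      System.out.println("services started");
    }
  }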
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon Dec 10 03:37:32 2012
@@ -587,6 +587,8 @@ public class FSDirectory implements Clos
         // update modification time of dst and the parent of src
         srcInodes[srcInodes.length-2].setModificationTime(timestamp);
         dstInodes[dstInodes.length-2].setModificationTime(timestamp);
+        // update moved leases with new filename
+        getFSNamesystem().unprotectedChangeLease(src, dst);
         return true;
       }
     } finally {
@@ -752,6 +754,8 @@ public class FSDirectory implements Clos
       }
       srcInodes[srcInodes.length - 2].setModificationTime(timestamp);
       dstInodes[dstInodes.length - 2].setModificationTime(timestamp);
+      // update moved lease with new filename
+      getFSNamesystem().unprotectedChangeLease(src, dst);
 
       // Collect the blocks and remove the lease for previous dst
       int filesDeleted = 0;
@@ -1204,20 +1208,39 @@ public class FSDirectory implements Clos
                   ) throws IOException {
     writeLock();
     try {
-      replaceINodeUnsynced(path, oldnode, newnode);
-
-      //Currently, oldnode and newnode are assumed to contain the same blocks.
-      //Otherwise, blocks need to be removed from the blocksMap.
-      int index = 0;
-      for (BlockInfo b : newnode.getBlocks()) {
-        BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
-        newnode.setBlock(index, info); // inode refers to the block in BlocksMap
-        index++;
-      }
+      unprotectedReplaceNode(path, oldnode, newnode);
     } finally {
       writeUnlock();
     }
   }
+
+  void unprotectedReplaceNode(String path, INodeFile oldnode, INodeFile newnode)
+      throws IOException, UnresolvedLinkException {
+    assert hasWriteLock();
+    INodeDirectory parent = oldnode.parent;
+    // Remove the node from the namespace
+    if (!oldnode.removeNode()) {
+      NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: "
+          + "failed to remove " + path);
+      throw new IOException("FSDirectory.replaceNode: "
+          + "failed to remove " + path);
+    }
+
+    // Parent should be non-null, otherwise oldnode.removeNode() will return
+    // false
+    newnode.setLocalName(oldnode.getLocalNameBytes());
+    parent.addChild(newnode, true);
+
+    /* Currently oldnode and newnode are assumed to contain the same
+     * blocks. Otherwise, blocks need to be removed from the blocksMap.
+     */
+    int index = 0;
+    for (BlockInfo b : newnode.getBlocks()) {
+      BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
+      newnode.setBlock(index, info); // inode refers to the block in BlocksMap
+      index++;
+    }
+  }
 
   /**
    * Get a partial listing of the indicated directory
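The new unprotectedReplaceNode follows the locked/unprotected method-pair idiom used throughout FSDirectory: the public method takes the write lock itself, while the unprotected variant asserts that the caller already holds it, so callers such as the edit-log loader can perform many mutations under a single lock acquisition. A generic sketch of the idiom (the lock field and method names are illustrative):

import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Sketch of the locked/unprotected method-pair idiom. */
class NamespaceLike {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  /** Public entry point: acquires and releases the write lock itself. */
  public void replace(String path) {
    lock.writeLock().lock();
    try {
      unprotectedReplace(path);
    } finally {
      lock.writeLock().unlock();
    }
  }

  /**
   * May be called directly by code that already holds the write lock,
   * e.g. an edit-log replay loop; the assert checks that contract.
   */
  void unprotectedReplace(String path) {
    assert lock.writeLock().isHeldByCurrentThread();
    // ... mutate namespace state for 'path' here ...
  }
}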
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon Dec 10 03:37:32 2012
@@ -914,6 +914,11 @@ public class FSEditLog implements LogsPu
     return journalSet;
   }
 
+  @VisibleForTesting
+  synchronized void setJournalSetForTesting(JournalSet js) {
+    this.journalSet = js;
+  }
+
   /**
    * Used only by tests.
    */
@@ -1067,9 +1072,18 @@ public class FSEditLog implements LogsPu
 
   /**
    * Archive any log files that are older than the given txid.
+   *
+   * If the edit log is not open for write, then this call returns with no
+   * effect.
    */
   @Override
   public synchronized void purgeLogsOlderThan(final long minTxIdToKeep) {
+    // Should not purge logs unless they are open for write.
+    // This prevents the SBN from purging logs on shared storage, for example.
+    if (!isOpenForWrite()) {
+      return;
+    }
+
     assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op
       minTxIdToKeep <= curSegmentTxId :
       "cannot purge logs older than txid " + minTxIdToKeep +
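The guard added to purgeLogsOlderThan encodes an ownership rule: a node that merely reads the edit log (such as a standby replaying from shared storage) must never delete segments the writer still owns, so purging silently becomes a no-op unless this instance is open for write. A standalone sketch of the same rule (class and field names invented for illustration):

/** Sketch of write-ownership gating for log purging. */
class SegmentedLog {
  private boolean openForWrite;
  private long minTxIdKept;

  void beginWriting() { openForWrite = true; }

  /**
   * Drop segments older than minTxIdToKeep. Readers silently skip the
   * purge: only the writer may delete segments.
   */
  synchronized void purgeLogsOlderThan(long minTxIdToKeep) {
    if (!openForWrite) {
      return; // not our log to clean up
    }
    minTxIdKept = Math.max(minTxIdKept, minTxIdToKeep);
    // ... delete segment files whose highest txid < minTxIdKept ...
  }
}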
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Mon Dec 10 03:37:32 2012
@@ -31,7 +31,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
@@ -322,7 +321,7 @@ public class FSEditLogLoader {
         INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
         fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
         INodeFile newFile = ucFile.convertToInodeFile();
-        fsDir.replaceNode(addCloseOp.path, ucFile, newFile);
+        fsDir.unprotectedReplaceNode(addCloseOp.path, ucFile, newFile);
       }
       break;
     }
@@ -360,10 +359,8 @@ public class FSEditLogLoader {
     }
     case OP_RENAME_OLD: {
       RenameOldOp renameOp = (RenameOldOp)op;
-      HdfsFileStatus dinfo = fsDir.getFileInfo(renameOp.dst, false);
       fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
                                 renameOp.timestamp);
-      fsNamesys.unprotectedChangeLease(renameOp.src, renameOp.dst, dinfo);
       break;
     }
     case OP_DELETE: {
@@ -433,11 +430,8 @@ public class FSEditLogLoader {
     }
    case OP_RENAME: {
      RenameOp renameOp = (RenameOp)op;
-
-      HdfsFileStatus dinfo = fsDir.getFileInfo(renameOp.dst, false);
      fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
                                renameOp.timestamp, renameOp.options);
-      fsNamesys.unprotectedChangeLease(renameOp.src, renameOp.dst, dinfo);
      break;
    }
    case OP_GET_DELEGATION_TOKEN: {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java?rev=1419193&r1=1419192&r2=1419193&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java Mon Dec 10 03:37:32 2012
@@ -197,7 +197,7 @@ public class FSImageSerialization {
   public static String readString(DataInputStream in) throws IOException {
     DeprecatedUTF8 ustr = TL_DATA.get().U_STR;
     ustr.readFields(in);
-    return ustr.toString();
+    return ustr.toStringChecked();
   }
 
   static String readString_EmptyAsNull(DataInputStream in) throws IOException {