From: todd@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r1387449 [1/2] - in /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ hadoop-hdfs-httpfs/src/site/apt/ hadoop-hdfs-httpfs/src/test/java/org/apache/hado...
Date: Wed, 19 Sep 2012 04:35:01 -0000
Message-Id: <20120919043504.5335F2388A40@eris.apache.org>

Author: todd
Date: Wed Sep 19 04:34:55 2012
New Revision: 1387449

URL: http://svn.apache.org/viewvc?rev=1387449&view=rev
Log:
Merge trunk into branch

Removed:
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolPB.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto
Modified:
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1383030-1387448

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml Wed Sep 19 04:34:55 2012
@@ -301,6 +301,12 @@
             ${project.build.directory}/test-classes/krb5.conf
             ${kerberos.realm}
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
           
             **/${test.exclude}.java
             ${test.exclude.pattern}

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java Wed Sep 19 04:34:55 2012
@@ -181,7 +181,7 @@ public abstract class ServerWebApp exten
       throw new ServerException(ServerException.ERROR.S13, portKey);
     }
     try {
-      InetAddress add = InetAddress.getByName(hostnameKey);
+      InetAddress add = InetAddress.getByName(host);
       int portNum = Integer.parseInt(port);
       return new InetSocketAddress(add, portNum);
     } catch (UnknownHostException ex) {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm Wed Sep 19 04:34:55 2012
@@ -81,8 +81,3 @@ Hadoop HDFS over HTTP - Documentation Se
   * {{{./UsingHttpTools.html}Using HTTP Tools}}
 
-* Current Limitations
-
-  <<>> operations are not supported.
-
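The ServerWebApp hunk above (HDFS-3944) is a one-word bug fix: resolveAuthority() was resolving the name of the configuration key rather than the host value read from it. A minimal sketch of the failure mode, assuming nothing beyond java.net; the key and value strings below are illustrative:

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public class ResolveAuthorityBug {
      public static void main(String[] args) throws UnknownHostException {
        String hostnameKey = "httpfs.http.hostname";  // illustrative config key name
        String host = "localhost";                    // value configured under that key
        // Post-patch behavior: resolve the configured value.
        System.out.println(InetAddress.getByName(host));
        // Pre-patch behavior: resolved the key name itself, which throws
        // UnknownHostException unless a machine is literally named
        // "httpfs.http.hostname".
        System.out.println(InetAddress.getByName(hostnameKey));
      }
    }

The new testResolveAuthority() below pins the fixed behavior by configuring a hostname and port and asserting on the resolved InetSocketAddress.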
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java Wed Sep 19 04:34:55 2012
@@ -24,8 +24,11 @@ import org.apache.hadoop.lib.server.Serv
 import org.apache.hadoop.test.HTestCase;
 import org.apache.hadoop.test.TestDir;
 import org.apache.hadoop.test.TestDirHelper;
+import org.junit.Assert;
 import org.junit.Test;
 
+import java.net.InetSocketAddress;
+
 public class TestServerWebApp extends HTestCase {
 
   @Test(expected = IllegalArgumentException.class)
@@ -74,4 +77,23 @@ public class TestServerWebApp extends HT
     server.contextInitialized(null);
   }
 
+  @Test
+  @TestDir
+  public void testResolveAuthority() throws Exception {
+    String dir = TestDirHelper.getTestDir().getAbsolutePath();
+    System.setProperty("TestServerWebApp3.home.dir", dir);
+    System.setProperty("TestServerWebApp3.config.dir", dir);
+    System.setProperty("TestServerWebApp3.log.dir", dir);
+    System.setProperty("TestServerWebApp3.temp.dir", dir);
+    System.setProperty("testserverwebapp3.http.hostname", "localhost");
+    System.setProperty("testserverwebapp3.http.port", "14000");
+    ServerWebApp server = new ServerWebApp("TestServerWebApp3") {
+    };
+
+    InetSocketAddress address = server.resolveAuthority();
+    Assert.assertEquals("localhost", address.getHostName());
+    Assert.assertEquals(14000, address.getPort());
+  }
+
 }
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Sep 19 04:34:55 2012
@@ -16,6 +16,10 @@ Trunk (Unreleased)
     HDFS-3601. Add BlockPlacementPolicyWithNodeGroup to support block
     placement with 4-layer network topology. (Junping Du via szetszwo)
 
+    HDFS-3703. Datanodes are marked stale if heartbeat is not received in
+    configured timeout and are selected as the last location to read from.
+    (Jing Zhao via suresh)
+
   IMPROVEMENTS
 
     HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
@@ -225,11 +229,23 @@ Release 2.0.3-alpha - Unreleased
     (Jaimin D Jetly and Jing Zhao via szetszwo)
 
   IMPROVEMENTS
+
+    HDFS-3925. Prettify PipelineAck#toString() for printing to a log
+    (Andrew Wang via todd)
+
+    HDFS-3939. NN RPC address cleanup. (eli)
 
   OPTIMIZATIONS
 
   BUG FIXES
 
+    HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever.
+    (Andy Isaacson via eli)
+
+    HDFS-3924. Multi-byte id in HdfsVolumeId. (Andrew Wang via atm)
+
+    HDFS-3936. MiniDFSCluster shutdown races with BlocksMap usage. (eli)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES
@@ -464,6 +480,10 @@ Release 2.0.2-alpha - 2012-09-07
     HDFS-3888. Clean up BlockPlacementPolicyDefault. (Jing Zhao via szetszwo)
 
+    HDFS-3907. Allow multiple users for local block readers. (eli)
+
+    HDFS-3910. DFSTestUtil#waitReplication should timeout. (eli)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
@@ -750,7 +770,16 @@ Release 2.0.2-alpha - 2012-09-07
     HDFS-2757. Cannot read a local block that's being written to when
     using the local read short circuit. (Jean-Daniel Cryans via eli)
-
+
+    HDFS-3664. BlockManager race when stopping active services.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3928. MiniDFSCluster should reset the first ExitException on shutdown. (eli)
+
+    HDFS-3938. remove current limitations from HttpFS docs. (tucu)
+
+    HDFS-3944. Httpfs resolveAuthority() is not resolving host correctly. (tucu)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
@@ -770,6 +799,9 @@ Release 2.0.2-alpha - 2012-09-07
     HDFS-3833. TestDFSShell fails on windows due to concurrent file
     read/write. (Brandon Li via suresh)
 
+    HDFS-3902. TestDatanodeBlockScanner#testBlockCorruptionPolicy is broken.
+    (Andy Isaacson via eli)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/pom.xml Wed Sep 19 04:34:55 2012
@@ -189,6 +189,12 @@ http://maven.apache.org/xsd/maven-4.0.0.
             ${startKdc}
             ${kdc.resource.dir}
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
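Both pom.xml hunks in this merge (hadoop-hdfs-httpfs above, hadoop-hdfs here) register the same surefire "listener" property, so TimedOutTestsListener can dump diagnostics when a test times out. A minimal sketch of what such a listener plugs into, assuming only JUnit 4's public RunListener API; the class name below is illustrative, not the actual TimedOutTestsListener implementation:

    import org.junit.runner.Description;
    import org.junit.runner.notification.Failure;
    import org.junit.runner.notification.RunListener;

    // Surefire instantiates the class named by the "listener" property and
    // wires it into the JUnit run notifier alongside its own listeners.
    public class LoggingRunListener extends RunListener {
      @Override
      public void testStarted(Description description) {
        System.out.println("started: " + description.getDisplayName());
      }

      @Override
      public void testFailure(Failure failure) {
        System.out.println("failed: " + failure.getMessage());
      }
    }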
Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1383030-1387448

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java Wed Sep 19 04:34:55 2012
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
+import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,10 +32,10 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.Public
 public class HdfsVolumeId implements VolumeId {
 
-  private final byte id;
+  private final byte[] id;
   private final boolean isValid;
 
-  public HdfsVolumeId(byte id, boolean isValid) {
+  public HdfsVolumeId(byte[] id, boolean isValid) {
     this.id = id;
     this.isValid = isValid;
   }
@@ -69,6 +70,6 @@ public class HdfsVolumeId implements Vol
   @Override
   public String toString() {
-    return Byte.toString(id);
+    return Base64.encodeBase64String(id);
   }
 }
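HDFS-3924 (listed in CHANGES.txt above) widens the volume id from a single byte to a byte array, and toString() switches to Base64 so arbitrary bytes stay printable. A round-trip sketch, assuming only commons-codec; the 4-byte id matches what FsDatasetImpl now produces (see its hunk further down):

    import java.nio.ByteBuffer;
    import org.apache.commons.codec.binary.Base64;

    public class VolumeIdDemo {
      public static void main(String[] args) {
        // A 4-byte id for volume index 2, as FsDatasetImpl now encodes it.
        byte[] id = ByteBuffer.allocate(4).putInt(2).array();
        // Base64 keeps arbitrary bytes printable; the old Byte.toString()
        // could not represent a multi-byte id at all.
        System.out.println(Base64.encodeBase64String(id));  // prints "AAAAAg=="
      }
    }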
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java Wed Sep 19 04:34:55 2012
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -201,7 +202,7 @@ class BlockStorageLocationUtil {
       ArrayList<VolumeId> l = new ArrayList<VolumeId>(b.getLocations().length);
       // Start off all IDs as invalid, fill it in later with results from RPCs
       for (int i = 0; i < b.getLocations().length; i++) {
-        l.add(new HdfsVolumeId((byte)-1, false));
+        l.add(new HdfsVolumeId(null, false));
       }
       blockVolumeIds.put(b, l);
     }
@@ -234,8 +235,8 @@ class BlockStorageLocationUtil {
       }
       // Get the VolumeId by indexing into the list of VolumeIds
      // provided by the datanode
-      HdfsVolumeId id = new HdfsVolumeId(metaVolumeIds.get(volumeIndex)[0],
-          true);
+      byte[] volumeId = metaVolumeIds.get(volumeIndex);
+      HdfsVolumeId id = new HdfsVolumeId(volumeId, true);
       // Find out which index we are in the LocatedBlock's replicas
       LocatedBlock locBlock = extBlockToLocBlock.get(extBlock);
       DatanodeInfo[] dnInfos = locBlock.getLocations();
@@ -255,8 +256,8 @@ class BlockStorageLocationUtil {
       }
       // Place VolumeId at the same index as the DN's index in the list of
       // replicas
-      List<VolumeId> VolumeIds = blockVolumeIds.get(locBlock);
-      VolumeIds.set(index, id);
+      List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
+      volumeIds.set(index, id);
     }
   }
   return blockVolumeIds;

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Wed Sep 19 04:34:55 2012
@@ -174,6 +174,13 @@ public class DFSConfigKeys extends Commo
   public static final boolean DFS_DATANODE_SYNCONCLOSE_DEFAULT = false;
   public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
   public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;
+
+  // Whether to enable datanode's stale state detection and usage
+  public static final String DFS_NAMENODE_CHECK_STALE_DATANODE_KEY = "dfs.namenode.check.stale.datanode";
+  public static final boolean DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT = false;
+  // The default value of the time interval for marking datanodes as stale
+  public static final String DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY = "dfs.namenode.stale.datanode.interval";
+  public static final long DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT = 30 * 1000; // 30s
 
   // Replication monitoring related keys
   public static final String DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION =

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Sep 19 04:34:55 2012
@@ -128,6 +128,43 @@ public class DFSUtil {
           a.isDecommissioned() ? 1 : -1;
       }
     };
+
+
+  /**
+   * Comparator for sorting DatanodeInfo[] based on decommissioned/stale states.
+   * Decommissioned/stale nodes are moved to the end of the array on sorting
+   * with this comparator.
+   */
+  @InterfaceAudience.Private
+  public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
+    private long staleInterval;
+
+    /**
+     * Constructor of DecomStaleComparator
+     *
+     * @param interval
+     *          The time interval for marking datanodes as stale is passed from
+     *          outside, since the interval may be changed dynamically
+     */
+    public DecomStaleComparator(long interval) {
+      this.staleInterval = interval;
+    }
+
+    @Override
+    public int compare(DatanodeInfo a, DatanodeInfo b) {
+      // Decommissioned nodes will still be moved to the end of the list
+      if (a.isDecommissioned()) {
+        return b.isDecommissioned() ? 0 : 1;
+      } else if (b.isDecommissioned()) {
+        return -1;
+      }
+      // Stale nodes will be moved behind the normal nodes
+      boolean aStale = a.isStale(staleInterval);
+      boolean bStale = b.isStale(staleInterval);
+      return aStale == bStale ? 0 : (aStale ? 1 : -1);
+    }
+  }
+
   /**
    * Address matcher for matching an address to local address
    */
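A minimal sketch of how the comparator above slots into replica ordering; the helper name is illustrative, but this mirrors what DatanodeManager.sortLocatedBlocks does in its hunk further down:

    import java.util.Arrays;
    import java.util.Comparator;

    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    public class ReplicaOrderDemo {
      // Order a block's replicas for a reader: fresh nodes first (keeping
      // their topology order, since Arrays.sort is stable), stale nodes
      // next, decommissioned nodes last.
      static void orderReplicas(DatanodeInfo[] locations, long staleIntervalMs) {
        Comparator<DatanodeInfo> cmp =
            new DFSUtil.DecomStaleComparator(staleIntervalMs);
        Arrays.sort(locations, cmp);
      }
    }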
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java Wed Sep 19 04:34:55 2012
@@ -105,4 +105,9 @@ public class HdfsConfiguration extends C
     deprecate("dfs.federation.nameservices", DFSConfigKeys.DFS_NAMESERVICES);
     deprecate("dfs.federation.nameservice.id", DFSConfigKeys.DFS_NAMESERVICE_ID);
   }
+
+  public static void main(String[] args) {
+    init();
+    Configuration.dumpDeprecatedKeys();
+  }
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java Wed Sep 19 04:34:55 2012
@@ -36,8 +36,6 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
@@ -67,6 +65,8 @@ import org.apache.hadoop.security.Securi
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.GetUserMappingsProtocolPB;
+import org.apache.hadoop.tools.impl.pb.client.GetUserMappingsProtocolPBClientImpl;
 
 import com.google.common.base.Preconditions;
 
@@ -218,7 +218,7 @@ public class NameNodeProxies {
       throws IOException {
     GetUserMappingsProtocolPB proxy = (GetUserMappingsProtocolPB)
         createNameNodeProxy(address, conf, ugi, GetUserMappingsProtocolPB.class, 0);
-    return new GetUserMappingsProtocolClientSideTranslatorPB(proxy);
+    return new GetUserMappingsProtocolPBClientImpl(proxy);
   }
 
   private static NamenodeProtocol createNNProxyWithNamenodeProtocol(
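The new HdfsConfiguration.main() in the hunk above gives a quick way to print the deprecated-key table. A sketch of the equivalent from user code, assuming init() and Configuration.dumpDeprecatedKeys() remain publicly accessible as the hunk suggests:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DumpDeprecatedKeys {
      public static void main(String[] args) {
        HdfsConfiguration.init();            // registers the HDFS key deprecations
        Configuration.dumpDeprecatedKeys();  // prints old-key -> new-key mappings
      }
    }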
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Wed Sep 19 04:34:55 2012
@@ -27,6 +27,7 @@ import org.apache.hadoop.net.NetworkTopo
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 
 /**
  * This class extends the primary identifier of a Datanode with ephemeral
@@ -321,7 +322,24 @@ public class DatanodeInfo extends Datano
     }
     return adminState;
   }
-
+
+  /**
+   * Check if the datanode is in stale state. Here if
+   * the namenode has not received a heartbeat from a
+   * datanode for more than staleInterval (default value is
+   * {@link DFSConfigKeys#DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT}),
+   * the datanode will be treated as a stale node.
+   *
+   * @param staleInterval
+   *          the time interval for marking the node as stale. If the last
+   *          update time is beyond the given time interval, the node will be
+   *          marked as stale.
+   * @return true if the node is stale
+   */
+  public boolean isStale(long staleInterval) {
+    return (Time.now() - lastUpdate) >= staleInterval;
+  }
+
   /**
    * Sets the admin state of this node.
   */

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java Wed Sep 19 04:34:55 2012
@@ -30,6 +30,8 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 
+import com.google.protobuf.TextFormat;
+
 /** Pipeline Acknowledgment **/
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -120,6 +122,6 @@ public class PipelineAck {
   @Override //Object
   public String toString() {
-    return proto.toString();
+    return TextFormat.shortDebugString(proto);
   }
 }
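HDFS-3925 above swaps protobuf's multi-line toString() for TextFormat.shortDebugString(), which renders the same message on a single line and is friendlier in log output. A sketch with an illustrative ack; any generated protobuf message works the same way:

    import com.google.protobuf.TextFormat;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

    public class AckToStringDemo {
      public static void main(String[] args) {
        // Illustrative ack: one sequence number, one per-datanode status.
        PipelineAckProto proto = PipelineAckProto.newBuilder()
            .setSeqno(42)
            .addStatus(Status.SUCCESS)
            .build();
        System.out.println(proto);                               // multi-line form
        System.out.println(TextFormat.shortDebugString(proto));  // "seqno: 42 status: SUCCESS"
      }
    }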
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Wed Sep 19 04:34:55 2012
@@ -363,11 +363,10 @@ public class BlockManager {
         replicationThread.join(3000);
       }
     } catch (InterruptedException ie) {
-    } finally {
-      if (pendingReplications != null) pendingReplications.stop();
-      blocksMap.close();
-      datanodeManager.close();
     }
+    datanodeManager.close();
+    pendingReplications.stop();
+    blocksMap.close();
   }
 
   /** @return the datanodeManager */

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java Wed Sep 19 04:34:55 2012
@@ -94,7 +94,7 @@ class BlocksMap {
   }
 
   void close() {
-    blocks = null;
+    // Empty blocks once GSet#clear is implemented (HDFS-3940)
   }
 
   BlockCollection getBlockCollection(Block b) {
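The two hunks above are the core of HDFS-3936: BlockManager stops nulling the shared blocks map out from under in-flight readers, and shutdown ordering moves out of the finally block. These fixes, together with the DatanodeManager and HeartbeatManager hunks below, converge on one pattern: interrupt the worker thread, then bound the wait with a timed join so close() can neither hang nor race the thread's last use of shared state. A generic sketch of that pattern; the helper name is illustrative:

    public final class Shutdown {
      // Interrupt a worker and wait up to timeoutMs for it to exit,
      // restoring this thread's interrupt status if the wait is cut short.
      static void interruptAndJoin(Thread worker, long timeoutMs) {
        worker.interrupt();
        try {
          worker.join(timeoutMs);  // bounded: never blocks shutdown forever
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    }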
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Wed Sep 19 04:34:55 2012
@@ -25,6 +25,7 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -132,6 +133,11 @@ public class DatanodeManager {
    */
   private boolean hasClusterEverBeenMultiRack = false;
 
+  /** Whether or not to check the stale datanodes */
+  private volatile boolean checkForStaleNodes;
+  /** The time interval for detecting stale datanodes */
+  private volatile long staleInterval;
+
   DatanodeManager(final BlockManager blockManager,
       final Namesystem namesystem, final Configuration conf
       ) throws IOException {
@@ -175,6 +181,21 @@ public class DatanodeManager {
         DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
     LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
         + "=" + this.blockInvalidateLimit);
+    // set the value of stale interval based on configuration
+    this.checkForStaleNodes = conf.getBoolean(
+        DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY,
+        DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_DEFAULT);
+    if (this.checkForStaleNodes) {
+      this.staleInterval = conf.getLong(
+          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
+          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT);
+      if (this.staleInterval < DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT) {
+        LOG.warn("The given interval for marking stale datanode = "
+            + this.staleInterval + ", which is smaller than the default value "
+            + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_MILLI_DEFAULT
+            + ".");
+      }
+    }
   }
 
   private Daemon decommissionthread = null;
@@ -192,7 +213,13 @@ public class DatanodeManager {
   }
 
   void close() {
-    if (decommissionthread != null) decommissionthread.interrupt();
+    if (decommissionthread != null) {
+      decommissionthread.interrupt();
+      try {
+        decommissionthread.join(3000);
+      } catch (InterruptedException e) {
+      }
+    }
     heartbeatManager.close();
   }
 
@@ -225,14 +252,17 @@ public class DatanodeManager {
       if (rName != null)
         client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + targethost);
     }
+
+    Comparator<DatanodeInfo> comparator = checkForStaleNodes ?
+        new DFSUtil.DecomStaleComparator(staleInterval) :
+        DFSUtil.DECOM_COMPARATOR;
     for (LocatedBlock b : locatedblocks) {
       networktopology.pseudoSortByDistance(client, b.getLocations());
-
-      // Move decommissioned datanodes to the bottom
-      Arrays.sort(b.getLocations(), DFSUtil.DECOM_COMPARATOR);
+      // Move decommissioned/stale datanodes to the bottom
+      Arrays.sort(b.getLocations(), comparator);
     }
   }
-
+
   CyclicIteration getDatanodeCyclicIteration(final String firstkey) {
     return new CyclicIteration(
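The staleness machinery above is gated by the two keys added in the DFSConfigKeys hunk earlier. A sketch of turning it on; the interval value is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class StaleNodeConfigDemo {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Off by default; turning it on makes DatanodeManager pick the
        // stale-aware comparator when sorting block locations.
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
        // Intervals below the 30s default draw the warning logged above.
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, 60 * 1000L);
      }
    }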
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java Wed Sep 19 04:34:55 2012
@@ -74,6 +74,11 @@ class HeartbeatManager implements Datano
 
   void close() {
     heartbeatThread.interrupt();
+    try {
+      // This will have no effect if the thread hasn't yet been started.
+      heartbeatThread.join(3000);
+    } catch (InterruptedException e) {
+    }
   }
 
   synchronized int getLiveDatanodeCount() {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Wed Sep 19 04:34:55 2012
@@ -374,7 +374,8 @@ class BlockPoolSliceScanner {
       throttler.setBandwidth(Math.min(bw, MAX_SCAN_RATE));
   }
 
-  private void verifyBlock(ExtendedBlock block) {
+  @VisibleForTesting
+  void verifyBlock(ExtendedBlock block) {
     BlockSender blockSender = null;
 
     /* In case of failure, attempt to read second time to reduce

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Wed Sep 19 04:34:55 2012
@@ -172,7 +172,8 @@ public class DataBlockScanner implements
     return blockPoolScannerMap.size();
   }
 
-  private synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
+  @VisibleForTesting
+  synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
     return blockPoolScannerMap.get(bpid);
   }
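The two scanner hunks above (part of the HDFS-3902 test fix) relax private methods to package-private so tests can drive them directly, with Guava's @VisibleForTesting recording why. A minimal sketch of the convention; the class and method names are illustrative:

    import com.google.common.annotations.VisibleForTesting;

    public class Scanner {
      // Package-private instead of private solely so a test in the same
      // package can call it; the annotation documents that intent for
      // readers and analysis tools but has no runtime effect.
      @VisibleForTesting
      void verifyOnce() {
        // ... verification work ...
      }
    }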
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Sep 19 04:34:55 2012
@@ -277,7 +277,7 @@ public class DataNode extends Configured
   private AbstractList dataDirs;
   private Configuration conf;
 
-  private final String userWithLocalPathAccess;
+  private final List<String> usersWithLocalPathAccess;
   private boolean connectToDnViaHostname;
   ReadaheadPool readaheadPool;
   private final boolean getHdfsBlockLocationsEnabled;
@@ -300,8 +300,8 @@ public class DataNode extends Configured
            final SecureResources resources) throws IOException {
     super(conf);
 
-    this.userWithLocalPathAccess =
-        conf.get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
+    this.usersWithLocalPathAccess = Arrays.asList(
+        conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
     this.connectToDnViaHostname = conf.getBoolean(
         DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
         DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
@@ -1012,7 +1012,7 @@ public class DataNode extends Configured
   private void checkBlockLocalPathAccess() throws IOException {
     checkKerberosAuthMethod("getBlockLocalPathInfo()");
     String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
-    if (!currentUser.equals(this.userWithLocalPathAccess)) {
+    if (!usersWithLocalPathAccess.contains(currentUser)) {
       throw new AccessControlException(
           "Can't continue with getBlockLocalPathInfo() "
               + "authorization. The user " + currentUser
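HDFS-3907 above turns the single local-path-access user into a list: Configuration.getTrimmedStrings splits the comma-separated value and trims whitespace, so the authorization check becomes a contains() lookup. A sketch with illustrative user names:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class LocalReaderUsersDemo {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Illustrative value: two short-circuit-read users instead of one.
        conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, " hbase, mapred ");
        List<String> users = Arrays.asList(
            conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
        System.out.println(users);                    // [hbase, mapred]
        System.out.println(users.contains("hbase"));  // true
      }
    }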
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java Wed Sep 19 04:34:55 2012
@@ -24,6 +24,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -1676,10 +1677,10 @@ class FsDatasetImpl implements FsDataset
     List<byte[]> blocksVolumeIds = new ArrayList<byte[]>(volumes.volumes.size());
     // List of indexes into the list of VolumeIds, pointing at the VolumeId of
     // the volume that the block is on
-    List<Integer> blocksVolumendexes = new ArrayList<Integer>(blocks.size());
+    List<Integer> blocksVolumeIndexes = new ArrayList<Integer>(blocks.size());
     // Initialize the list of VolumeIds simply by enumerating the volumes
     for (int i = 0; i < volumes.volumes.size(); i++) {
-      blocksVolumeIds.add(new byte[] { (byte) i });
+      blocksVolumeIds.add(ByteBuffer.allocate(4).putInt(i).array());
     }
     // Determine the index of the VolumeId of each block's volume, by comparing
     // the block's volume against the enumerated volumes
@@ -1700,10 +1701,10 @@ class FsDatasetImpl implements FsDataset
       if (!isValid) {
         volumeIndex = Integer.MAX_VALUE;
       }
-      blocksVolumendexes.add(volumeIndex);
+      blocksVolumeIndexes.add(volumeIndex);
     }
     return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[] {}),
-        blocksVolumeIds, blocksVolumendexes);
+        blocksVolumeIds, blocksVolumeIndexes);
   }
 
   @Override
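The id scheme above, paired with the HdfsVolumeId change earlier, encodes each volume index as a 4-byte big-endian int rather than a single byte, so datanodes with more than 127 volumes get distinct ids. A round-trip sketch using only the JDK:

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public class VolumeIndexCodec {
      public static void main(String[] args) {
        int volumeIndex = 300;  // would not fit in the old single-byte scheme
        byte[] id = ByteBuffer.allocate(4).putInt(volumeIndex).array();  // encode
        int decoded = ByteBuffer.wrap(id).getInt();                      // decode
        System.out.println(Arrays.toString(id) + " -> " + decoded);      // -> 300
      }
    }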
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Sep 19 04:34:55 2012
@@ -1173,6 +1173,14 @@ public class FSNamesystem implements Nam
     if (blocks != null) {
       blockManager.getDatanodeManager().sortLocatedBlocks(
           clientMachine, blocks.getLocatedBlocks());
+
+      LocatedBlock lastBlock = blocks.getLastLocatedBlock();
+      if (lastBlock != null) {
+        ArrayList<LocatedBlock> lastBlockList = new ArrayList<LocatedBlock>();
+        lastBlockList.add(lastBlock);
+        blockManager.getDatanodeManager().sortLocatedBlocks(
+            clientMachine, lastBlockList);
+      }
     }
     return blocks;
   }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java Wed Sep 19 04:34:55 2012
@@ -71,7 +71,7 @@ public class FileChecksumServlets {
       String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
       dtParam = JspHelper.getDelegationTokenUrlParam(tokenString);
     }
-    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
+    String addr = nn.getNameNodeAddressHostPortString();
     String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
 
     return new URL(scheme, hostname, port,

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java Wed Sep 19 04:34:55 2012
@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ServletUtil;
 
@@ -74,7 +73,7 @@ public class FileDataServlet extends Dfs
       // Add namenode address to the url params
       NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
           getServletContext());
-      String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
+      String addr = nn.getNameNodeAddressHostPortString();
       String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
 
       return new URL(scheme, hostname, port,
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Sep 19 04:34:55 2012
@@ -35,7 +35,6 @@ import org.apache.hadoop.ha.HAServicePro
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.ServiceFailedException;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Trash;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
@@ -489,9 +488,9 @@ public class NameNode {
         LOG.warn("ServicePlugin " + p + " could not be started", t);
       }
     }
-    LOG.info(getRole() + " up at: " + rpcServer.getRpcAddress());
+    LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
     if (rpcServer.getServiceRpcAddress() != null) {
-      LOG.info(getRole() + " service server is up at: "
+      LOG.info(getRole() + " service RPC up at: "
           + rpcServer.getServiceRpcAddress());
     }
   }
@@ -617,7 +616,7 @@ public class NameNode {
    */
   public void join() {
     try {
-      this.rpcServer.join();
+      rpcServer.join();
     } catch (InterruptedException ie) {
       LOG.info("Caught interrupted exception ", ie);
     }
@@ -665,27 +664,31 @@ public class NameNode {
   }
 
   /**
-   * Returns the address on which the NameNodes is listening to.
-   * @return namenode rpc address
+   * @return NameNode RPC address
    */
   public InetSocketAddress getNameNodeAddress() {
     return rpcServer.getRpcAddress();
   }
-
+
+  /**
+   * @return NameNode RPC address in "host:port" string form
+   */
+  public String getNameNodeAddressHostPortString() {
+    return NetUtils.getHostPortString(rpcServer.getRpcAddress());
+  }
+
   /**
-   * Returns namenode service rpc address, if set. Otherwise returns
-   * namenode rpc address.
-   * @return namenode service rpc address used by datanodes
+   * @return NameNode service RPC address if configured, the
+   *     NameNode RPC address otherwise
    */
   public InetSocketAddress getServiceRpcAddress() {
-    return rpcServer.getServiceRpcAddress() != null ? rpcServer.getServiceRpcAddress() : rpcServer.getRpcAddress();
+    final InetSocketAddress serviceAddr = rpcServer.getServiceRpcAddress();
+    return serviceAddr == null ? rpcServer.getRpcAddress() : serviceAddr;
   }
 
   /**
-   * Returns the address of the NameNodes http server,
-   * which is used to access the name-node web UI.
-   *
-   * @return the http address.
+   * @return NameNode HTTP address, used by the Web UI, image transfer,
+   *    and HTTP-based file system clients like Hftp and WebHDFS
    */
   public InetSocketAddress getHttpAddress() {
     return httpServer.getHttpAddress();
@@ -1171,10 +1174,12 @@ public class NameNode {
         NAMESERVICE_SPECIFIC_KEYS);
     }
 
+    // If the RPC address is set use it to (re-)configure the default FS
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
       conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+      LOG.info("Setting " + FS_DEFAULT_NAME_KEY + " to " + defaultUri.toString());
     }
   }
@@ -1196,8 +1201,9 @@ public class NameNode {
     try {
       StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
       NameNode namenode = createNameNode(argv, null);
-      if (namenode != null)
+      if (namenode != null) {
         namenode.join();
+      }
     } catch (Throwable e) {
       LOG.fatal("Exception in namenode join", e);
       terminate(1, e);

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java Wed Sep 19 04:34:55 2012
@@ -49,12 +49,9 @@ public class NameNodeHttpServer {
   private final Configuration conf;
   private final NameNode nn;
 
-  private final Log LOG = NameNode.LOG;
   private InetSocketAddress httpAddress;
-
   private InetSocketAddress bindAddress;
-
+
   public static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address";
   public static final String FSIMAGE_ATTRIBUTE_KEY = "name.system.image";
   protected static final String NAMENODE_ATTRIBUTE_KEY = "name.node";
@@ -68,12 +65,6 @@ public class NameNodeHttpServer {
     this.bindAddress = bindAddress;
   }
 
-  private String getDefaultServerPrincipal() throws IOException {
-    return SecurityUtil.getServerPrincipal(
-        conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
-        nn.getNameNodeAddress().getHostName());
-  }
-
   public void start() throws IOException {
     final String infoHost = bindAddress.getHostName();
     int infoPort = bindAddress.getPort();
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Wed Sep 19 04:34:55 2012
@@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.protocol.U
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
 import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
@@ -74,8 +73,6 @@ import org.apache.hadoop.hdfs.protocolPB
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolPB;
@@ -119,6 +116,9 @@ import org.apache.hadoop.security.author
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.tools.GetUserMappingsProtocolPB;
+import org.apache.hadoop.tools.impl.pb.service.GetUserMappingsProtocolPBServiceImpl;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocol.GetUserMappingsProtocolService;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.util.VersionUtil;
 
@@ -159,10 +159,11 @@ class NameNodeRpcServer implements Namen
     int handlerCount =
       conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
                   DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
-    InetSocketAddress socAddr = nn.getRpcServerAddress(conf);
-    RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
-        ProtobufRpcEngine.class);
-    ClientNamenodeProtocolServerSideTranslatorPB
+
+    RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
+        ProtobufRpcEngine.class);
+
+    ClientNamenodeProtocolServerSideTranslatorPB
        clientProtocolServerTranslator =
          new ClientNamenodeProtocolServerSideTranslatorPB(this);
     BlockingService clientNNPbService = ClientNamenodeProtocol.
@@ -188,8 +189,8 @@ class NameNodeRpcServer implements Namen BlockingService refreshUserMappingService = RefreshUserMappingsProtocolService .newReflectiveBlockingService(refreshUserMappingXlator); - GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = - new GetUserMappingsProtocolServerSideTranslatorPB(this); + GetUserMappingsProtocolPBServiceImpl getUserMappingXlator = + new GetUserMappingsProtocolPBServiceImpl(this); BlockingService getUserMappingService = GetUserMappingsProtocolService .newReflectiveBlockingService(getUserMappingXlator); @@ -199,22 +200,24 @@ class NameNodeRpcServer implements Namen .newReflectiveBlockingService(haServiceProtocolXlator); WritableRpcEngine.ensureInitialized(); - - InetSocketAddress dnSocketAddr = nn.getServiceRpcServerAddress(conf); - if (dnSocketAddr != null) { + + InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf); + if (serviceRpcAddr != null) { int serviceHandlerCount = conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY, DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT); - // Add all the RPC protocols that the namenode implements - this.serviceRpcServer = new RPC.Builder(conf) + serviceRpcServer = new RPC.Builder(conf) .setProtocol( org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class) .setInstance(clientNNPbService) - .setBindAddress(dnSocketAddr.getHostName()) - .setPort(dnSocketAddr.getPort()).setNumHandlers(serviceHandlerCount) + .setBindAddress(serviceRpcAddr.getHostName()) + .setPort(serviceRpcAddr.getPort()) + .setNumHandlers(serviceHandlerCount) .setVerbose(false) .setSecretManager(namesystem.getDelegationTokenSecretManager()) .build(); + + // Add all the RPC protocols that the namenode implements DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService, serviceRpcServer); DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService, @@ -228,20 +231,26 @@ class NameNodeRpcServer implements Namen DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, getUserMappingService, serviceRpcServer); - this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress(); + serviceRPCAddress = serviceRpcServer.getListenerAddress(); nn.setRpcServiceServerAddress(conf, serviceRPCAddress); } else { serviceRpcServer = null; serviceRPCAddress = null; } - // Add all the RPC protocols that the namenode implements - this.clientRpcServer = new RPC.Builder(conf) + + InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf); + clientRpcServer = new RPC.Builder(conf) .setProtocol( org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class) - .setInstance(clientNNPbService).setBindAddress(socAddr.getHostName()) - .setPort(socAddr.getPort()).setNumHandlers(handlerCount) + .setInstance(clientNNPbService) + .setBindAddress(rpcAddr.getHostName()) + .setPort(rpcAddr.getPort()) + .setNumHandlers(handlerCount) .setVerbose(false) - .setSecretManager(namesystem.getDelegationTokenSecretManager()).build(); + .setSecretManager(namesystem.getDelegationTokenSecretManager()) + .build(); + + // Add all the RPC protocols that the namenode implements DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService, clientRpcServer); DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService, @@ -259,44 +268,51 @@ class NameNodeRpcServer implements Namen if (serviceAuthEnabled = conf.getBoolean( CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { - this.clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); - if (this.serviceRpcServer != null) { - 
this.serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); + clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); + if (serviceRpcServer != null) { + serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); } } // The rpc-server port can be ephemeral... ensure we have the correct info - this.clientRpcAddress = this.clientRpcServer.getListenerAddress(); + clientRpcAddress = clientRpcServer.getListenerAddress(); nn.setRpcServerAddress(conf, clientRpcAddress); - this.minimumDataNodeVersion = conf.get( + minimumDataNodeVersion = conf.get( DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT); // Set terse exception whose stack trace won't be logged - this.clientRpcServer.addTerseExceptions(SafeModeException.class); + clientRpcServer.addTerseExceptions(SafeModeException.class); } /** - * Actually start serving requests. + * Start client and service RPC servers. */ void start() { - clientRpcServer.start(); //start RPC server + clientRpcServer.start(); if (serviceRpcServer != null) { serviceRpcServer.start(); } } /** - * Wait until the RPC server has shut down. + * Wait until the client RPC server has shut down. */ void join() throws InterruptedException { - this.clientRpcServer.join(); + clientRpcServer.join(); } - + + /** + * Stop client and service RPC servers. + */ void stop() { - if(clientRpcServer != null) clientRpcServer.stop(); - if(serviceRpcServer != null) serviceRpcServer.stop(); + if (clientRpcServer != null) { + clientRpcServer.stop(); + } + if (serviceRpcServer != null) { + serviceRpcServer.stop(); + } } InetSocketAddress getServiceRpcAddress() { @@ -333,8 +349,9 @@ class NameNodeRpcServer implements Namen namesystem.checkOperation(OperationCategory.UNCHECKED); verifyRequest(registration); LOG.info("Error report from " + registration + ": " + msg); - if(errorCode == FATAL) + if (errorCode == FATAL) { namesystem.releaseBackupNode(registration); + } } @Override // NamenodeProtocol Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Wed Sep 19 04:34:55 2012 @@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.server.nam import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.io.Text; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -444,7 +443,7 @@ class NamenodeJspHelper { nodeToRedirect = nn.getHttpAddress().getHostName(); redirectPort = nn.getHttpAddress().getPort(); } - String addr = NetUtils.getHostPortString(nn.getNameNodeAddress()); + String addr = nn.getNameNodeAddressHostPortString(); String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName(); redirectLocation =
HttpConfig.getSchemePrefix() + fqdn + ":" + redirectPort + "/browseDirectory.jsp?namenodeInfoPort=" @@ -615,8 +614,9 @@ class NamenodeJspHelper { final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); dm.fetchDatanodes(live, dead, true); - InetSocketAddress nnSocketAddress = (InetSocketAddress) context - .getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY); + InetSocketAddress nnSocketAddress = + (InetSocketAddress)context.getAttribute( + NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY); String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":" + nnSocketAddress.getPort(); Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ ------------------------------------------------------------------------------ Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1383030-1387448 Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Wed Sep 19 04:34:55 2012 @@ -966,6 +966,30 @@ + <property> + <name>dfs.namenode.check.stale.datanode</name> + <value>false</value> + <description> + Indicate whether or not to check "stale" datanodes whose + heartbeat messages have not been received by the namenode + for more than a specified time interval. If this configuration + parameter is set to true, the stale datanodes will be moved to + the end of the target node list for reading. Writes will + also try to avoid stale nodes. + </description> + </property> + + <property> + <name>dfs.namenode.stale.datanode.interval</name> + <value>30000</value> + <description> + Default time interval for marking a datanode as "stale", i.e., if + the namenode has not received a heartbeat message from a datanode for + more than this time interval, the datanode will be marked and treated + as "stale" by default. + </description> + </property> + + <property> <name>dfs.namenode.invalidate.work.pct.per.iteration</name> <value>0.32f</value>
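The two stale-datanode properties added above are read through the standard org.apache.hadoop.conf.Configuration API. Below is a minimal sketch of a consumer; only the key strings and defaults come from the hdfs-default.xml entries above, while the class and constant names are illustrative and not part of this commit.

import org.apache.hadoop.conf.Configuration;

// Illustrative only: the key strings and defaults mirror the
// hdfs-default.xml entries above; this class is not part of the commit.
public class StaleDatanodeSettings {
  static final String CHECK_STALE_KEY = "dfs.namenode.check.stale.datanode";
  static final String STALE_INTERVAL_KEY = "dfs.namenode.stale.datanode.interval";

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // When true, stale datanodes are moved to the end of the target node
    // list for reads, and writes also try to avoid them.
    boolean checkStale = conf.getBoolean(CHECK_STALE_KEY, false);
    // Heartbeat silence, in milliseconds, after which a datanode is
    // treated as stale; the shipped default is 30000 (30 seconds).
    long staleIntervalMs = conf.getLong(STALE_INTERVAL_KEY, 30000L);
    System.out.println(CHECK_STALE_KEY + "=" + checkStale + ", "
        + STALE_INTERVAL_KEY + "=" + staleIntervalMs + "ms");
  }
}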
Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ ------------------------------------------------------------------------------ Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1383030-1387448 Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ ------------------------------------------------------------------------------ Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1383030-1387448 Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp Wed Sep 19 04:34:55 2012 @@ -34,8 +34,7 @@ HAServiceState nnHAState = nn.getServiceState(); boolean isActive = (nnHAState == HAServiceState.ACTIVE); String namenodeRole = nn.getRole().toString(); - String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" - + nn.getNameNodeAddress().getPort(); + String namenodeLabel = nn.getNameNodeAddressHostPortString(); Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = fsn.listCorruptFileBlocks("/", null); int corruptFileCount = corruptFileBlocks.size(); Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp Wed Sep 19 04:34:55 2012 @@ -34,7 +34,7 @@ boolean isActive = (nnHAState == HAServiceState.ACTIVE); String namenodeRole = nn.getRole().toString(); String namenodeState = nnHAState.toString(); - String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort(); + String namenodeLabel = nn.getNameNodeAddressHostPortString(); %> Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp Wed Sep 19 04:34:55 2012 @@ -33,7 +33,7 @@ String namenodeRole = nn.getRole().toStr FSNamesystem fsn = nn.getNamesystem(); HAServiceState nnHAState = nn.getServiceState(); boolean isActive = (nnHAState == HAServiceState.ACTIVE); -String namenodeLabel =
nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort(); +String namenodeLabel = nn.getNameNodeAddressHostPortString(); %> Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ ------------------------------------------------------------------------------ Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1383030-1387448 Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ ------------------------------------------------------------------------------ Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1383030-1387448 Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Sep 19 04:34:55 2012 @@ -274,7 +274,7 @@ public class DFSTestUtil { * specified target. */ public void waitReplication(FileSystem fs, String topdir, short value) - throws IOException { + throws IOException, InterruptedException, TimeoutException { Path root = new Path(topdir); /** wait for the replication factor to settle down */ @@ -499,36 +499,44 @@ public class DFSTestUtil { return fileNames; } } - - /** wait for the file's replication to be done */ - public static void waitReplication(FileSystem fs, Path fileName, - short replFactor) throws IOException { - boolean good; + + /** + * Wait for the given file to reach the given replication factor. 
+ * @throws TimeoutException if we fail to sufficiently replicate the file + */ + public static void waitReplication(FileSystem fs, Path fileName, short replFactor) + throws IOException, InterruptedException, TimeoutException { + boolean correctReplFactor; + final int ATTEMPTS = 20; + int count = 0; + do { - good = true; + correctReplFactor = true; BlockLocation locs[] = fs.getFileBlockLocations( fs.getFileStatus(fileName), 0, Long.MAX_VALUE); + count++; for (int j = 0; j < locs.length; j++) { String[] hostnames = locs[j].getNames(); if (hostnames.length != replFactor) { - String hostNameList = ""; - for (String h : hostnames) hostNameList += h + " "; - System.out.println("Block " + j + " of file " + fileName - + " has replication factor " + hostnames.length + "; locations " - + hostNameList); - good = false; - try { - System.out.println("Waiting for replication factor to drain"); - Thread.sleep(100); - } catch (InterruptedException e) {} + correctReplFactor = false; + System.out.println("Block " + j + " of file " + fileName + + " has replication factor " + hostnames.length + + " (desired " + replFactor + "); locations " + + Joiner.on(' ').join(hostnames)); + Thread.sleep(1000); break; } } - if (good) { + if (correctReplFactor) { System.out.println("All blocks of file " + fileName + " verified to have replication factor " + replFactor); } - } while(!good); + } while (!correctReplFactor && count < ATTEMPTS); + + if (!correctReplFactor) { + throw new TimeoutException("Timed out waiting for " + fileName + + " to reach " + replFactor + " replicas"); + } } /** delete directory and everything underneath it.*/ Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Sep 19 04:34:55 2012 @@ -857,8 +857,8 @@ public class MiniDFSCluster { // After the NN has started, set back the bound ports into // the conf conf.set(DFSUtil.addKeySuffixes( - DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId), NetUtils - .getHostPortString(nn.getNameNodeAddress())); + DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId), + nn.getNameNodeAddressHostPortString()); conf.set(DFSUtil.addKeySuffixes( DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NetUtils .getHostPortString(nn.getHttpAddress())); @@ -880,8 +880,8 @@ public class MiniDFSCluster { * @return URI of the given namenode in MiniDFSCluster */ public URI getURI(int nnIndex) { - InetSocketAddress addr = nameNodes[nnIndex].nameNode.getNameNodeAddress(); - String hostPort = NetUtils.getHostPortString(addr); + String hostPort = + nameNodes[nnIndex].nameNode.getNameNodeAddressHostPortString(); URI uri = null; try { uri = new URI("hdfs://" + hostPort); @@ -918,7 +918,8 @@ public class MiniDFSCluster { /** * wait for the cluster to get out of safemode.
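The rewritten waitReplication() above replaces an unbounded retry loop with bounded polling that fails fast via TimeoutException; the waitClusterUp() change just below applies the same idea with a simple attempt counter. Here is that pattern in isolation as a minimal sketch; the helper class, method, and parameter names are hypothetical, not Hadoop API.

import java.util.concurrent.Callable;
import java.util.concurrent.TimeoutException;

// Hypothetical helper showing the bounded-polling pattern used by the new
// waitReplication(): retry a check a fixed number of times, sleeping between
// attempts, and raise TimeoutException instead of spinning forever.
public class PollUtil {
  public static void waitFor(Callable<Boolean> check, long sleepMs, int maxAttempts)
      throws Exception {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      if (check.call()) {
        return; // condition reached
      }
      Thread.sleep(sleepMs); // give the system time to make progress
    }
    throw new TimeoutException("condition not met after " + maxAttempts + " attempts");
  }
}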
*/ - public void waitClusterUp() { + public void waitClusterUp() throws IOException { + int i = 0; if (numDataNodes > 0) { while (!isClusterUp()) { try { @@ -926,6 +927,9 @@ public class MiniDFSCluster { Thread.sleep(1000); } catch (InterruptedException e) { } + if (++i > 10) { + throw new IOException("Timed out waiting for Mini HDFS Cluster to start"); + } } } } @@ -1354,6 +1358,7 @@ public class MiniDFSCluster { if (ExitUtil.terminateCalled()) { LOG.fatal("Test resulted in an unexpected exit", ExitUtil.getFirstExitException()); + ExitUtil.resetFirstExitException(); throw new AssertionError("Test resulted in an unexpected exit"); } } Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java Wed Sep 19 04:34:55 2012 @@ -61,7 +61,7 @@ public class TestBlockReaderLocal { * of this class might immediately issue a retry on failure, so it's polite. */ @Test - public void testStablePositionAfterCorruptRead() throws IOException { + public void testStablePositionAfterCorruptRead() throws Exception { final short REPL_FACTOR = 1; final long FILE_LENGTH = 512L; cluster.waitActive(); Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java Wed Sep 19 04:34:55 2012 @@ -24,6 +24,7 @@ import java.io.IOException; import java.io.PrintStream; import java.io.RandomAccessFile; import java.util.Random; +import java.util.concurrent.TimeoutException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -199,11 +200,11 @@ public class TestClientReportBadBlock { } /** - * create a file with one block and corrupt some/all of the block replicas. + * Create a file with one block and corrupt some/all of the block replicas. 
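Because waitReplication() now declares InterruptedException and TimeoutException, callers such as createAFileWithCorruptedBlockReplicas() below must widen their throws clauses. A sketch of a minimal caller under that assumption follows; only the two DFSTestUtil calls come from this diff, while the wrapper class and constants are illustrative.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;

// Illustrative wrapper, not part of the commit: creates a file and blocks
// until it reaches the requested replication, propagating IOException,
// InterruptedException, and TimeoutException via "throws Exception".
class ReplicationTestHelper {
  private static final long FILE_LEN = 512; // bytes; arbitrary for the sketch

  static void createAndAwaitReplication(FileSystem fs, Path path, short repl)
      throws Exception {
    DFSTestUtil.createFile(fs, path, FILE_LEN, repl, 0);
    DFSTestUtil.waitReplication(fs, path, repl);
  }
}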
*/ private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl, int corruptBlockCount) throws IOException, AccessControlException, - FileNotFoundException, UnresolvedLinkException { + FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException { DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0); DFSTestUtil.waitReplication(dfs, filePath, repl); // Locate the file blocks by asking name node Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Wed Sep 19 04:34:55 2012 @@ -789,8 +789,7 @@ public class TestDFSClientRetries { * way. See HDFS-3067. */ @Test - public void testRetryOnChecksumFailure() - throws UnresolvedLinkException, IOException { + public void testRetryOnChecksumFailure() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); @@ -831,7 +830,7 @@ public class TestDFSClientRetries { } /** Test client retry with namenode restarting. */ - @Test + @Test(timeout=300000) public void testNamenodeRestart() throws Exception { namenodeRestartTest(new Configuration(), false); } Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Wed Sep 19 04:34:55 2012 @@ -34,14 +34,19 @@ import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; +import org.apache.log4j.Level; import org.junit.Test; /** @@ -59,6 +64,10 @@ public class TestDatanodeBlockScanner { private static Pattern pattern_blockVerify = Pattern.compile(".*?(SCAN_PERIOD)\\s*:\\s*(\\d+.*?)"); + + static { + 
((Log4JLogger)FSNamesystem.auditLog).getLogger().setLevel(Level.WARN); + } /** * This connects to datanode and fetches block verification data. * It repeats this until the given block has a verification time > newTime. @@ -173,7 +182,7 @@ public class TestDatanodeBlockScanner { } @Test - public void testBlockCorruptionPolicy() throws IOException { + public void testBlockCorruptionPolicy() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); Random random = new Random(); @@ -206,12 +215,12 @@ public class TestDatanodeBlockScanner { assertTrue(MiniDFSCluster.corruptReplica(1, block)); assertTrue(MiniDFSCluster.corruptReplica(2, block)); - // Read the file to trigger reportBadBlocks by client - try { - IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), - conf, true); - } catch (IOException e) { - // Ignore exception + // Trigger each of the DNs to scan this block immediately. + // The block pool scanner doesn't run frequently enough on its own + // to notice these, and due to HDFS-1371, the client won't report + // bad blocks to the NN when all replicas are bad. + for (DataNode dn : cluster.getDataNodes()) { + DataNodeTestUtils.runBlockScannerForBlock(dn, block); } // We now have the blocks to be marked as corrupt and we get back all