Subject: svn commit: r1609618 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/bin/ src/main/java/org/apache/hadoop/hdfs/server/datanode/ src/site/apt/ src/test/java/org/apache/hadoop/hdfs/server/datanode/
Date: Fri, 11 Jul 2014 03:06:11 -0000
To: hdfs-commits@hadoop.apache.org
From: cnauroth@apache.org
Message-Id: <20140711030612.2D866238890D@eris.apache.org>

Author: cnauroth
Date: Fri Jul 11 03:06:11 2014
New Revision: 1609618

URL: http://svn.apache.org/r1609618
Log:
HDFS-5202. Support Centralized Cache Management on Windows. Contributed by Chris Nauroth.

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1609618&r1=1609617&r2=1609618&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Jul 11 03:06:11 2014
@@ -284,6 +284,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-6640. Syntax for MKDIRS, CREATESYMLINK, and SETXATTR are given
     wrongly in WebHdfs document (missed webhdfs/v1). (Stephen Chu via jing9)
 
+    HDFS-5202. Support Centralized Cache Management on Windows. (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
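
The hdfs.cmd change below wires the existing CacheAdmin tool into the
Windows command launcher. For readers following along, here is a minimal
sketch of the public Java API that the new "hdfs cacheadmin" target fronts.
The class and method names are real HDFS APIs; the snippet itself is
illustrative, assumes fs.defaultFS points at a running HDFS cluster, and is
not part of this commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CacheAdminSketch {
  public static void main(String[] args) throws Exception {
    // Programmatic equivalent of:
    //   hdfs cacheadmin -addPool pool1
    //   hdfs cacheadmin -addDirective -path /hot/data -pool pool1
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(conf);
    dfs.addCachePool(new CachePoolInfo("pool1"));
    long directiveId = dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder()
            .setPath(new Path("/hot/data"))
            .setPool("pool1")
            .build());
    System.out.println("Added cache directive " + directiveId);
  }
}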
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd?rev=1609618&r1=1609617&r2=1609618&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd Fri Jul 11 03:06:11 2014
@@ -47,7 +47,7 @@ if "%1" == "--config" (
       goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -146,6 +146,10 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
   goto :eof
 
+:cacheadmin
+  set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (
@@ -193,6 +197,7 @@ goto :eof
 
   @echo                        current directory contents with a snapshot
   @echo   lsSnapshottableDir   list all snapshottable dirs owned by the current user
   @echo                        Use -help to see options
+  @echo   cacheadmin           configure the HDFS cache
   @echo.
   @echo Most commands print help when invoked w/o parameters.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1609618&r1=1609617&r2=1609618&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Jul 11 03:06:11 2014
@@ -745,15 +745,19 @@ public class DataNode extends Configured
           " size (%s) is greater than zero and native code is not available.",
           DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
       }
-      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
-      if (dnConf.maxLockedMemory > ulimit) {
-        throw new RuntimeException(String.format(
-          "Cannot start datanode because the configured max locked memory" +
-          " size (%s) of %d bytes is more than the datanode's available" +
-          " RLIMIT_MEMLOCK ulimit of %d bytes.",
-          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-          dnConf.maxLockedMemory,
-          ulimit));
+      if (Path.WINDOWS) {
+        NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
+      } else {
+        long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
+        if (dnConf.maxLockedMemory > ulimit) {
+          throw new RuntimeException(String.format(
+            "Cannot start datanode because the configured max locked memory" +
+            " size (%s) of %d bytes is more than the datanode's available" +
+            " RLIMIT_MEMLOCK ulimit of %d bytes.",
+            DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+            dnConf.maxLockedMemory,
+            ulimit));
+        }
       }
     }
     LOG.info("Starting DataNode with maxLockedMemory = " +
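
One operational note on the check above: dfs.datanode.max.locked.memory is
always specified in bytes. On POSIX systems it must fit within the
RLIMIT_MEMLOCK ulimit; on Windows the DataNode now extends its process
working set up front instead of consulting a ulimit. A minimal
configuration sketch follows — the key constant is the real DFSConfigKeys
entry, while the 64 MB value and the surrounding harness are hypothetical
examples, not part of this commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class MaxLockedMemorySketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // 64 MB of DataNode cache, in bytes (not KB, unlike "ulimit -l" output).
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        64L * 1024 * 1024);
    System.out.println("dfs.datanode.max.locked.memory = "
        + conf.getLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, 0));
  }
}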
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm?rev=1609618&r1=1609617&r2=1609618&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm Fri Jul 11 03:06:11 2014
@@ -270,7 +270,7 @@ Centralized Cache Management in HDFS
 ** {Native Libraries}
 
   In order to lock block files into memory, the DataNode relies on native JNI
-  code found in <<<libhadoop.so>>>. Be sure to
+  code found in <<<libhadoop.so>>> or <<<hadoop.dll>>> on Windows. Be sure to
   {{{../hadoop-common/NativeLibraries.html}enable JNI}} if you are using HDFS
   centralized cache management.
 
@@ -283,11 +283,11 @@ Centralized Cache Management in HDFS
 * dfs.datanode.max.locked.memory
 
   This determines the maximum amount of memory a DataNode will use for caching.
-  The "locked-in-memory size" ulimit (<<<ulimit -l>>>) of the DataNode user
-  also needs to be increased to match this parameter (see below section on
-  {{OS Limits}}). When setting this value, please remember that you will need
-  space in memory for other things as well, such as the DataNode and
-  application JVM heaps and the operating system page cache.
+  On Unix-like systems, the "locked-in-memory size" ulimit (<<<ulimit -l>>>) of
+  the DataNode user also needs to be increased to match this parameter (see
+  below section on {{OS Limits}}). When setting this value, please remember
+  that you will need space in memory for other things as well, such as the
+  DataNode and application JVM heaps and the operating system page cache.
 
 *** Optional
 
@@ -339,3 +339,6 @@ Centralized Cache Management in HDFS
   "unlimited," indicating that there is no limit. Note that it's typical for
   <<<ulimit -l>>> to output the memory lock limit in KB, but
   dfs.datanode.max.locked.memory must be specified in bytes.
+
+  This information does not apply to deployments on Windows. Windows has no
+  direct equivalent of <<<ulimit -l>>>.
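
The documentation above requires native mlock support, but the test changes
that follow rely on a related seam: NativeIO.POSIX.setCacheManipulator lets
tests substitute a no-op mlock so cache accounting can run without native
code or raised limits. A short illustrative snippet — the hook and the
NoMlockCacheManipulator class are real Hadoop APIs, while this standalone
usage is a sketch, not part of the commit.

import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;

public class CacheManipulatorSketch {
  public static void main(String[] args) {
    // Remember the real implementation so it can be restored afterwards.
    CacheManipulator prev = NativeIO.POSIX.getCacheManipulator();
    // Substitute a no-op mlock: caching code paths run, but no memory is
    // actually pinned, so no RLIMIT_MEMLOCK privileges are needed.
    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
    try {
      // ... exercise DataNode caching here, as TestFsDatasetCache does ...
    } finally {
      NativeIO.POSIX.setCacheManipulator(prev);
    }
  }
}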
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1609618&r1=1609617&r2=1609618&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Fri Jul 11 03:06:11 2014
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.da
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.anyLong;
@@ -68,6 +67,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
@@ -114,7 +114,6 @@ public class TestFsDatasetCache {
 
   @Before
   public void setUp() throws Exception {
-    assumeTrue(!Path.WINDOWS);
     conf = new HdfsConfiguration();
     conf.setLong(
         DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
@@ -143,6 +142,9 @@ public class TestFsDatasetCache {
 
   @After
   public void tearDown() throws Exception {
+    // Verify that each test uncached whatever it cached. This cleanup is
+    // required so that file descriptors are not leaked across tests.
+    DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
     if (fs != null) {
       fs.close();
     }
@@ -205,9 +207,16 @@ public class TestFsDatasetCache {
       String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
       Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
       ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
-      FileChannel blockChannel =
-          ((FileInputStream)fsd.getBlockInputStream(extBlock, 0)).getChannel();
-      sizes[i] = blockChannel.size();
+      FileInputStream blockInputStream = null;
+      FileChannel blockChannel = null;
+      try {
+        blockInputStream =
+            (FileInputStream)fsd.getBlockInputStream(extBlock, 0);
+        blockChannel = blockInputStream.getChannel();
+        sizes[i] = blockChannel.size();
+      } finally {
+        IOUtils.cleanup(LOG, blockChannel, blockInputStream);
+      }
     }
     return sizes;
   }
@@ -571,5 +580,7 @@ public class TestFsDatasetCache {
         return true;
       }
     }, 1000, 30000);
+
+    dfs.removeCacheDirective(shortCacheDirectiveId);
   }
 }
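
To round out the lifecycle the tests enforce: directives and pools added
through the cache API should be removed when done, which is exactly what
the new tearDown() assertion and the removeCacheDirective call above check.
A closing sketch — the method names are real DistributedFileSystem APIs,
while the helper, "directiveId", and "pool1" refer back to the earlier
hypothetical example and are not part of this commit.

import org.apache.hadoop.hdfs.DistributedFileSystem;

public class CacheCleanupSketch {
  // Undo the earlier hypothetical sketch: removing the directive releases
  // the pinned pages and the DataNode-side file descriptors backing the
  // cached replicas; removing the pool deletes the now-empty container.
  static void cleanup(DistributedFileSystem dfs, long directiveId)
      throws Exception {
    dfs.removeCacheDirective(directiveId);
    dfs.removeCachePool("pool1");
  }
}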