Subject: svn commit: r1543676 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/datanode/ src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ src/test/java/org/apache/hadoop/hdfs/ src/test...
Date: Wed, 20 Nov 2013 00:48:05 -0000
From: cmccabe@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Message-Id: <20131120004806.275A923889D5@eris.apache.org>

Author: cmccabe
Date: Wed Nov 20 00:48:05 2013
New Revision: 1543676

URL: http://svn.apache.org/r1543676
Log:
HDFS-5511.
improve CacheManipulator interface to allow better unit testing (cmccabe)

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Nov 20 00:48:05 2013
@@ -199,6 +199,9 @@ Trunk (Unreleased)
     HDFS-5366. recaching improvements (cmccabe)
 
+    HDFS-5511. improve CacheManipulator interface to allow better unit testing
+    (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only.
    (cmccabe)
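Every hunk below follows the same pattern: cache-related native calls that previously went through static NativeIO methods or direct field access (NativeIO.POSIX.cacheManipulator, NativeIO.POSIX.cacheTracker) now go through NativeIO.POSIX.getCacheManipulator(), and tests swap in a stub with NativeIO.POSIX.setCacheManipulator(). As a reading aid, here is a rough sketch of the manipulator surface as this patch uses it. The method names and parameters are taken from the call sites in the hunks below; the bodies are placeholders, since the real CacheManipulator and NoMlockCacheManipulator classes live in hadoop-common's NativeIO and are not part of this diff.

    import java.io.FileDescriptor;
    import java.io.IOException;
    import java.nio.ByteBuffer;

    // Illustrative only: shapes inferred from the call sites in this patch,
    // not the authoritative class definition.
    class CacheManipulatorSketch {
      // Lock a mapped block into memory (MappableBlock.java below).
      public void mlock(String identifier, ByteBuffer mmap, long length)
          throws IOException {
        // placeholder; the real implementation calls into native mlock
      }

      // Drop or prefetch page-cache ranges (BlockReceiver/BlockSender below).
      // The real signature throws NativeIOException, a subclass of IOException.
      public void posixFadviseIfPossible(String name, FileDescriptor fd,
          long offset, long len, int flags) throws IOException {
        // placeholder; the real implementation calls into posix_fadvise
      }

      // RLIMIT_MEMLOCK ceiling consulted by DataNode startup and the tests below.
      public long getMemlockLimit() {
        return 64 * 1024; // placeholder value
      }

      // Page size used by FsDatasetCache.PageRounder below.
      public long getOperatingSystemPageSize() {
        return 4096; // placeholder value
      }
    }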
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Wed Nov 20 00:48:05 2013
@@ -657,8 +657,9 @@ class BlockReceiver implements Closeable
         //
         long dropPos = lastCacheManagementOffset - CACHE_DROP_LAG_BYTES;
         if (dropPos > 0 && dropCacheBehindWrites) {
-          NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
-              outFd, 0, dropPos, NativeIO.POSIX.POSIX_FADV_DONTNEED);
+          NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+              block.getBlockName(), outFd, 0, dropPos,
+              NativeIO.POSIX.POSIX_FADV_DONTNEED);
         }
         lastCacheManagementOffset = offsetInBlock;
       }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Wed Nov 20 00:48:05 2013
@@ -375,8 +375,9 @@ class BlockSender implements java.io.Clo
         ((dropCacheBehindAllReads) ||
             (dropCacheBehindLargeReads && isLongRead()))) {
       try {
-        NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
-            blockInFd, lastCacheDropOffset, offset - lastCacheDropOffset,
+        NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+            block.getBlockName(), blockInFd, lastCacheDropOffset,
+            offset - lastCacheDropOffset,
             NativeIO.POSIX.POSIX_FADV_DONTNEED);
       } catch (Exception e) {
         LOG.warn("Unable to drop cache on file close", e);
@@ -674,8 +675,9 @@ class BlockSender implements java.io.Clo
 
     if (isLongRead() && blockInFd != null) {
       // Advise that this file descriptor will be accessed sequentially.
-      NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
-          blockInFd, 0, 0, NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
+      NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+          block.getBlockName(), blockInFd, 0, 0,
+          NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
     }
 
     // Trigger readahead of beginning of file if configured.
@@ -761,9 +763,9 @@ class BlockSender implements java.io.Clo
       long nextCacheDropOffset = lastCacheDropOffset + CACHE_DROP_INTERVAL_BYTES;
       if (offset >= nextCacheDropOffset) {
         long dropLength = offset - lastCacheDropOffset;
-        NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
-            blockInFd, lastCacheDropOffset, dropLength,
-            NativeIO.POSIX.POSIX_FADV_DONTNEED);
+        NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+            block.getBlockName(), blockInFd, lastCacheDropOffset,
+            dropLength, NativeIO.POSIX.POSIX_FADV_DONTNEED);
         lastCacheDropOffset = offset;
       }
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Nov 20 00:48:05 2013
@@ -667,7 +667,7 @@ public class DataNode extends Configured
           " size (%s) is greater than zero and native code is not available.",
           DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
     }
-    long ulimit = NativeIO.getMemlockLimit();
+    long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
     if (dnConf.maxLockedMemory > ulimit) {
       throw new RuntimeException(String.format(
           "Cannot start datanode because the configured max locked memory" +

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java Wed Nov 20 00:48:05 2013
@@ -163,7 +163,8 @@ public class FsDatasetCache {
   private final UsedBytesCount usedBytesCount;
 
   public static class PageRounder {
-    private final long osPageSize = NativeIO.getOperatingSystemPageSize();
+    private final long osPageSize =
+        NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
 
     /**
      * Round up a number to the operating system page size.
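The PageRounder hunk above only changes where the page size comes from; the rounding itself is not shown. For intuition, a hypothetical round-up helper (assuming import of org.apache.hadoop.io.nativeio.NativeIO; this is a sketch, not the actual PageRounder code) would look like:

    // Hypothetical sketch: round a length up to the OS page size, which is
    // now obtained through the CacheManipulator. With a 4096-byte page,
    // a 15000-byte block occupies 16384 bytes of locked memory.
    static long roundUpToPageSize(long length) {
      long pageSize =
          NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
      return ((length + pageSize - 1) / pageSize) * pageSize;
    }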
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java Wed Nov 20 00:48:05 2013
@@ -82,7 +82,7 @@ public class MappableBlock implements Cl
         throw new IOException("Block InputStream has no FileChannel.");
       }
       mmap = blockChannel.map(MapMode.READ_ONLY, 0, length);
-      NativeIO.POSIX.cacheManipulator.mlock(blockFileName, mmap, length);
+      NativeIO.POSIX.getCacheManipulator().mlock(blockFileName, mmap, length);
       verifyChecksum(length, metaIn, blockChannel, blockFileName);
       mappableBlock = new MappableBlock(mmap, length);
     } finally {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java Wed Nov 20 00:48:05 2013
@@ -113,7 +113,8 @@ public class TestDatanodeConfig {
   @Test(timeout=60000)
   public void testMemlockLimit() throws Exception {
     assumeTrue(NativeIO.isAvailable());
-    final long memlockLimit = NativeIO.getMemlockLimit();
+    final long memlockLimit =
+        NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
     // Can't increase the memlock limit past the maximum.
     assumeTrue(memlockLimit != Long.MAX_VALUE);
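Since the memlock limit is now read through the manipulator as well (DataNode.java and TestDatanodeConfig.java above, TestFsDatasetCache.java below), a test that needs a particular limit can fake it instead of depending on the machine's "ulimit -l". A hypothetical override, assuming getMemlockLimit is overridable in the same way the test hunks below override mlock:

    // Hypothetical test-only stub: report an effectively unlimited memlock
    // limit so cache tests are not skipped on machines with a small ulimit.
    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
      @Override
      public long getMemlockLimit() {
        return Long.MAX_VALUE;
      }
    });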
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java Wed Nov 20 00:48:05 2013
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import java.io.FileDescriptor;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Map;
@@ -36,7 +37,8 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
-import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheTracker;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
+import org.apache.hadoop.io.nativeio.NativeIOException;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -54,7 +56,7 @@ public class TestCachingStrategy {
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
 
     // Track calls to posix_fadvise.
-    NativeIO.POSIX.cacheTracker = tracker;
+    NativeIO.POSIX.setCacheManipulator(tracker);
 
     // Normally, we wait for a few megabytes of data to be read or written
     // before dropping the cache. This is to avoid an excessive number of
@@ -106,12 +108,13 @@ public class TestCachingStrategy {
     }
   }
 
-  private static class TestRecordingCacheTracker implements CacheTracker {
+  private static class TestRecordingCacheTracker extends CacheManipulator {
     private final Map map = new TreeMap();
 
     @Override
-    synchronized public void fadvise(String name,
-        long offset, long len, int flags) {
+    public void posixFadviseIfPossible(String name,
+        FileDescriptor fd, long offset, long len, int flags)
+        throws NativeIOException {
       if ((len < 0) || (len > Integer.MAX_VALUE)) {
         throw new RuntimeException("invalid length of " + len +
             " passed to posixFadviseIfPossible");
@@ -126,6 +129,7 @@ public class TestCachingStrategy {
         map.put(name, stats);
       }
       stats.fadvise((int)offset, (int)len, flags);
+      super.posixFadviseIfPossible(name, fd, offset, len, flags);
     }
 
     synchronized void clear() {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Wed Nov 20 00:48:05 2013
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
@@ -99,7 +100,6 @@ public class TestFsDatasetCache {
   @Before
   public void setUp() throws Exception {
     assumeTrue(!Path.WINDOWS);
-    assumeTrue(NativeIO.getMemlockLimit() >= CACHE_CAPACITY);
     conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY, true);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS,
@@ -122,18 +122,8 @@ public class TestFsDatasetCache {
 
     spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);
 
-    prevCacheManipulator = NativeIO.POSIX.cacheManipulator;
-
-    // Save the current CacheManipulator and replace it at the end of the test
-    // Stub out mlock calls to avoid failing when not enough memory is lockable
-    // by the operating system.
-    NativeIO.POSIX.cacheManipulator = new CacheManipulator() {
-      @Override
-      public void mlock(String identifier,
-          ByteBuffer mmap, long length) throws IOException {
-        LOG.info("mlocking " + identifier);
-      }
-    };
+    prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
+    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
   }
 
   @After
@@ -145,7 +135,7 @@ public class TestFsDatasetCache {
       cluster.shutdown();
     }
     // Restore the original CacheManipulator
-    NativeIO.POSIX.cacheManipulator = prevCacheManipulator;
+    NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
   }
 
   private static void setHeartbeatResponse(DatanodeCommand[] cmds)
@@ -222,7 +212,8 @@ public class TestFsDatasetCache {
         if (tries++ > 10) {
           LOG.info("verifyExpectedCacheUsage: expected " +
               expected + ", got " + curDnCacheUsed + "; " +
-              "memlock limit = " + NativeIO.getMemlockLimit() +
+              "memlock limit = " +
+              NativeIO.POSIX.getCacheManipulator().getMemlockLimit() +
               ". Waiting...");
         }
         return false;
@@ -297,40 +288,31 @@ public class TestFsDatasetCache {
    */
   @Test(timeout=600000)
   public void testCacheAndUncacheBlockWithRetries() throws Exception {
-    CacheManipulator prevCacheManipulator = NativeIO.POSIX.cacheManipulator;
-
-    try {
-      NativeIO.POSIX.cacheManipulator = new CacheManipulator() {
-        private final Set seenIdentifiers = new HashSet();
-
-        @Override
-        public void mlock(String identifier,
-            ByteBuffer mmap, long length) throws IOException {
-          if (seenIdentifiers.contains(identifier)) {
-            // mlock succeeds the second time.
-            LOG.info("mlocking " + identifier);
-            return;
-          }
-          seenIdentifiers.add(identifier);
-          throw new IOException("injecting IOException during mlock of " +
-              identifier);
+    // We don't have to save the previous cacheManipulator
+    // because it will be reinstalled by the @After function.
+    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
+      private final Set seenIdentifiers = new HashSet();
+
+      @Override
+      public void mlock(String identifier,
+          ByteBuffer mmap, long length) throws IOException {
+        if (seenIdentifiers.contains(identifier)) {
+          // mlock succeeds the second time.
+          LOG.info("mlocking " + identifier);
+          return;
         }
-      };
-      testCacheAndUncacheBlock();
-    } finally {
-      NativeIO.POSIX.cacheManipulator = prevCacheManipulator;
-    }
+        seenIdentifiers.add(identifier);
+        throw new IOException("injecting IOException during mlock of " +
+            identifier);
+      }
+    });
+    testCacheAndUncacheBlock();
   }
 
   @Test(timeout=600000)
   public void testFilesExceedMaxLockedMemory() throws Exception {
     LOG.info("beginning testFilesExceedMaxLockedMemory");
 
-    // We don't want to deal with page rounding issues, so skip this
-    // test if page size is weird
-    long osPageSize = NativeIO.getOperatingSystemPageSize();
-    assumeTrue(osPageSize == 4096);
-
     // Create some test files that will exceed total cache capacity
     final int numFiles = 5;
     final long fileSize = 15000;
@@ -411,7 +393,7 @@ public class TestFsDatasetCache {
     assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
     assertEquals("Unexpected amount of cache used", current, cacheUsed);
 
-    NativeIO.POSIX.cacheManipulator = new NativeIO.POSIX.CacheManipulator() {
+    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
       @Override
       public void mlock(String identifier,
          ByteBuffer mmap, long length) throws IOException {
@@ -422,7 +404,7 @@ public class TestFsDatasetCache {
         Assert.fail();
       }
     }
-    };
+    });
     // Starting caching each block in succession. The usedBytes amount
     // should increase, even though caching doesn't complete on any of them.
     for (int i=0; i
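The test changes in this revision all follow the same save/replace/restore pattern around setCacheManipulator. Condensed into one self-contained JUnit 4 skeleton (class, field, and test names here are illustrative; only the NativeIO.POSIX calls, CacheManipulator, and NoMlockCacheManipulator come from this patch):

    import java.io.IOException;
    import java.nio.ByteBuffer;

    import org.apache.hadoop.io.nativeio.NativeIO;
    import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
    import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class CacheManipulatorStubExample {
      private CacheManipulator prevCacheManipulator;

      @Before
      public void setUp() {
        // Remember whatever manipulator is installed, then stub out mlock so
        // the test does not depend on how much memory the OS lets us lock.
        prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
        NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
      }

      @After
      public void tearDown() {
        // Restore the original manipulator so later tests see real behavior.
        NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
      }

      @Test
      public void testCachingWithInjectedMlockFailure() throws Exception {
        // Per-test overrides are also possible, e.g. failing the first mlock
        // and succeeding afterwards, as testCacheAndUncacheBlockWithRetries
        // does above.
        NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
          private boolean failedOnce = false;

          @Override
          public void mlock(String identifier, ByteBuffer mmap, long length)
              throws IOException {
            if (!failedOnce) {
              failedOnce = true;
              throw new IOException("injecting IOException during mlock of " +
                  identifier);
            }
          }
        });
        // ... exercise caching here ...
      }
    }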