Subject: svn commit: r1462652 [5/6] - in /hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs: ./ src/ src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ src/contrib/libwebhdfs/ src/contrib/libwebhdfs/src/ src/main/bin/ src/mai...
Date: Fri, 29 Mar 2013 21:33:37 -0000
To: hdfs-commits@hadoop.apache.org
From: todd@apache.org
Message-Id: <20130329213343.8772B23889DE@eris.apache.org>

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Fri Mar 29 21:33:35 2013 @@ -18,12 +18,15 @@ package org.apache.hadoop.hdfs; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; import java.io.EOFException; import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; +import java.net.URI; import java.nio.ByteBuffer; +import java.security.PrivilegedExceptionAction; import java.util.concurrent.TimeoutException; import org.apache.hadoop.conf.Configuration; @@ -33,6 +36,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; +import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import 
org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -42,6 +46,7 @@ import org.apache.hadoop.hdfs.server.dat import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.TemporarySocketDirectory; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; @@ -108,9 +113,22 @@ public class TestShortCircuitLocalRead { } } } + + private static String getCurrentUser() throws IOException { + return UserGroupInformation.getCurrentUser().getShortUserName(); + } - static void checkFileContent(FileSystem fs, Path name, byte[] expected, - int readOffset) throws IOException { + /** Check file content, reading as user {@code readingUser} */ + static void checkFileContent(URI uri, Path name, byte[] expected, + int readOffset, String readingUser, Configuration conf, + boolean legacyShortCircuitFails) + throws IOException, InterruptedException { + // Ensure short circuit is enabled + DistributedFileSystem fs = getFileSystem(readingUser, uri, conf); + if (legacyShortCircuitFails) { + assertTrue(fs.getClient().useLegacyBlockReaderLocal()); + } + FSDataInputStream stm = fs.open(name); byte[] actual = new byte[expected.length-readOffset]; stm.readFully(readOffset, actual); @@ -135,6 +153,10 @@ public class TestShortCircuitLocalRead { nread += nbytes; } checkData(actual, readOffset, expected, "Read 3"); + + if (legacyShortCircuitFails) { + assertFalse(fs.getClient().useLegacyBlockReaderLocal()); + } stm.close(); } @@ -146,11 +168,17 @@ public class TestShortCircuitLocalRead { return arr; } - /** - * Verifies that reading a file with the direct read(ByteBuffer) api gives the expected set of bytes. - */ - static void checkFileContentDirect(FileSystem fs, Path name, byte[] expected, - int readOffset) throws IOException { + /** Check the file content, reading as user {@code readingUser} */ + static void checkFileContentDirect(URI uri, Path name, byte[] expected, + int readOffset, String readingUser, Configuration conf, + boolean legacyShortCircuitFails) + throws IOException, InterruptedException { + // Ensure short circuit is enabled + DistributedFileSystem fs = getFileSystem(readingUser, uri, conf); + if (legacyShortCircuitFails) { + assertTrue(fs.getClient().useLegacyBlockReaderLocal()); + } + HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name); ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset); @@ -180,15 +208,33 @@ public class TestShortCircuitLocalRead { nread += nbytes; } checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3"); + if (legacyShortCircuitFails) { + assertFalse(fs.getClient().useLegacyBlockReaderLocal()); + } stm.close(); } + public void doTestShortCircuitReadLegacy(boolean ignoreChecksum, int size, + int readOffset, String shortCircuitUser, String readingUser, + boolean legacyShortCircuitFails) throws IOException, InterruptedException { + doTestShortCircuitReadImpl(ignoreChecksum, size, readOffset, + shortCircuitUser, readingUser, legacyShortCircuitFails); + } + + public void doTestShortCircuitRead(boolean ignoreChecksum, int size, + int readOffset) throws IOException, InterruptedException { + String shortCircuitUser = getCurrentUser(); + doTestShortCircuitReadImpl(ignoreChecksum, size, readOffset, + null, getCurrentUser(), false); + } + /** * Test that file data can be read by reading the block file * directly from the local store. 
*/ - public void doTestShortCircuitRead(boolean ignoreChecksum, int size, - int readOffset) throws IOException { + public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size, + int readOffset, String shortCircuitUser, String readingUser, + boolean legacyShortCircuitFails) throws IOException, InterruptedException { Configuration conf = new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true); conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, @@ -196,6 +242,11 @@ public class TestShortCircuitLocalRead { conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(), "TestShortCircuitLocalRead._PORT.sock").getAbsolutePath()); + if (shortCircuitUser != null) { + conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, + shortCircuitUser); + conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true); + } if (simulatedStorage) { SimulatedFSDataset.setFactory(conf); } @@ -208,53 +259,94 @@ public class TestShortCircuitLocalRead { assertTrue("/ should be a directory", fs.getFileStatus(path) .isDirectory() == true); - byte[] fileData = AppendTestUtil.randomBytes(seed, size); // create a new file in home directory. Do not close it. - Path file1 = new Path("filelocal.dat"); + byte[] fileData = AppendTestUtil.randomBytes(seed, size); + Path file1 = fs.makeQualified(new Path("filelocal.dat")); FSDataOutputStream stm = createFile(fs, file1, 1); - - // write to file stm.write(fileData); stm.close(); - checkFileContent(fs, file1, fileData, readOffset); - checkFileContentDirect(fs, file1, fileData, readOffset); + + URI uri = cluster.getURI(); + checkFileContent(uri, file1, fileData, readOffset, readingUser, conf, + legacyShortCircuitFails); + checkFileContentDirect(uri, file1, fileData, readOffset, readingUser, + conf, legacyShortCircuitFails); } finally { fs.close(); cluster.shutdown(); } } - @Test - public void testFileLocalReadNoChecksum() throws IOException { + @Test(timeout=10000) + public void testFileLocalReadNoChecksum() throws Exception { doTestShortCircuitRead(true, 3*blockSize+100, 0); } - @Test - public void testFileLocalReadChecksum() throws IOException { + @Test(timeout=10000) + public void testFileLocalReadChecksum() throws Exception { doTestShortCircuitRead(false, 3*blockSize+100, 0); } - @Test - public void testSmallFileLocalRead() throws IOException { + @Test(timeout=10000) + public void testSmallFileLocalRead() throws Exception { doTestShortCircuitRead(false, 13, 0); doTestShortCircuitRead(false, 13, 5); doTestShortCircuitRead(true, 13, 0); doTestShortCircuitRead(true, 13, 5); } - @Test - public void testReadFromAnOffset() throws IOException { + @Test(timeout=10000) + public void testLocalReadLegacy() throws Exception { + doTestShortCircuitReadLegacy(true, 13, 0, getCurrentUser(), + getCurrentUser(), false); + } + + /** + * Try a short circuit from a reader that is not allowed to + * to use short circuit. The test ensures reader falls back to non + * shortcircuit reads when shortcircuit is disallowed. 
+ */ + @Test(timeout=10000) + public void testLocalReadFallback() throws Exception { + doTestShortCircuitReadLegacy(true, 13, 0, getCurrentUser(), "notallowed", true); + } + + @Test(timeout=10000) + public void testReadFromAnOffset() throws Exception { doTestShortCircuitRead(false, 3*blockSize+100, 777); doTestShortCircuitRead(true, 3*blockSize+100, 777); } - @Test - public void testLongFile() throws IOException { + @Test(timeout=10000) + public void testLongFile() throws Exception { doTestShortCircuitRead(false, 10*blockSize+100, 777); doTestShortCircuitRead(true, 10*blockSize+100, 777); } - @Test + private ClientDatanodeProtocol getProxy(UserGroupInformation ugi, + final DatanodeID dnInfo, final Configuration conf) throws IOException, + InterruptedException { + return ugi.doAs(new PrivilegedExceptionAction() { + @Override + public ClientDatanodeProtocol run() throws Exception { + return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, + false); + } + }); + } + + private static DistributedFileSystem getFileSystem(String user, final URI uri, + final Configuration conf) throws InterruptedException, IOException { + UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user); + return ugi.doAs(new PrivilegedExceptionAction() { + @Override + public DistributedFileSystem run() throws Exception { + return (DistributedFileSystem)FileSystem.get(uri, conf); + } + }); + } + + @Test(timeout=10000) public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException, InterruptedException { final Configuration conf = new Configuration(); @@ -287,7 +379,7 @@ public class TestShortCircuitLocalRead { } } - @Test + @Test(timeout=10000) public void testSkipWithVerifyChecksum() throws IOException { int size = blockSize; Configuration conf = new Configuration(); @@ -417,7 +509,7 @@ public class TestShortCircuitLocalRead { } /** - * Test to run benchmarks between shortcircuit read vs regular read with + * Test to run benchmarks between short circuit read vs regular read with * specified number of threads simultaneously reading. *
* Run this using the following command: @@ -435,7 +527,7 @@ public class TestShortCircuitLocalRead { int threadCount = Integer.valueOf(args[2]); // Setup create a file - Configuration conf = new Configuration(); + final Configuration conf = new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, shortcircuit); conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, "/tmp/TestShortCircuitLocalRead._PORT"); @@ -463,9 +555,13 @@ public class TestShortCircuitLocalRead { public void run() { for (int i = 0; i < iteration; i++) { try { - checkFileContent(fs, file1, dataToWrite, 0); + String user = getCurrentUser(); + checkFileContent(fs.getUri(), file1, dataToWrite, 0, user, conf, + true); } catch (IOException e) { e.printStackTrace(); + } catch (InterruptedException e) { + e.printStackTrace(); } } } Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java Fri Mar 29 21:33:35 2013 @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.qjournal; import static org.junit.Assert.*; +import static org.junit.Assume.*; import java.io.File; import java.io.IOException; @@ -43,7 +44,7 @@ import org.junit.Test; public class TestNNWithQJM { Configuration conf = new HdfsConfiguration(); - private MiniJournalCluster mjc; + private MiniJournalCluster mjc = null; private Path TEST_PATH = new Path("/test-dir"); private Path TEST_PATH_2 = new Path("/test-dir"); @@ -61,10 +62,11 @@ public class TestNNWithQJM { public void stopJNs() throws Exception { if (mjc != null) { mjc.shutdown(); + mjc = null; } } - @Test + @Test (timeout = 30000) public void testLogAndRestart() throws IOException { conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image"); @@ -93,9 +95,12 @@ public class TestNNWithQJM { cluster.shutdown(); } } - - @Test + + @Test (timeout = 30000) public void testNewNamenodeTakesOverWriter() throws Exception { + // Skip the test on Windows. See HDFS-4584. 
+ assumeTrue(!Path.WINDOWS); + File nn1Dir = new File( MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn1"); File nn2Dir = new File( @@ -154,7 +159,7 @@ public class TestNNWithQJM { } } - @Test + @Test (timeout = 30000) public void testMismatchedNNIsRejected() throws Exception { conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image"); @@ -188,8 +193,8 @@ public class TestNNWithQJM { "Unable to start log segment 1: too few journals", ioe); } } - - @Test + + @Test (timeout = 30000) public void testWebPageHasQjmInfo() throws Exception { conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image"); Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java Fri Mar 29 21:33:35 2013 @@ -36,10 +36,7 @@ import org.apache.hadoop.hdfs.server.com import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assume; -import org.junit.Before; -import org.junit.Test; +import org.junit.*; import org.mockito.Mockito; public class TestJournal { @@ -77,7 +74,7 @@ public class TestJournal { IOUtils.closeStream(journal); } - @Test + @Test (timeout = 10000) public void testEpochHandling() throws Exception { assertEquals(0, journal.getLastPromisedEpoch()); NewEpochResponseProto newEpoch = @@ -110,7 +107,7 @@ public class TestJournal { } } - @Test + @Test (timeout = 10000) public void testMaintainCommittedTxId() throws Exception { journal.newEpoch(FAKE_NSINFO, 1); journal.startLogSegment(makeRI(1), 1); @@ -125,7 +122,7 @@ public class TestJournal { assertEquals(3, journal.getCommittedTxnIdForTests()); } - @Test + @Test (timeout = 10000) public void testRestartJournal() throws Exception { journal.newEpoch(FAKE_NSINFO, 1); journal.startLogSegment(makeRI(1), 1); @@ -149,7 +146,7 @@ public class TestJournal { assertEquals(1, newEpoch.getLastSegmentTxId()); } - @Test + @Test (timeout = 10000) public void testFormatResetsCachedValues() throws Exception { journal.newEpoch(FAKE_NSINFO, 12345L); journal.startLogSegment(new RequestInfo(JID, 12345L, 1L, 0L), 1L); @@ -158,6 +155,8 @@ public class TestJournal { assertEquals(12345L, journal.getLastWriterEpoch()); assertTrue(journal.isFormatted()); + // Close the journal in preparation for reformatting it. + journal.close(); journal.format(FAKE_NSINFO_2); assertEquals(0, journal.getLastPromisedEpoch()); @@ -170,7 +169,7 @@ public class TestJournal { * before any transactions are written, that the next newEpoch() call * returns the prior segment txid as its most recent segment. 
*/ - @Test + @Test (timeout = 10000) public void testNewEpochAtBeginningOfSegment() throws Exception { journal.newEpoch(FAKE_NSINFO, 1); journal.startLogSegment(makeRI(1), 1); @@ -182,7 +181,7 @@ public class TestJournal { assertEquals(1, resp.getLastSegmentTxId()); } - @Test + @Test (timeout = 10000) public void testJournalLocking() throws Exception { Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported()); StorageDirectory sd = journal.getStorage().getStorageDir(0); @@ -206,13 +205,14 @@ public class TestJournal { // Hence, should be able to create a new Journal in the same dir. Journal journal2 = new Journal(TEST_LOG_DIR, JID, mockErrorReporter); journal2.newEpoch(FAKE_NSINFO, 2); + journal2.close(); } /** * Test finalizing a segment after some batch of edits were missed. * This should fail, since we validate the log before finalization. */ - @Test + @Test (timeout = 10000) public void testFinalizeWhenEditsAreMissed() throws Exception { journal.newEpoch(FAKE_NSINFO, 1); journal.startLogSegment(makeRI(1), 1); @@ -246,7 +246,7 @@ public class TestJournal { * Ensure that finalizing a segment which doesn't exist throws the * appropriate exception. */ - @Test + @Test (timeout = 10000) public void testFinalizeMissingSegment() throws Exception { journal.newEpoch(FAKE_NSINFO, 1); try { @@ -267,7 +267,7 @@ public class TestJournal { * Eventually, the connection comes back, and the NN tries to start a new * segment at a higher txid. This should abort the old one and succeed. */ - @Test + @Test (timeout = 10000) public void testAbortOldSegmentIfFinalizeIsMissed() throws Exception { journal.newEpoch(FAKE_NSINFO, 1); @@ -296,7 +296,7 @@ public class TestJournal { * Test behavior of startLogSegment() when a segment with the * same transaction ID already exists. 
*/ - @Test + @Test (timeout = 10000) public void testStartLogSegmentWhenAlreadyExists() throws Exception { journal.newEpoch(FAKE_NSINFO, 1); @@ -345,7 +345,7 @@ public class TestJournal { return new RequestInfo(JID, 1, serial, 0); } - @Test + @Test (timeout = 10000) public void testNamespaceVerification() throws Exception { journal.newEpoch(FAKE_NSINFO, 1); Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java Fri Mar 29 21:33:35 2013 @@ -46,6 +46,7 @@ import org.apache.hadoop.metrics2.Metric import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.MetricsAsserts; +import org.apache.hadoop.util.Shell; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -65,6 +66,8 @@ public class TestJournalNode { private Configuration conf = new Configuration(); private IPCLoggerChannel ch; private String journalId; + private File TEST_BUILD_DATA = + new File(System.getProperty("test.build.data", "build/test/data")); static { // Avoid an error when we double-initialize JvmMetrics @@ -96,7 +99,7 @@ public class TestJournalNode { jn.stop(0); } - @Test + @Test(timeout=100000) public void testJournal() throws Exception { MetricsRecordBuilder metrics = MetricsAsserts.getMetrics( journal.getMetricsForTests().getName()); @@ -129,7 +132,7 @@ public class TestJournalNode { } - @Test + @Test(timeout=100000) public void testReturnsSegmentInfoAtEpochTransition() throws Exception { ch.newEpoch(1).get(); ch.setEpoch(1); @@ -157,7 +160,7 @@ public class TestJournalNode { assertEquals(1, response.getLastSegmentTxId()); } - @Test + @Test(timeout=100000) public void testHttpServer() throws Exception { InetSocketAddress addr = jn.getBoundHttpAddress(); assertTrue(addr.getPort() > 0); @@ -210,7 +213,7 @@ public class TestJournalNode { * Test that the JournalNode performs correctly as a Paxos * Acceptor process. */ - @Test + @Test(timeout=100000) public void testAcceptRecoveryBehavior() throws Exception { // We need to run newEpoch() first, or else we have no way to distinguish // different proposals for the same decision. 
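
Illustrative sketch, not part of the committed patch: the hunks in this commit repeatedly change a bare @Test into @Test(timeout=...), and the TestNNWithQJM hunk above adds an assumeTrue() guard for Windows. Assuming only a plain JUnit 4 classpath, the class and method names below are made up for illustration:

    import static org.junit.Assume.assumeTrue;
    import org.junit.Test;

    public class TimeoutAndAssumeSketch {
      // If the body runs longer than 30 seconds, JUnit fails the test
      // instead of letting the build hang indefinitely.
      @Test(timeout = 30000)
      public void testFinishesQuickly() throws Exception {
        Thread.sleep(100); // stands in for real test work
      }

      // When the assumption is false, JUnit reports the test as ignored
      // rather than failed -- the same idea as the Path.WINDOWS guard
      // added to testNewNamenodeTakesOverWriter above.
      @Test(timeout = 30000)
      public void testSkippedOnWindows() {
        assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
        // platform-specific assertions would go here
      }
    }
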
@@ -270,20 +273,27 @@ public class TestJournalNode { } } - @Test + @Test(timeout=100000) public void testFailToStartWithBadConfig() throws Exception { Configuration conf = new Configuration(); conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, "non-absolute-path"); assertJNFailsToStart(conf, "should be an absolute path"); // Existing file which is not a directory - conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, "/dev/null"); - assertJNFailsToStart(conf, "is not a directory"); + File existingFile = new File(TEST_BUILD_DATA, "testjournalnodefile"); + assertTrue(existingFile.createNewFile()); + try { + conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, + existingFile.getAbsolutePath()); + assertJNFailsToStart(conf, "Not a directory"); + } finally { + existingFile.delete(); + } // Directory which cannot be created - conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, "/proc/does-not-exist"); - assertJNFailsToStart(conf, "Could not create"); - + conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, + Shell.WINDOWS ? "\\\\cannotBeCreated" : "/proc/does-not-exist"); + assertJNFailsToStart(conf, "Can not create directory"); } private static void assertJNFailsToStart(Configuration conf, Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java Fri Mar 29 21:33:35 2013 @@ -104,7 +104,7 @@ public class TestNodeCount { while (iter.hasNext()) { DatanodeDescriptor dn = iter.next(); Collection blocks = bm.excessReplicateMap.get(dn.getStorageID()); - if (blocks == null || !blocks.contains(block) ) { + if (blocks == null || !blocks.contains(block.getLocalBlock()) ) { nonExcessDN = dn; break; } Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Fri Mar 29 21:33:35 2013 @@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.DFSConfigK import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -45,7 +46,6 @@ import 
org.apache.hadoop.hdfs.server.nam import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.util.Time; -import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.log4j.spi.LoggingEvent; @@ -419,7 +419,7 @@ public class TestReplicationPolicy { (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); } - final TestAppender appender = new TestAppender(); + final LogVerificationAppender appender = new LogVerificationAppender(); final Logger logger = Logger.getRootLogger(); logger.addAppender(appender); @@ -446,28 +446,6 @@ public class TestReplicationPolicy { HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); } } - - class TestAppender extends AppenderSkeleton { - private final List log = new ArrayList(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - protected void append(final LoggingEvent loggingEvent) { - log.add(loggingEvent); - } - - @Override - public void close() { - } - - public List getLog() { - return new ArrayList(log); - } - } private boolean containsWithinRange(DatanodeDescriptor target, DatanodeDescriptor[] nodes, int startIndex, int endIndex) { Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java Fri Mar 29 21:33:35 2013 @@ -27,33 +27,26 @@ import java.util.List; import org.junit.Test; import static org.junit.Assert.*; import static org.mockito.Mockito.*; -import static org.apache.hadoop.test.MockitoMaker.*; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.server.datanode.DataNode.DataNodeDiskChecker; public class TestDataDirs { - @Test public void testGetDataDirsFromURIs() throws Throwable { - File localDir = make(stub(File.class).returning(true).from.exists()); - when(localDir.mkdir()).thenReturn(true); - FsPermission normalPerm = new FsPermission("700"); - FsPermission badPerm = new FsPermission("000"); - FileStatus stat = make(stub(FileStatus.class) - .returning(normalPerm, normalPerm, badPerm).from.getPermission()); - when(stat.isDirectory()).thenReturn(true); - LocalFileSystem fs = make(stub(LocalFileSystem.class) - .returning(stat).from.getFileStatus(any(Path.class))); - when(fs.pathToFile(any(Path.class))).thenReturn(localDir); + @Test (timeout = 10000) + public void testGetDataDirsFromURIs() throws Throwable { + + DataNodeDiskChecker diskChecker = mock(DataNodeDiskChecker.class); + doThrow(new IOException()).doThrow(new IOException()).doNothing() + .when(diskChecker).checkDir(any(LocalFileSystem.class), any(Path.class)); + LocalFileSystem fs = mock(LocalFileSystem.class); Collection uris = Arrays.asList(new URI("file:/p1/"), new URI("file:/p2/"), new URI("file:/p3/")); - List dirs = 
DataNode.getDataDirsFromURIs(uris, fs, normalPerm); - - verify(fs, times(2)).setPermission(any(Path.class), eq(normalPerm)); - verify(fs, times(6)).getFileStatus(any(Path.class)); - assertEquals("number of valid data dirs", dirs.size(), 1); + List dirs = DataNode.getDataDirsFromURIs(uris, fs, diskChecker); + assertEquals("number of valid data dirs", 1, dirs.size()); + String validDir = dirs.iterator().next().getPath(); + assertEquals("p3 should be valid", new File("/p3").getPath(), validDir); } } Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Fri Mar 29 21:33:35 2013 @@ -272,15 +272,15 @@ public abstract class FSImageTestUtil { for (File dir : dirs) { FSImageTransactionalStorageInspector inspector = inspectStorageDirectory(dir, NameNodeDirType.IMAGE); - FSImageFile latestImage = inspector.getLatestImage(); - assertNotNull("No image in " + dir, latestImage); - long thisTxId = latestImage.getCheckpointTxId(); + List latestImages = inspector.getLatestImages(); + assert(!latestImages.isEmpty()); + long thisTxId = latestImages.get(0).getCheckpointTxId(); if (imageTxId != -1 && thisTxId != imageTxId) { fail("Storage directory " + dir + " does not have the same " + "last image index " + imageTxId + " as another"); } imageTxId = thisTxId; - imageFiles.add(inspector.getLatestImage().getFile()); + imageFiles.add(inspector.getLatestImages().get(0).getFile()); } assertFileContentsSame(imageFiles.toArray(new File[0])); @@ -424,7 +424,7 @@ public abstract class FSImageTestUtil { new FSImageTransactionalStorageInspector(); inspector.inspectDirectory(sd); - return inspector.getLatestImage().getFile(); + return inspector.getLatestImages().get(0).getFile(); } /** @@ -439,8 +439,8 @@ public abstract class FSImageTestUtil { new FSImageTransactionalStorageInspector(); inspector.inspectDirectory(sd); - FSImageFile latestImage = inspector.getLatestImage(); - return (latestImage == null) ? null : latestImage.getFile(); + List latestImages = inspector.getLatestImages(); + return (latestImages.isEmpty()) ? 
null : latestImages.get(0).getFile(); } /** Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Fri Mar 29 21:33:35 2013 @@ -143,7 +143,7 @@ public class OfflineEditsViewerHelper { (DistributedFileSystem)cluster.getFileSystem(); FileContext fc = FileContext.getFileContext(cluster.getURI(0), config); // OP_ADD 0, OP_SET_GENSTAMP 10 - Path pathFileCreate = new Path("/file_create"); + Path pathFileCreate = new Path("/file_create_u\1F431"); FSDataOutputStream s = dfs.create(pathFileCreate); // OP_CLOSE 9 s.close(); Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java Fri Mar 29 21:33:35 2013 @@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.web.WebHdf import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.log4j.Level; +import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import org.apache.log4j.PatternLayout; import org.apache.log4j.RollingFileAppender; @@ -233,9 +234,15 @@ public class TestAuditLogs { /** Sets up log4j logger for auditlogs */ private void setupAuditLogs() throws IOException { + // Shutdown the LogManager to release all logger open file handles. + // Unfortunately, Apache commons logging library does not provide + // means to release underlying loggers. For additional info look up + // commons library FAQ. 
+ LogManager.shutdown(); + File file = new File(auditLogFile); if (file.exists()) { - file.delete(); + assertTrue(file.delete()); } Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); logger.setLevel(Level.INFO); Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Fri Mar 29 21:33:35 2013 @@ -74,6 +74,8 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.ExitUtil.ExitException; import org.apache.hadoop.util.StringUtils; import org.apache.log4j.Level; import org.junit.After; @@ -227,6 +229,111 @@ public class TestCheckpoint { } /* + * Simulate exception during edit replay. + */ + @Test(timeout=30000) + public void testReloadOnEditReplayFailure () throws IOException { + Configuration conf = new HdfsConfiguration(); + FSDataOutputStream fos = null; + SecondaryNameNode secondary = null; + MiniDFSCluster cluster = null; + FileSystem fs = null; + + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) + .build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + secondary = startSecondaryNameNode(conf); + fos = fs.create(new Path("tmpfile0")); + fos.write(new byte[] { 0, 1, 2, 3 }); + secondary.doCheckpoint(); + fos.write(new byte[] { 0, 1, 2, 3 }); + fos.hsync(); + + // Cause merge to fail in next checkpoint. + Mockito.doThrow(new IOException( + "Injecting failure during merge")) + .when(faultInjector).duringMerge(); + + try { + secondary.doCheckpoint(); + fail("Fault injection failed."); + } catch (IOException ioe) { + // This is expected. + } + Mockito.reset(faultInjector); + + // The error must be recorded, so next checkpoint will reload image. + fos.write(new byte[] { 0, 1, 2, 3 }); + fos.hsync(); + + assertTrue("Another checkpoint should have reloaded image", + secondary.doCheckpoint()); + } finally { + if (secondary != null) { + secondary.shutdown(); + } + if (fs != null) { + fs.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + Mockito.reset(faultInjector); + } + } + + /* + * Simulate 2NN exit due to too many merge failures. 
+ */ + @Test(timeout=10000) + public void testTooManyEditReplayFailures() throws IOException { + Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY, "1"); + conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, "1"); + + FSDataOutputStream fos = null; + SecondaryNameNode secondary = null; + MiniDFSCluster cluster = null; + FileSystem fs = null; + + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) + .checkExitOnShutdown(false).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + fos = fs.create(new Path("tmpfile0")); + fos.write(new byte[] { 0, 1, 2, 3 }); + + // Cause merge to fail in next checkpoint. + Mockito.doThrow(new IOException( + "Injecting failure during merge")) + .when(faultInjector).duringMerge(); + + secondary = startSecondaryNameNode(conf); + secondary.doWork(); + // Fail if we get here. + fail("2NN did not exit."); + } catch (ExitException ee) { + // ignore + ExitUtil.resetFirstExitException(); + assertEquals("Max retries", 1, secondary.getMergeErrorCount() - 1); + } finally { + if (secondary != null) { + secondary.shutdown(); + } + if (fs != null) { + fs.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + Mockito.reset(faultInjector); + } + } + + /* * Simulate namenode crashing after rolling edit log. */ @Test @@ -1305,6 +1412,60 @@ public class TestCheckpoint { } /** + * Test NN restart if a failure happens in between creating the fsimage + * MD5 file and renaming the fsimage. + */ + @Test(timeout=30000) + public void testFailureBeforeRename () throws IOException { + Configuration conf = new HdfsConfiguration(); + FSDataOutputStream fos = null; + SecondaryNameNode secondary = null; + MiniDFSCluster cluster = null; + FileSystem fs = null; + NameNode namenode = null; + + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) + .build(); + cluster.waitActive(); + namenode = cluster.getNameNode(); + fs = cluster.getFileSystem(); + secondary = startSecondaryNameNode(conf); + fos = fs.create(new Path("tmpfile0")); + fos.write(new byte[] { 0, 1, 2, 3 }); + secondary.doCheckpoint(); + fos.write(new byte[] { 0, 1, 2, 3 }); + fos.hsync(); + + // Cause merge to fail in next checkpoint. + Mockito.doThrow(new IOException( + "Injecting failure after MD5Rename")) + .when(faultInjector).afterMD5Rename(); + + try { + secondary.doCheckpoint(); + fail("Fault injection failed."); + } catch (IOException ioe) { + // This is expected. + } + Mockito.reset(faultInjector); + // Namenode should still restart successfully + cluster.restartNameNode(); + } finally { + if (secondary != null) { + secondary.shutdown(); + } + if (fs != null) { + fs.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + Mockito.reset(faultInjector); + } + } + + /** * Test case where two secondary namenodes are checkpointing the same * NameNode. This differs from {@link #testMultipleSecondaryNamenodes()} * since that test runs against two distinct NNs. 
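
Illustrative sketch, not part of the committed patch: the new TestCheckpoint cases above drive merge failures through Mockito stubbing on the faultInjector hook (doThrow(...).when(faultInjector).duringMerge()) and then clear the stub with Mockito.reset(). A minimal, self-contained version of that stubbing pattern, assuming Mockito on the classpath; the FaultInjector interface here is hypothetical:

    import static org.mockito.Mockito.*;
    import java.io.IOException;

    public class FaultInjectionSketch {
      // Hypothetical hook interface; the real tests stub the faultInjector
      // object referenced in the hunks above.
      interface FaultInjector {
        void duringMerge() throws IOException;
      }

      void demonstrate() throws IOException {
        FaultInjector injector = mock(FaultInjector.class);

        // Make the next call to duringMerge() throw, simulating a failed merge.
        doThrow(new IOException("Injecting failure during merge"))
            .when(injector).duringMerge();

        try {
          injector.duringMerge();   // code under test would hit this hook
        } catch (IOException expected) {
          // expected: the injected failure surfaces here
        }

        // Clear the stubbing so later checkpoints succeed again.
        reset(injector);
        injector.duringMerge();     // no longer throws
      }
    }
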
Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Fri Mar 29 21:33:35 2013 @@ -861,6 +861,11 @@ public class TestEditLog { public boolean isInProgress() { return true; } + + @Override + public void setMaxOpSize(int maxOpSize) { + reader.setMaxOpSize(maxOpSize); + } } @Test Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java Fri Mar 29 21:33:35 2013 @@ -57,7 +57,7 @@ public class TestFSImageStorageInspector inspector.inspectDirectory(mockDir); assertEquals(2, inspector.foundImages.size()); - FSImageFile latestImage = inspector.getLatestImage(); + FSImageFile latestImage = inspector.getLatestImages().get(0); assertEquals(456, latestImage.txId); assertSame(mockDir, latestImage.sd); assertTrue(inspector.isUpgradeFinalized()); Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java Fri Mar 29 21:33:35 2013 @@ -120,12 +120,13 @@ public class TestHostsFiles { InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress(); LOG.info("nnaddr = '" + nnHttpAddress + "'"); - URL nnjsp = new URL("http://" + nnHttpAddress.getHostName() + ":" + nnHttpAddress.getPort() + "/dfshealth.jsp"); + String nnHostName = nnHttpAddress.getHostName(); + URL nnjsp = new URL("http://" + nnHostName + ":" + nnHttpAddress.getPort() + "/dfshealth.jsp"); LOG.info("fetching " + nnjsp); String dfshealthPage = StringEscapeUtils.unescapeHtml(DFSTestUtil.urlGet(nnjsp)); LOG.info("got " + dfshealthPage); - assertTrue("dfshealth should contain localhost, got:" + dfshealthPage, - 
dfshealthPage.contains("localhost")); + assertTrue("dfshealth should contain " + nnHostName + ", got:" + dfshealthPage, + dfshealthPage.contains(nnHostName)); } finally { cluster.shutdown(); Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Fri Mar 29 21:33:35 2013 @@ -184,34 +184,41 @@ public class TestINodeFile { long fileLen = 1024; replication = 3; Configuration conf = new Configuration(); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes( - replication).build(); - cluster.waitActive(); - FSNamesystem fsn = cluster.getNamesystem(); - FSDirectory fsdir = fsn.getFSDirectory(); - DistributedFileSystem dfs = cluster.getFileSystem(); - - // Create a file for test - final Path dir = new Path("/dir"); - final Path file = new Path(dir, "file"); - DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L); - - // Check the full path name of the INode associating with the file - INode fnode = fsdir.getINode(file.toString()); - assertEquals(file.toString(), fnode.getFullPathName()); - - // Call FSDirectory#unprotectedSetQuota which calls - // INodeDirectory#replaceChild - dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10); - final Path newDir = new Path("/newdir"); - final Path newFile = new Path(newDir, "file"); - // Also rename dir - dfs.rename(dir, newDir, Options.Rename.OVERWRITE); - // /dir/file now should be renamed to /newdir/file - fnode = fsdir.getINode(newFile.toString()); - // getFullPathName can return correct result only if the parent field of - // child node is set correctly - assertEquals(newFile.toString(), fnode.getFullPathName()); + MiniDFSCluster cluster = null; + try { + cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(replication).build(); + cluster.waitActive(); + FSNamesystem fsn = cluster.getNamesystem(); + FSDirectory fsdir = fsn.getFSDirectory(); + DistributedFileSystem dfs = cluster.getFileSystem(); + + // Create a file for test + final Path dir = new Path("/dir"); + final Path file = new Path(dir, "file"); + DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L); + + // Check the full path name of the INode associating with the file + INode fnode = fsdir.getINode(file.toString()); + assertEquals(file.toString(), fnode.getFullPathName()); + + // Call FSDirectory#unprotectedSetQuota which calls + // INodeDirectory#replaceChild + dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10); + final Path newDir = new Path("/newdir"); + final Path newFile = new Path(newDir, "file"); + // Also rename dir + dfs.rename(dir, newDir, Options.Rename.OVERWRITE); + // /dir/file now should be renamed to /newdir/file + fnode = fsdir.getINode(newFile.toString()); + // getFullPathName can return correct result only if the parent field of + // child node is set correctly + assertEquals(newFile.toString(), fnode.getFullPathName()); + } finally { + if (cluster != 
null) { + cluster.shutdown(); + } + } } @Test @@ -385,41 +392,47 @@ public class TestINodeFile { Configuration conf = new Configuration(); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) - .build(); - cluster.waitActive(); - - FSNamesystem fsn = cluster.getNamesystem(); - long lastId = fsn.getLastInodeId(); - - assertTrue(lastId == 1001); - - // Create one directory and the last inode id should increase to 1002 - FileSystem fs = cluster.getFileSystem(); - Path path = new Path("/test1"); - assertTrue(fs.mkdirs(path)); - assertTrue(fsn.getLastInodeId() == 1002); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); - // Use namenode rpc to create a file - NamenodeProtocols nnrpc = cluster.getNameNodeRpc(); - HdfsFileStatus fileStatus = nnrpc.create("/test1/file", new FsPermission( - (short) 0755), "client", - new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), true, - (short) 1, 128 * 1024 * 1024L); - assertTrue(fsn.getLastInodeId() == 1003); - assertTrue(fileStatus.getFileId() == 1003); + FSNamesystem fsn = cluster.getNamesystem(); + long lastId = fsn.getLastInodeId(); - // Rename doesn't increase inode id - Path renamedPath = new Path("/test2"); - fs.rename(path, renamedPath); - assertTrue(fsn.getLastInodeId() == 1003); + assertTrue(lastId == 1001); - cluster.restartNameNode(); - cluster.waitActive(); - // Make sure empty editlog can be handled - cluster.restartNameNode(); - cluster.waitActive(); - assertTrue(fsn.getLastInodeId() == 1003); + // Create one directory and the last inode id should increase to 1002 + FileSystem fs = cluster.getFileSystem(); + Path path = new Path("/test1"); + assertTrue(fs.mkdirs(path)); + assertTrue(fsn.getLastInodeId() == 1002); + + // Use namenode rpc to create a file + NamenodeProtocols nnrpc = cluster.getNameNodeRpc(); + HdfsFileStatus fileStatus = nnrpc.create("/test1/file", new FsPermission( + (short) 0755), "client", + new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), true, + (short) 1, 128 * 1024 * 1024L); + assertTrue(fsn.getLastInodeId() == 1003); + assertTrue(fileStatus.getFileId() == 1003); + + // Rename doesn't increase inode id + Path renamedPath = new Path("/test2"); + fs.rename(path, renamedPath); + assertTrue(fsn.getLastInodeId() == 1003); + + cluster.restartNameNode(); + cluster.waitActive(); + // Make sure empty editlog can be handled + cluster.restartNameNode(); + cluster.waitActive(); + assertTrue(fsn.getLastInodeId() == 1003); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } } @Test Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java Fri Mar 29 21:33:35 2013 @@ -83,6 +83,7 @@ public class TestNameNodeRecovery { elfos.close(); 
elfos = null; elfis = new EditLogFileInputStream(TEST_LOG_NAME); + elfis.setMaxOpSize(elts.getMaxOpSize()); // reading through normally will get you an exception Set validTxIds = elts.getValidTxIds(); @@ -143,7 +144,7 @@ public class TestNameNodeRecovery { /** * A test scenario for the edit log */ - private interface EditLogTestSetup { + private static abstract class EditLogTestSetup { /** * Set up the edit log. */ @@ -162,6 +163,13 @@ public class TestNameNodeRecovery { * edit log. **/ abstract public Set getValidTxIds(); + + /** + * Return the maximum opcode size we will use for input. + */ + public int getMaxOpSize() { + return DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT; + } } static void padEditLog(EditLogOutputStream elos, int paddingLength) @@ -182,10 +190,10 @@ public class TestNameNodeRecovery { } static void addDeleteOpcode(EditLogOutputStream elos, - OpInstanceCache cache) throws IOException { + OpInstanceCache cache, long txId, String path) throws IOException { DeleteOp op = DeleteOp.getInstance(cache); - op.setTransactionId(0x0); - op.setPath("/foo"); + op.setTransactionId(txId); + op.setPath(path); op.setTimestamp(0); elos.write(op); } @@ -198,7 +206,7 @@ public class TestNameNodeRecovery { * able to handle any amount of padding (including no padding) without * throwing an exception. */ - private static class EltsTestEmptyLog implements EditLogTestSetup { + private static class EltsTestEmptyLog extends EditLogTestSetup { private int paddingLength; public EltsTestEmptyLog(int paddingLength) { @@ -243,13 +251,49 @@ public class TestNameNodeRecovery { } /** + * Test using a non-default maximum opcode length. + */ + private static class EltsTestNonDefaultMaxOpSize extends EditLogTestSetup { + public EltsTestNonDefaultMaxOpSize() { + } + + @Override + public void addTransactionsToLog(EditLogOutputStream elos, + OpInstanceCache cache) throws IOException { + addDeleteOpcode(elos, cache, 0, "/foo"); + addDeleteOpcode(elos, cache, 1, + "/supercalifragalisticexpialadocius.supercalifragalisticexpialadocius"); + } + + @Override + public long getLastValidTxId() { + return 0; + } + + @Override + public Set getValidTxIds() { + return Sets.newHashSet(0L); + } + + public int getMaxOpSize() { + return 30; + } + } + + /** Test an empty edit log with extra-long padding */ + @Test(timeout=180000) + public void testNonDefaultMaxOpSize() throws IOException { + runEditLogTest(new EltsTestNonDefaultMaxOpSize()); + } + + /** * Test the scenario where an edit log contains some padding (0xff) bytes * followed by valid opcode data. * * These edit logs are corrupt, but all the opcodes should be recoverable * with recovery mode. 
*/ - private static class EltsTestOpcodesAfterPadding implements EditLogTestSetup { + private static class EltsTestOpcodesAfterPadding extends EditLogTestSetup { private int paddingLength; public EltsTestOpcodesAfterPadding(int paddingLength) { @@ -260,7 +304,7 @@ public class TestNameNodeRecovery { public void addTransactionsToLog(EditLogOutputStream elos, OpInstanceCache cache) throws IOException { padEditLog(elos, paddingLength); - addDeleteOpcode(elos, cache); + addDeleteOpcode(elos, cache, 0, "/foo"); } @Override @@ -286,7 +330,7 @@ public class TestNameNodeRecovery { 3 * EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH)); } - private static class EltsTestGarbageInEditLog implements EditLogTestSetup { + private static class EltsTestGarbageInEditLog extends EditLogTestSetup { final private long BAD_TXID = 4; final private long MAX_TXID = 10; Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java Fri Mar 29 21:33:35 2013 @@ -158,7 +158,7 @@ public class TestProcessCorruptBlocks { * (corrupt replica should be removed since number of good * replicas (1) is equal to replication factor (1)) */ - @Test + @Test(timeout=20000) public void testWithReplicationFactorAsOne() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); @@ -183,9 +183,14 @@ public class TestProcessCorruptBlocks { namesystem.setReplication(fileName.toString(), (short) 1); // wait for 3 seconds so that all block reports are processed. 
- try { - Thread.sleep(3000); - } catch (InterruptedException ignored) { + for (int i = 0; i < 10; i++) { + try { + Thread.sleep(1000); + } catch (InterruptedException ignored) { + } + if (countReplicas(namesystem, block).corruptReplicas() == 0) { + break; + } } assertEquals(1, countReplicas(namesystem, block).liveReplicas()); Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Fri Mar 29 21:33:35 2013 @@ -41,6 +41,7 @@ import org.apache.commons.logging.impl.L import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -219,7 +220,7 @@ public class TestSaveNamespace { * Verify that a saveNamespace command brings faulty directories * in fs.name.dir and fs.edit.dir back online. */ - @Test + @Test (timeout=30000) public void testReinsertnamedirsInSavenamespace() throws Exception { // create a configuration with the key to restore error // directories in fs.name.dir @@ -237,10 +238,13 @@ public class TestSaveNamespace { FSImage spyImage = spy(originalImage); fsn.dir.fsImage = spyImage; + FileSystem fs = FileSystem.getLocal(conf); File rootDir = storage.getStorageDir(0).getRoot(); - rootDir.setExecutable(false); - rootDir.setWritable(false); - rootDir.setReadable(false); + Path rootPath = new Path(rootDir.getPath(), "current"); + final FsPermission permissionNone = new FsPermission((short) 0); + final FsPermission permissionAll = new FsPermission( + FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE); + fs.setPermission(rootPath, permissionNone); try { doAnEdit(fsn, 1); @@ -257,9 +261,7 @@ public class TestSaveNamespace { " bad directories.", storage.getRemovedStorageDirs().size() == 1); - rootDir.setExecutable(true); - rootDir.setWritable(true); - rootDir.setReadable(true); + fs.setPermission(rootPath, permissionAll); // The next call to savenamespace should try inserting the // erroneous directory back to fs.name.dir. 
This command should @@ -290,9 +292,7 @@ public class TestSaveNamespace { LOG.info("Reloaded image is good."); } finally { if (rootDir.exists()) { - rootDir.setExecutable(true); - rootDir.setWritable(true); - rootDir.setReadable(true); + fs.setPermission(rootPath, permissionAll); } if (fsn != null) { @@ -305,27 +305,27 @@ public class TestSaveNamespace { } } - @Test + @Test (timeout=30000) public void testRTEWhileSavingSecondImage() throws Exception { saveNamespaceWithInjectedFault(Fault.SAVE_SECOND_FSIMAGE_RTE); } - @Test + @Test (timeout=30000) public void testIOEWhileSavingSecondImage() throws Exception { saveNamespaceWithInjectedFault(Fault.SAVE_SECOND_FSIMAGE_IOE); } - @Test + @Test (timeout=30000) public void testCrashInAllImageDirs() throws Exception { saveNamespaceWithInjectedFault(Fault.SAVE_ALL_FSIMAGES); } - @Test + @Test (timeout=30000) public void testCrashWhenWritingVersionFiles() throws Exception { saveNamespaceWithInjectedFault(Fault.WRITE_STORAGE_ALL); } - @Test + @Test (timeout=30000) public void testCrashWhenWritingVersionFileInOneDir() throws Exception { saveNamespaceWithInjectedFault(Fault.WRITE_STORAGE_ONE); } @@ -337,7 +337,7 @@ public class TestSaveNamespace { * failed checkpoint since it only affected ".ckpt" files, not * valid image files */ - @Test + @Test (timeout=30000) public void testFailedSaveNamespace() throws Exception { doTestFailedSaveNamespace(false); } @@ -347,7 +347,7 @@ public class TestSaveNamespace { * the operator restores the directories and calls it again. * This should leave the NN in a clean state for next start. */ - @Test + @Test (timeout=30000) public void testFailedSaveNamespaceWithRecovery() throws Exception { doTestFailedSaveNamespace(true); } @@ -421,7 +421,7 @@ public class TestSaveNamespace { } } - @Test + @Test (timeout=30000) public void testSaveWhileEditsRolled() throws Exception { Configuration conf = getConf(); NameNode.initMetrics(conf, NamenodeRole.NAMENODE); @@ -457,7 +457,7 @@ public class TestSaveNamespace { } } - @Test + @Test (timeout=30000) public void testTxIdPersistence() throws Exception { Configuration conf = getConf(); NameNode.initMetrics(conf, NamenodeRole.NAMENODE); @@ -580,7 +580,7 @@ public class TestSaveNamespace { * open lease and destination directory exist. 
* This test is a regression for HDFS-2827 */ - @Test + @Test (timeout=30000) public void testSaveNamespaceWithRenamedLease() throws Exception { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()) .numDataNodes(1).build(); @@ -603,7 +603,7 @@ public class TestSaveNamespace { } } - @Test + @Test (timeout=30000) public void testSaveNamespaceWithDanglingLease() throws Exception { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()) .numDataNodes(1).build(); Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Fri Mar 29 21:33:35 2013 @@ -31,12 +31,10 @@ import java.net.URI; import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; @@ -46,17 +44,21 @@ import org.apache.hadoop.fs.permission.P import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.MD5Hash; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -111,11 +113,12 @@ public class TestStartup { } } - /** - * start MiniDFScluster, create a file (to create edits) and do a checkpoint + /** + * Create a number of fsimage checkpoints + * @param count number of checkpoints to create * @throws IOException */ - public void createCheckPoint() throws IOException { + public void createCheckPoint(int count) throws IOException { LOG.info("--starting mini cluster"); // manage dirs parameter set to false MiniDFSCluster cluster = null; @@ -133,15 +136,18 @@ public class TestStartup { sn = new SecondaryNameNode(config); assertNotNull(sn); - // create a file - FileSystem fileSys = cluster.getFileSystem(); - Path file1 = new Path("t1"); - DFSTestUtil.createFile(fileSys, file1, fileSize, 
fileSize, blockSize, - (short) 1, seed); - - LOG.info("--doing checkpoint"); - sn.doCheckpoint(); // this shouldn't fail - LOG.info("--done checkpoint"); + // Create count new files and checkpoints + for (int i=0; i nameDirs = (List)FSNamesystem.getNamespaceDirs(config); + // Corrupt the md5 files in all the namedirs + for (URI uri: nameDirs) { + // Directory layout looks like: + // test/data/dfs/nameN/current/{fsimage,edits,...} + File nameDir = new File(uri.getPath()); + File dfsDir = nameDir.getParentFile(); + assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir + // Set the md5 file to all zeros + File imageFile = new File(nameDir, + Storage.STORAGE_DIR_CURRENT + "/" + + NNStorage.getImageFileName(0)); + MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16])); + // Only need to corrupt one if !corruptAll + if (!corruptAll) { + break; + } } } @@ -165,7 +200,7 @@ public class TestStartup { // get name dir and its length, then delete and recreate the directory File dir = new File(nameDirs.get(0).getPath()); // has only one - this.fsimageLength = new File(new File(dir, "current"), + this.fsimageLength = new File(new File(dir, Storage.STORAGE_DIR_CURRENT), NameNodeFile.IMAGE.getName()).length(); if(dir.exists() && !(FileUtil.fullyDelete(dir))) @@ -178,7 +213,7 @@ public class TestStartup { dir = new File( nameEditsDirs.get(0).getPath()); //has only one - this.editsLength = new File(new File(dir, "current"), + this.editsLength = new File(new File(dir, Storage.STORAGE_DIR_CURRENT), NameNodeFile.EDITS.getName()).length(); if(dir.exists() && !(FileUtil.fullyDelete(dir))) @@ -262,7 +297,7 @@ public class TestStartup { config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, fileAsURI(new File(hdfsDir, "chkpt")).toString()); - createCheckPoint(); + createCheckPoint(1); corruptNameNodeFiles(); checkNameNodeFiles(); @@ -289,7 +324,7 @@ public class TestStartup { config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, fileAsURI(new File(hdfsDir, "chkpt")).toString()); - createCheckPoint(); + createCheckPoint(1); corruptNameNodeFiles(); checkNameNodeFiles(); } @@ -447,20 +482,18 @@ public class TestStartup { FileSystem fs = cluster.getFileSystem(); fs.mkdirs(new Path("/test")); - // Directory layout looks like: - // test/data/dfs/nameN/current/{fsimage,edits,...} - File nameDir = new File(cluster.getNameDirs(0).iterator().next().getPath()); - File dfsDir = nameDir.getParentFile(); - assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir - LOG.info("Shutting down cluster #1"); cluster.shutdown(); cluster = null; - // Corrupt the md5 file to all 0s - File imageFile = new File(nameDir, "current/" + NNStorage.getImageFileName(0)); - MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16])); - + // Corrupt the md5 files in all the namedirs + corruptFSImageMD5(true); + + // Attach our own log appender so we can verify output + final LogVerificationAppender appender = new LogVerificationAppender(); + final Logger logger = Logger.getRootLogger(); + logger.addAppender(appender); + // Try to start a new cluster LOG.info("\n===========================================\n" + "Starting same cluster after simulated crash"); @@ -471,9 +504,12 @@ public class TestStartup { .build(); fail("Should not have successfully started with corrupt image"); } catch (IOException ioe) { - if (!ioe.getCause().getMessage().contains("is corrupt with MD5")) { - throw ioe; - } + GenericTestUtils.assertExceptionContains( + "Failed to load an FSImage file!", ioe); + int md5failures = 
appender.countExceptionsWithMessage( + " is corrupt with MD5 checksum of "); + // Two namedirs, so should have seen two failures + assertEquals(2, md5failures); } } finally { if (cluster != null) { @@ -482,6 +518,21 @@ public class TestStartup { } } + @Test(timeout=30000) + public void testCorruptImageFallback() throws IOException { + // Create two checkpoints + createCheckPoint(2); + // Delete a single md5sum + corruptFSImageMD5(false); + // Should still be able to start + MiniDFSCluster cluster = new MiniDFSCluster.Builder(config) + .format(false) + .manageDataDfsDirs(false) + .manageNameDfsDirs(false) + .build(); + cluster.waitActive(); +} + /** * This test tests hosts include list contains host names. After namenode * restarts, the still alive datanodes should not have any trouble in getting Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java Fri Mar 29 21:33:35 2013 @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.na import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; @@ -26,6 +27,8 @@ import java.io.OutputStream; import java.net.URI; import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -43,7 +46,10 @@ import org.apache.hadoop.hdfs.util.Cance import org.apache.hadoop.io.compress.CompressionCodecFactory; import org.apache.hadoop.io.compress.CompressionOutputStream; import org.apache.hadoop.io.compress.GzipCodec; +import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; +import org.apache.hadoop.util.ThreadUtil; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -59,6 +65,8 @@ public class TestStandbyCheckpoints { protected MiniDFSCluster cluster; protected NameNode nn0, nn1; protected FileSystem fs; + + private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class); @SuppressWarnings("rawtypes") @Before @@ -231,6 +239,49 @@ public class TestStandbyCheckpoints { assertTrue(canceledOne); } + + /** + * Make sure that clients will receive StandbyExceptions even when a + * checkpoint is in progress on the SBN, and therefore the StandbyCheckpointer + * thread will have FSNS lock. Regression test for HDFS-4591. + */ + @Test(timeout=120000) + public void testStandbyExceptionThrownDuringCheckpoint() throws Exception { + + // Set it up so that we know when the SBN checkpoint starts and ends. 
+ FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1); + DelayAnswer answerer = new DelayAnswer(LOG); + Mockito.doAnswer(answerer).when(spyImage1) + .saveNamespace(Mockito.any(FSNamesystem.class), + Mockito.any(Canceler.class)); + + // Perform some edits and wait for a checkpoint to start on the SBN. + doEdits(0, 2000); + nn0.getRpcServer().rollEditLog(); + answerer.waitForCall(); + answerer.proceed(); + assertTrue("SBN is not performing checkpoint but it should be.", + answerer.getFireCount() == 1 && answerer.getResultCount() == 0); + + // Make sure that the lock has actually been taken by the checkpointing + // thread. + ThreadUtil.sleepAtLeastIgnoreInterrupts(1000); + try { + // Perform an RPC to the SBN and make sure it throws a StandbyException. + nn1.getRpcServer().getFileInfo("/"); + fail("Should have thrown StandbyException, but instead succeeded."); + } catch (StandbyException se) { + GenericTestUtils.assertExceptionContains("is not supported", se); + } + + // Make sure that the checkpoint is still going on, implying that the client + // RPC to the SBN happened during the checkpoint. + assertTrue("SBN should have still been checkpointing.", + answerer.getFireCount() == 1 && answerer.getResultCount() == 0); + answerer.waitForResult(); + assertTrue("SBN should have finished checkpointing.", + answerer.getFireCount() == 1 && answerer.getResultCount() == 1); + } private void doEdits(int start, int stop) throws IOException { for (int i = start; i < stop; i++) { Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java Fri Mar 29 21:33:35 2013 @@ -143,6 +143,7 @@ public class TestStandbyIsHot { conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024); // We read from the standby to watch block locations HAUtil.setAllowStandbyReads(conf, true); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .nnTopology(MiniDFSNNTopology.simpleHATopology()) Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Fri Mar 29 21:33:35 2013 @@ -123,7 +123,25 @@ public class 
TestNameNodeMetrics { stm.read(buffer,0,4); stm.close(); } - + + /** + * Test that capacity metrics are exported and pass + * basic sanity tests. + */ + @Test (timeout = 1800) + public void testCapacityMetrics() throws Exception { + MetricsRecordBuilder rb = getMetrics(NS_METRICS); + long capacityTotal = MetricsAsserts.getLongGauge("CapacityTotal", rb); + assert(capacityTotal != 0); + long capacityUsed = MetricsAsserts.getLongGauge("CapacityUsed", rb); + long capacityRemaining = + MetricsAsserts.getLongGauge("CapacityRemaining", rb); + long capacityUsedNonDFS = + MetricsAsserts.getLongGauge("CapacityUsedNonDFS", rb); + assert(capacityUsed + capacityRemaining + capacityUsedNonDFS == + capacityTotal); + } + /** Test metrics indicating the number of stale DataNodes */ @Test public void testStaleNodes() throws Exception { Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java Fri Mar 29 21:33:35 2013 @@ -224,7 +224,7 @@ public class TestGetConf { /** * Test empty configuration */ - @Test + @Test(timeout=10000) public void testEmptyConf() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(false); // Verify getting addresses fails @@ -247,7 +247,7 @@ public class TestGetConf { /** * Test invalid argument to the tool */ - @Test + @Test(timeout=10000) public void testInvalidArgument() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); String[] args = {"-invalidArgument"}; @@ -259,7 +259,7 @@ public class TestGetConf { * Tests to make sure the returned addresses are correct in case of default * configuration with no federation */ - @Test + @Test(timeout=10000) public void testNonFederation() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(false); @@ -294,7 +294,7 @@ public class TestGetConf { * Tests to make sure the returned addresses are correct in case of federation * of setup. 
*/ - @Test + @Test(timeout=10000) public void testFederation() throws Exception { final int nsCount = 10; HdfsConfiguration conf = new HdfsConfiguration(false); @@ -333,15 +333,16 @@ public class TestGetConf { verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses); } - @Test + @Test(timeout=10000) public void testGetSpecificKey() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); conf.set("mykey", " myval "); String[] args = {"-confKey", "mykey"}; - assertTrue(runTool(conf, args, true).equals("myval\n")); + String toolResult = runTool(conf, args, true); + assertEquals(String.format("myval%n"), toolResult); } - @Test + @Test(timeout=10000) public void testExtraArgsThrowsError() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); conf.set("mykey", "myval"); @@ -354,7 +355,7 @@ public class TestGetConf { * Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP}, * {@link Command#SECONDARY} and {@link Command#NNRPCADDRESSES} */ - @Test + @Test(timeout=10000) public void testTool() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(false); for (Command cmd : Command.values()) { Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestGSet.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestGSet.java?rev=1462652&r1=1462651&r2=1462652&view=diff ============================================================================== --- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestGSet.java (original) +++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestGSet.java Fri Mar 29 21:33:35 2013 @@ -21,6 +21,7 @@ import java.util.ConcurrentModificationE import java.util.Iterator; import java.util.Random; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; @@ -452,4 +453,81 @@ public class TestGSet { next = e; } } + + /** + * Test for {@link LightWeightGSet#computeCapacity(double, String)} + * with invalid percent less than 0. + */ + @Test(expected=HadoopIllegalArgumentException.class) + public void testComputeCapacityNegativePercent() { + LightWeightGSet.computeCapacity(1024, -1.0, "testMap"); + } + + /** + * Test for {@link LightWeightGSet#computeCapacity(double, String)} + * with invalid percent greater than 100. + */ + @Test(expected=HadoopIllegalArgumentException.class) + public void testComputeCapacityInvalidPercent() { + LightWeightGSet.computeCapacity(1024, 101.0, "testMap"); + } + + /** + * Test for {@link LightWeightGSet#computeCapacity(double, String)} + * with invalid negative max memory + */ + @Test(expected=HadoopIllegalArgumentException.class) + public void testComputeCapacityInvalidMemory() { + LightWeightGSet.computeCapacity(-1, 50.0, "testMap"); + } + + private static boolean isPowerOfTwo(int num) { + return num == 0 || (num > 0 && Integer.bitCount(num) == 1); + } + + /** Return capacity as percentage of total memory */ + private static int getPercent(long total, int capacity) { + // Reference size in bytes + double referenceSize = + System.getProperty("sun.arch.data.model").equals("32") ? 
4.0 : 8.0; + return (int)(((capacity * referenceSize)/total) * 100.0); + } + + /** Return capacity as percentage of total memory */ + private static void testCapacity(long maxMemory, double percent) { + int capacity = LightWeightGSet.computeCapacity(maxMemory, percent, "map"); + LightWeightGSet.LOG.info("Validating - total memory " + maxMemory + " percent " + + percent + " returned capacity " + capacity); + // Returned capacity is zero or power of two + Assert.assertTrue(isPowerOfTwo(capacity)); + + // Ensure the capacity returned is the nearest to the asked perecentage + int capacityPercent = getPercent(maxMemory, capacity); + if (capacityPercent == percent) { + return; + } else if (capacityPercent > percent) { + Assert.assertTrue(getPercent(maxMemory, capacity * 2) > percent); + } else { + Assert.assertTrue(getPercent(maxMemory, capacity / 2) < percent); + } + } + + /** + * Test for {@link LightWeightGSet#computeCapacity(double, String)} + */ + @Test + public void testComputeCapacity() { + // Tests for boundary conditions where percent or memory are zero + testCapacity(0, 0.0); + testCapacity(100, 0.0); + testCapacity(0, 100.0); + + // Compute capacity for some 100 random max memory and percentage + Random r = new Random(); + for (int i = 0; i < 100; i++) { + long maxMemory = r.nextInt(Integer.MAX_VALUE); + double percent = r.nextInt(101); + testCapacity(maxMemory, percent); + } + } }
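The TestProcessCorruptBlocks change above replaces a fixed three-second sleep with a bounded polling loop that re-checks the corrupt-replica count once per second for up to ten seconds. The following is a minimal standalone sketch of that wait-until-condition pattern; the Condition interface and class names here are illustrative only and are not part of the commit.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * Illustrative wait-until-condition helper; names are examples only.
 */
public class PollUntilSketch {

  /** A condition to poll; returns true once the expected state is reached. */
  public interface Condition {
    boolean isMet();
  }

  /**
   * Poll the condition every intervalMs milliseconds until it is met or
   * timeoutMs elapses, whichever comes first.
   */
  public static void waitFor(Condition condition, long intervalMs, long timeoutMs)
      throws TimeoutException, InterruptedException {
    long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
    while (!condition.isMet()) {
      if (System.nanoTime() > deadline) {
        throw new TimeoutException("condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);
    }
  }

  public static void main(String[] args) throws Exception {
    final long start = System.currentTimeMillis();
    // Example condition: at least three seconds have passed since start.
    waitFor(new Condition() {
      @Override
      public boolean isMet() {
        return System.currentTimeMillis() - start >= 3000;
      }
    }, 1000, 10000);
    System.out.println("condition met after ~" + (System.currentTimeMillis() - start) + " ms");
  }
}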
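TestSaveNamespace now toggles directory access through FileSystem.setPermission and FsPermission objects rather than java.io.File.setReadable/setWritable/setExecutable. The sketch below shows how the two FsPermission values used there (no access, and rwx/r-x/r-x) map to octal modes on the local file system; the path is an example, not one used by the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

/** Demonstrates the two FsPermission values on the local file system. */
public class FsPermissionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem localFs = FileSystem.getLocal(conf);

    // Example path only; the test operates on a name directory instead.
    Path dir = new Path("/tmp/fs-permission-sketch");
    localFs.mkdirs(dir);

    // No access at all: octal mode 000, used to simulate a failed directory.
    FsPermission none = new FsPermission((short) 0);
    // rwx for the owner, r-x for group and other: octal mode 755.
    FsPermission all =
        new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);

    localFs.setPermission(dir, none);
    System.out.printf("mode is now %03o%n",
        localFs.getFileStatus(dir).getPermission().toShort());

    // Restore access so the directory can be removed again.
    localFs.setPermission(dir, all);
    localFs.delete(dir, true);
  }
}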
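TestStartup now attaches a LogVerificationAppender to the root logger and counts exceptions whose message mentions the MD5 mismatch. The class below is only a rough sketch of that idea, a log4j 1.x AppenderSkeleton that records events and counts matching throwables; it is not the LogVerificationAppender source.

import java.util.ArrayList;
import java.util.List;

import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import org.apache.log4j.spi.ThrowableInformation;

/** Rough sketch of a capturing appender; not the LogVerificationAppender source. */
public class CountingAppenderSketch extends AppenderSkeleton {
  private final List<LoggingEvent> events = new ArrayList<LoggingEvent>();

  @Override
  protected synchronized void append(LoggingEvent event) {
    events.add(event);
  }

  /** Count captured events whose attached exception message contains the given text. */
  public synchronized int countExceptionsWithMessage(String text) {
    int count = 0;
    for (LoggingEvent e : events) {
      ThrowableInformation ti = e.getThrowableInformation();
      if (ti != null && ti.getThrowable() != null
          && String.valueOf(ti.getThrowable().getMessage()).contains(text)) {
        count++;
      }
    }
    return count;
  }

  @Override
  public void close() {
  }

  @Override
  public boolean requiresLayout() {
    return false;
  }

  public static void main(String[] args) {
    CountingAppenderSketch appender = new CountingAppenderSketch();
    Logger.getRootLogger().addAppender(appender);

    // Log an error with an attached exception, then count it by message substring.
    Logger.getLogger("demo").error("failed to load image",
        new java.io.IOException("file is corrupt with MD5 checksum of 00000000"));
    System.out.println(appender.countExceptionsWithMessage("corrupt with MD5"));

    Logger.getRootLogger().removeAppender(appender);
  }
}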
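The new testStandbyExceptionThrownDuringCheckpoint case in TestStandbyCheckpoints uses GenericTestUtils.DelayAnswer with a Mockito spy to pause FSImage.saveNamespace, so an RPC can be issued while the checkpointer still holds the namesystem lock. The class below sketches the same latch-based Answer idea against a plain List spy; it is an illustration of the pattern, not the DelayAnswer implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

/** Latch-based Answer in the spirit of GenericTestUtils.DelayAnswer; illustration only. */
public class PausingAnswerSketch implements Answer<Object> {
  private final CountDownLatch called = new CountDownLatch(1);   // fires when the stubbed method is entered
  private final CountDownLatch proceed = new CountDownLatch(1);  // released by the test to let it continue

  @Override
  public Object answer(InvocationOnMock invocation) throws Throwable {
    called.countDown();                   // signal: the spied method has been reached
    proceed.await(60, TimeUnit.SECONDS);  // block until the test says go
    return invocation.callRealMethod();   // then run the real implementation
  }

  public void waitForCall() throws InterruptedException {
    called.await();
  }

  public void proceed() {
    proceed.countDown();
  }

  /** Example: pause a spied List.clear() call made from another thread. */
  public static void main(String[] args) throws Exception {
    final List<String> spied = Mockito.spy(new ArrayList<String>());
    final PausingAnswerSketch answerer = new PausingAnswerSketch();
    Mockito.doAnswer(answerer).when(spied).clear();

    Thread caller = new Thread(new Runnable() {
      @Override
      public void run() {
        spied.clear();
      }
    });
    caller.start();

    answerer.waitForCall();  // the background thread is now blocked inside clear()
    System.out.println("clear() is in progress; intermediate state can be asserted here");
    answerer.proceed();
    caller.join();
  }
}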
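The TestGetConf change compares tool output against String.format("myval%n") rather than a hard-coded "myval\n", so the expected value uses the platform line separator. A tiny illustration of that idiom:

/** Minimal illustration of building expected output with the platform line separator. */
public class LineSeparatorSketch {
  public static void main(String[] args) {
    String value = "myval";
    // What a tool that prints one line produces on the current platform.
    String printed = value + System.getProperty("line.separator");
    // %n expands to the same platform-specific separator.
    String expected = String.format("%s%n", value);
    System.out.println(expected.equals(printed));  // true on any platform
  }
}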
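The new TestGSet cases validate LightWeightGSet.computeCapacity by checking that the returned capacity is zero or a power of two and lies close to the requested percentage of memory. The sketch below illustrates the underlying arithmetic (a percentage of the heap divided by a 4- or 8-byte reference size, rounded down to a power of two); it approximates, but is not, the computeCapacity implementation.

/**
 * Illustration of the arithmetic behind the capacity checks; not the
 * LightWeightGSet.computeCapacity implementation.
 */
public class CapacitySketch {

  /** Same power-of-two test as in the new TestGSet code. */
  static boolean isPowerOfTwo(int num) {
    return num == 0 || (num > 0 && Integer.bitCount(num) == 1);
  }

  /** Round a positive entry count down to the nearest power of two (0 stays 0). */
  static int roundDownToPowerOfTwo(long entries) {
    if (entries <= 0) {
      return 0;
    }
    int exponent = 63 - Long.numberOfLeadingZeros(entries);
    return (int) Math.min(1L << exponent, 1L << 30);
  }

  public static void main(String[] args) {
    long maxMemory = 512L * 1024 * 1024;  // example heap size: 512 MB
    double percent = 2.0;                 // use roughly 2% of it for the map
    // 4-byte references on a 32-bit JVM, 8-byte references otherwise.
    double referenceSize =
        "32".equals(System.getProperty("sun.arch.data.model")) ? 4.0 : 8.0;
    long entries = (long) (maxMemory * percent / 100.0 / referenceSize);
    int capacity = roundDownToPowerOfTwo(entries);
    System.out.println("capacity = " + capacity
        + ", power of two: " + isPowerOfTwo(capacity));
  }
}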