From: arp@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Mon, 22 Feb 2016 19:47:24 -0000
Message-Id: <1b331bbe0a01428ca105cce1d94b87b1@git.apache.org>
In-Reply-To: <7839982659ae4ff6bd79ac8b2f5987b7@git.apache.org>
Subject: [05/50] [abbrv] hadoop git commit: HDFS-9773. Remove dead code
 related to SimulatedFSDataset in tests. Contributed by Brahma Reddy Battula.

HDFS-9773. Remove dead code related to SimulatedFSDataset in tests.
Contributed by Brahma Reddy Battula.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0738ae6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0738ae6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0738ae6

Branch: refs/heads/HDFS-1312
Commit: b0738ae673e4a95060b33498c049cd7790d6e371
Parents: ac5da11
Author: Akira Ajisaka
Authored: Mon Feb 15 15:35:06 2016 +0900
Committer: Akira Ajisaka
Committed: Mon Feb 15 15:35:06 2016 +0900

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 23 +-------------------
 .../org/apache/hadoop/hdfs/TestFileAppend2.java |  8 --------
 .../org/apache/hadoop/hdfs/TestFileAppend4.java |  5 -----
 .../org/apache/hadoop/hdfs/TestLargeBlock.java  |  5 -----
 .../shortcircuit/TestShortCircuitLocalRead.java |  7 -------
 6 files changed, 4 insertions(+), 47 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0738ae6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index eb2b21e..d144ba7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1925,6 +1925,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9797. Log Standby exceptions thrown by RequestHedgingProxyProvider
     at DEBUG Level (Inigo Goiri via asuresh)
 
+    HDFS-9773. Remove dead code related to SimulatedFSDataset in tests.
+    (Brahma Reddy Battula via aajisaka)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0738ae6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 56b6590..2768d21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -59,8 +58,6 @@ import org.junit.Test;
 public class TestFileAppend{
   private static final long RANDOM_TEST_RUNTIME = 10000;
 
-  final boolean simulatedStorage = false;
-
   private static byte[] fileContents = null;
 
   //
@@ -101,13 +98,7 @@ public class TestFileAppend{
     }
     byte[] expected = new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
-    if (simulatedStorage) {
-      LocatedBlocks lbs = fileSys.getClient().getLocatedBlocks(name.toString(),
-          0, AppendTestUtil.FILE_SIZE);
-      DFSTestUtil.fillExpectedBuf(lbs, expected);
-    } else {
-      System.arraycopy(fileContents, 0, expected, 0, expected.length);
-    }
+    System.arraycopy(fileContents, 0, expected, 0, expected.length);
 
     // do a sanity check. Read the file
     // do not check file status since the file is not yet closed.
     AppendTestUtil.checkFullFile(fileSys, name,
@@ -118,9 +109,6 @@ public class TestFileAppend{
   @Test
   public void testBreakHardlinksIfNeeded() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     InetSocketAddress addr = new InetSocketAddress("localhost",
@@ -186,9 +174,6 @@ public class TestFileAppend{
   @Test
   public void testSimpleFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     DistributedFileSystem fs = cluster.getFileSystem();
@@ -242,9 +227,6 @@ public class TestFileAppend{
   @Test
   public void testComplexFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     DistributedFileSystem fs = cluster.getFileSystem();
@@ -293,9 +275,6 @@ public class TestFileAppend{
   @Test(expected = FileNotFoundException.class)
   public void testFileNotFound() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0738ae6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index 3c72db3..cd1b851 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -59,7 +58,6 @@ public class TestFileAppend2 {
   }
 
   static final int numBlocks = 5;
-  final boolean simulatedStorage = false;
 
   private byte[] fileContents = null;
 
@@ -81,9 +79,6 @@ public class TestFileAppend2 {
   @Test
   public void testSimpleAppend() throws IOException {
     final Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -233,9 +228,6 @@ public class TestFileAppend2 {
   @Test
   public void testSimpleAppend2() throws Exception {
     final Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0738ae6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index 62f6cd2..265b510 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
@@ -67,7 +66,6 @@ public class TestFileAppend4 {
   MiniDFSCluster cluster;
   Path file1;
   FSDataOutputStream stm;
-  final boolean simulatedStorage = false;
 
   {
     DFSTestUtil.setNameNodeLogLevel(Level.ALL);
@@ -78,9 +76,6 @@ public class TestFileAppend4 {
   @Before
   public void setUp() throws Exception {
     this.conf = new Configuration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
 
     // lower heartbeat interval for fast recognition of DN death
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0738ae6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
index 0896120..a37da35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.junit.Test;
 
 /**
@@ -51,7 +50,6 @@ public class TestLargeBlock {
   // should we verify the data read back from the file? (slow)
   static final boolean verifyData = true;
   static final byte[] pattern = { 'D', 'E', 'A', 'D', 'B', 'E', 'E', 'F'};
-  static final boolean simulatedStorage = false;
 
   // creates a file
   static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl,
@@ -177,9 +175,6 @@ public class TestLargeBlock {
     final long fileSize = blockSize + 1L;
 
     Configuration conf = new Configuration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0738ae6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index a069003..f4fbebc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -97,7 +97,6 @@ public class TestShortCircuitLocalRead {
 
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 5120;
-  final boolean simulatedStorage = false;
 
   // creates a file but does not close it
   static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
@@ -268,9 +267,6 @@ public class TestShortCircuitLocalRead {
       conf.setBoolean(
           HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
     }
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     FileSystem fs = cluster.getFileSystem();
@@ -399,9 +395,6 @@ public class TestShortCircuitLocalRead {
         new File(sockDir.getDir(),
           "testSkipWithVerifyChecksum._PORT.sock").getAbsolutePath());
     DomainSocket.disableBindPathValidation();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     FileSystem fs = cluster.getFileSystem();
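
----------------------------------------------------------------------

For reference, the branches deleted above were dead because every
simulatedStorage field was hard-coded to false, so the
SimulatedFSDataset.setFactory(conf) calls could never run. A test that
genuinely wants the in-memory dataset opts in explicitly before building the
cluster. A minimal sketch, using only calls visible in this diff plus the
standard MiniDFSCluster#shutdown; the class and test names are illustrative,
not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
    import org.junit.Test;

    public class TestWithSimulatedStorage {  // hypothetical test class
      @Test
      public void testAgainstSimulatedDataset() throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Register the simulated dataset factory up front so the mini
        // cluster's DataNodes use SimulatedFSDataset instead of real disks.
        SimulatedFSDataset.setFactory(conf);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          // Exercise HDFS against the simulated storage.
          fs.mkdirs(new Path("/test"));
        } finally {
          cluster.shutdown();
        }
      }
    }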