From: aengineer@apache.org
To: common-commits@hadoop.apache.org
Date: Wed, 04 Nov 2015 23:38:25 -0000
Message-Id: <684ece2bb6904783a46ce1bb035d6ab4@git.apache.org>
Subject: [27/50] [abbrv] hadoop git commit: HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)

HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76324094
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76324094
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76324094

Branch: refs/heads/HDFS-7240
Commit: 7632409482aaf06ecc6fe370a9f519afb969ad30
Parents: 78d6890
Author: Lei Xu
Authored: Mon Nov 2 17:09:39 2015 -0800
Committer: Lei Xu
Committed: Mon Nov 2 17:09:39 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../org/apache/hadoop/hdfs/TestReplication.java | 115 +++++-------------
 .../server/datanode/FsDatasetTestUtils.java     |   7 ++
 .../fsdataset/impl/FsDatasetImplTestUtils.java  |  25 ++++
 4 files changed, 64 insertions(+), 85 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76324094/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fea4106..c13a725 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1656,6 +1656,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9168. Move client side unit test to hadoop-hdfs-client. (wheat9)
 
+    HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/76324094/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index 6424bc3..d9c96ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -20,22 +20,14 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import com.google.common.base.Supplier;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
-import java.nio.file.FileVisitResult;
-import java.nio.file.Files;
-import java.nio.file.SimpleFileVisitor;
-import java.nio.file.attribute.BasicFileAttributes;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -50,7 +42,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -62,6 +53,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -367,7 +359,7 @@ public class TestReplication {
     for (int i=0; i
+      List<MaterializedReplica> replicas = new ArrayList<>();
+      for (int dnIndex=0; dnIndex<3; dnIndex++) {
+        replicas.add(cluster.getMaterializedReplica(dnIndex, block));
       }
-
+      assertEquals(3, replicas.size());
+
+      cluster.shutdown();
+
       int fileCount = 0;
       // Choose 3 copies of block file - delete 1 and corrupt the remaining 2
-      for (int dnIndex=0; dnIndex<3; dnIndex++) {
-        File blockFile = cluster.getBlockFile(dnIndex, block);
-        LOG.info("Checking for file " + blockFile);
-
-        if (blockFile != null && blockFile.exists()) {
-          if (fileCount == 0) {
-            LOG.info("Deleting file " + blockFile);
-            assertTrue(blockFile.delete());
-          } else {
-            // corrupt it.
-            LOG.info("Corrupting file " + blockFile);
-            long len = blockFile.length();
-            assertTrue(len > 50);
-            RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
-            try {
-              blockOut.seek(len/3);
-              blockOut.write(buffer, 0, 25);
-            } finally {
-              blockOut.close();
-            }
-          }
-          fileCount++;
+      for (MaterializedReplica replica : replicas) {
+        if (fileCount == 0) {
+          LOG.info("Deleting block " + replica);
+          replica.deleteData();
+        } else {
+          // corrupt it.
+          LOG.info("Corrupting file " + replica);
+          replica.corruptData();
         }
+        fileCount++;
       }
-      assertEquals(3, fileCount);
-
+
       /* Start the MiniDFSCluster with more datanodes since once a writeBlock
        * to a datanode node fails, same block can not be written to it
        * immediately. In our case some replication attempts will fail.
@@ -530,63 +510,28 @@ public class TestReplication {
           DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       FileSystem fs = cluster.getFileSystem();
-      FSDataOutputStream create = fs.create(new Path("/test"));
-      fs.setReplication(new Path("/test"), (short) 1);
+      Path filePath = new Path("/test");
+      FSDataOutputStream create = fs.create(filePath);
+      fs.setReplication(filePath, (short) 1);
       create.write(new byte[1024]);
       create.close();
-      List<File> nonParticipatedNodeDirs = new ArrayList<File>();
-      File participatedNodeDirs = null;
-      for (int i = 0; i < cluster.getDataNodes().size(); i++) {
-        File storageDir = cluster.getInstanceStorageDir(i, 0);
-        String bpid = cluster.getNamesystem().getBlockPoolId();
-        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-        if (data_dir.listFiles().length == 0) {
-          nonParticipatedNodeDirs.add(data_dir);
-        } else {
-          assertNull("participatedNodeDirs has already been set.",
-              participatedNodeDirs);
-          participatedNodeDirs = data_dir;
-        }
-      }
-      assertEquals(2, nonParticipatedNodeDirs.size());
-
-      String blockFile = null;
-      final List<File> listFiles = new ArrayList<>();
-      Files.walkFileTree(participatedNodeDirs.toPath(),
-          new SimpleFileVisitor<java.nio.file.Path>() {
-            @Override
-            public FileVisitResult visitFile(
-                java.nio.file.Path file, BasicFileAttributes attrs)
-                throws IOException {
-              listFiles.add(file.toFile());
-              return FileVisitResult.CONTINUE;
-            }
-          }
-      );
-      assertFalse(listFiles.isEmpty());
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
       int numReplicaCreated = 0;
-      for (File file : listFiles) {
-        if (file.getName().startsWith(Block.BLOCK_FILE_PREFIX)
-            && !file.getName().endsWith("meta")) {
-          blockFile = file.getName();
-          for (File file1 : nonParticipatedNodeDirs) {
-            file1.mkdirs();
-            new File(file1, blockFile).createNewFile();
-            new File(file1, blockFile + "_1000.meta").createNewFile();
-            numReplicaCreated++;
-          }
-          break;
+      for (final DataNode dn : cluster.getDataNodes()) {
+        if (!dn.getFSDataset().contains(block)) {
+          cluster.getFsDatasetTestUtils(dn).injectCorruptReplica(block);
+          numReplicaCreated++;
        }
       }
       assertEquals(2, numReplicaCreated);
-      fs.setReplication(new Path("/test"), (short) 3);
+      fs.setReplication(filePath, (short) 3);
       cluster.restartDataNodes(); // Lets detect all DNs about dummy copied
       // blocks
       cluster.waitActive();
       cluster.triggerBlockReports();
-      DFSTestUtil.waitReplication(fs, new Path("/test"), (short) 3);
+      DFSTestUtil.waitReplication(fs, filePath, (short) 3);
     } finally {
       if (cluster != null) {
         cluster.shutdown();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76324094/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index eb986ff..40c4438 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -199,4 +199,11 @@ public interface FsDatasetTestUtils {
    * @throws IOException
    */
   void checkStoredReplica(final Replica replica) throws IOException;
+
+  /**
+   * Create dummy replica files for the block's data and metadata.
+   * @param block the block whose replica is to be created.
+   * @throws IOException on I/O error.
+   */
+  void injectCorruptReplica(ExtendedBlock block) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76324094/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index ed32fae..e8e4532 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.io.FileExistsException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -292,4 +293,28 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
     ReplicaInfo r = (ReplicaInfo) replica;
     FsDatasetImpl.checkReplicaFiles(r);
   }
+
+  @Override
+  public void injectCorruptReplica(ExtendedBlock block) throws IOException {
+    Preconditions.checkState(!dataset.contains(block),
+        "Block " + block + " already exists on dataset.");
+    try (FsVolumeReferences volRef = dataset.getFsVolumeReferences()) {
+      FsVolumeImpl volume = (FsVolumeImpl) volRef.get(0);
+      FinalizedReplica finalized = new FinalizedReplica(
+          block.getLocalBlock(),
+          volume,
+          volume.getFinalizedDir(block.getBlockPoolId()));
+      File blockFile = finalized.getBlockFile();
+      if (!blockFile.createNewFile()) {
+        throw new FileExistsException(
+            "Block file " + blockFile + " already exists.");
+      }
+      File metaFile = FsDatasetUtil.getMetaFile(blockFile, 1000);
+      if (!metaFile.createNewFile()) {
+        throw new FileExistsException(
+            "Meta file " + metaFile + " already exists."
+        );
+      }
+    }
+  }
 }
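
As an aside for readers of this change, the sketch below shows how a test can drive the dataset-agnostic hooks used above (MiniDFSCluster#getMaterializedReplica, MaterializedReplica#corruptData, and the new FsDatasetTestUtils#injectCorruptReplica) without touching block files directly. It is an illustration only, not part of the commit: the class name, test name, file path and payload size are invented for the example, while the cluster and test-utility calls are the ones that appear in the diff.

package org.apache.hadoop.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica;
import org.junit.Test;

public class TestFsDatasetAgnosticCorruptionSketch {

  @Test
  public void testCorruptAndInjectReplicas() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Hypothetical single-replica test file; path and size are arbitrary.
      Path filePath = new Path("/sketch");
      FSDataOutputStream out = fs.create(filePath);
      fs.setReplication(filePath, (short) 1);
      out.write(new byte[1024]);
      out.close();
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);

      for (int i = 0; i < cluster.getDataNodes().size(); i++) {
        DataNode dn = cluster.getDataNodes().get(i);
        if (dn.getFSDataset().contains(block)) {
          // Dataset-agnostic handle to a real replica: corrupt it in place
          // instead of locating and rewriting its block file on disk.
          MaterializedReplica replica = cluster.getMaterializedReplica(i, block);
          replica.corruptData();
        } else {
          // Inject dummy block/meta replicas where the block is absent,
          // as the updated testReplicationWhenBlockCorruption does.
          cluster.getFsDatasetTestUtils(dn).injectCorruptReplica(block);
        }
      }

      // A real test would now restart the datanodes, trigger block reports
      // and assert on the resulting replication state.
    } finally {
      cluster.shutdown();
    }
  }
}

The point of the indirection is that knowledge of block and meta file layout now lives only in FsDatasetImplTestUtils behind the FsDatasetTestUtils interface, so TestReplication (and tests written like the sketch) can run against FsDataset implementations that do not store replicas as local files.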