From: zhz@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Mon, 13 Apr 2015 21:08:36 -0000
Message-Id: <24ceb339fd0649b8ae1c56352ebd49f4@git.apache.org>
In-Reply-To: <22a38c1a8f8e42fab0db1f8687768b04@git.apache.org>
References: <22a38c1a8f8e42fab0db1f8687768b04@git.apache.org>
X-Mailer: ASF-Git Admin Mailer
Subject: [05/12] hadoop git commit: HDFS-8077. Erasure coding: fix bugs in
 EC zone and symlinks. Contributed by Jing Zhao and Zhe Zhang.

HDFS-8077. Erasure coding: fix bugs in EC zone and symlinks. Contributed by
Jing Zhao and Zhe Zhang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58d9f264
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58d9f264
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58d9f264

Branch: refs/heads/HDFS-7285
Commit: 58d9f264645bbb10c522ce5df7d69ed6ec8b3e2d
Parents: cc2202c
Author: Jing Zhao
Authored: Thu Apr 9 17:53:22 2015 -0700
Committer: Zhe Zhang
Committed: Mon Apr 13 14:08:19 2015 -0700

----------------------------------------------------------------------
 .../BlockInfoStripedUnderConstruction.java          |  2 +-
 .../hdfs/server/blockmanagement/BlockManager.java   | 12 ++++++------
 .../server/namenode/ErasureCodingZoneManager.java   |  7 +++++++
 .../hadoop/hdfs/server/namenode/FSDirectory.java    |  4 ++--
 .../hdfs/server/namenode/FSEditLogLoader.java       | 11 ++++++-----
 .../hdfs/server/namenode/FSImageSerialization.java  |  4 ++--
 .../hadoop/hdfs/server/namenode/INodeFile.java      | 17 ++++-------------
 .../hdfs/server/namenode/TestFSEditLogLoader.java   |  4 ++--
 .../hadoop/hdfs/server/namenode/TestFSImage.java    |  2 +-
 .../server/namenode/TestRecoverStripedBlocks.java   |  2 +-
 10 files changed, 32 insertions(+), 33 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58d9f264/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
index cfaf3a0..0373314 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
@@ -96,7 +96,7 @@ public class BlockInfoStripedUnderConstruction extends BlockInfoStriped
     for(int i = 0; i < numLocations; i++) {
       // when creating a new block we simply sequentially assign block index to
       // each storage
-      Block blk = new Block(this.getBlockId() + i, this.getGenerationStamp(), 0);
+      Block blk = new Block(this.getBlockId() + i, 0, this.getGenerationStamp());
       replicas[i] = new ReplicaUnderConstruction(blk, targets[i],
           ReplicaState.RBW);
     }
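
Note on the change above: it is an argument-order bug. Block's
three-argument constructor takes (blockId, numBytes, generationStamp), so
the old code stored the generation stamp as the block length and 0 as the
generation stamp. A minimal, self-contained sketch of the pitfall, using a
hypothetical SimpleBlock stand-in rather than the real
org.apache.hadoop.hdfs.protocol.Block:

    // SimpleBlock mirrors the (id, numBytes, genStamp) parameter order of
    // Block's three-argument constructor; it is a stand-in, not HDFS code.
    class SimpleBlock {
      final long blockId;
      final long numBytes;
      final long generationStamp;

      SimpleBlock(long blockId, long numBytes, long generationStamp) {
        this.blockId = blockId;
        this.numBytes = numBytes;
        this.generationStamp = generationStamp;
      }
    }

    public class BlockArgOrderDemo {
      public static void main(String[] args) {
        long id = 1000L, genStamp = 1001L;
        // Buggy order (pre-patch): the genstamp lands in the numBytes slot
        // and the stored generation stamp silently becomes 0.
        SimpleBlock buggy = new SimpleBlock(id, genStamp, 0);
        // Fixed order (post-patch): a freshly allocated block has length 0
        // and carries the real generation stamp.
        SimpleBlock fixed = new SimpleBlock(id, 0, genStamp);
        System.out.println("buggy: len=" + buggy.numBytes
            + " gs=" + buggy.generationStamp);
        System.out.println("fixed: len=" + fixed.numBytes
            + " gs=" + fixed.generationStamp);
      }
    }

Positional long parameters like these transpose silently; the compiler
accepts both orders, which is presumably why the bug survived until now.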
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58d9f264/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index fcf1421..94aafc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2450,12 +2450,12 @@ public class BlockManager {
     case COMMITTED:
       if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) {
         final long reportedGS = reported.getGenerationStamp();
-        return new BlockToMarkCorrupt(reported, storedBlock, reportedGS,
+        return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS,
             "block is " + ucState + " and reported genstamp " + reportedGS
             + " does not match genstamp in block map "
             + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
       } else if (storedBlock.getNumBytes() != reported.getNumBytes()) {
-        return new BlockToMarkCorrupt(reported, storedBlock,
+        return new BlockToMarkCorrupt(new Block(reported), storedBlock,
             "block is " + ucState + " and reported length " +
             reported.getNumBytes() + " does not match " +
             "length in block map " + storedBlock.getNumBytes(),
@@ -2466,7 +2466,7 @@ public class BlockManager {
     case UNDER_CONSTRUCTION:
       if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) {
         final long reportedGS = reported.getGenerationStamp();
-        return new BlockToMarkCorrupt(reported, storedBlock, reportedGS,
+        return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS,
             "block is " + ucState + " and reported state " + reportedState
             + ", But reported genstamp " + reportedGS
             + " does not match genstamp in block map "
@@ -2482,7 +2482,7 @@ public class BlockManager {
       return null; // not corrupt
     } else if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) {
       final long reportedGS = reported.getGenerationStamp();
-      return new BlockToMarkCorrupt(reported, storedBlock, reportedGS,
+      return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS,
           "reported " + reportedState + " replica with genstamp " + reportedGS
           + " does not match COMPLETE block's genstamp in block map "
           + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
@@ -2497,7 +2497,7 @@ public class BlockManager {
             "complete with the same genstamp");
         return null;
       } else {
-        return new BlockToMarkCorrupt(reported, storedBlock,
+        return new BlockToMarkCorrupt(new Block(reported), storedBlock,
             "reported replica has invalid state " + reportedState,
             Reason.INVALID_STATE);
       }
@@ -2510,7 +2510,7 @@ public class BlockManager {
         " on " + dn + " size " + storedBlock.getNumBytes();
     // log here at WARN level since this is really a broken HDFS invariant
     LOG.warn(msg);
-    return new BlockToMarkCorrupt(reported, storedBlock, msg,
+    return new BlockToMarkCorrupt(new Block(reported), storedBlock, msg,
         Reason.INVALID_STATE);
   }
 }
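
Every BlockToMarkCorrupt call site above now wraps the reported replica in
new Block(reported). The reported object can be a richer, live BlockInfo
variant (striped or contiguous) whose state keeps changing as the namenode
processes further reports; copying it down to a plain Block presumably
snapshots the (id, length, genstamp) triple at the moment corruption is
detected. A toy illustration of the aliasing hazard, with a hypothetical
Block stand-in (the real class offers a similar copy constructor):

    public class DefensiveCopyDemo {
      // Stand-in for org.apache.hadoop.hdfs.protocol.Block, not the real class.
      static class Block {
        long id, numBytes, genStamp;
        Block(long id, long numBytes, long genStamp) {
          this.id = id; this.numBytes = numBytes; this.genStamp = genStamp;
        }
        // Copy constructor, analogous to Block(Block) in HDFS.
        Block(Block other) { this(other.id, other.numBytes, other.genStamp); }
      }

      public static void main(String[] args) {
        Block reported = new Block(1, 1024, 5);
        Block aliased = reported;             // pre-patch: keep the live reference
        Block snapshot = new Block(reported); // post-patch: copy at decision time

        reported.genStamp = 6; // the replica's in-memory state moves on

        System.out.println("aliased  gs=" + aliased.genStamp);  // 6 -- drifted
        System.out.println("snapshot gs=" + snapshot.genStamp); // 5 -- stable
      }
    }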
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58d9f264/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
index d4ff7c5..606e804 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
@@ -59,6 +59,13 @@ public class ErasureCodingZoneManager {
       if (inode == null) {
         continue;
       }
+      // We don't allow symlinks in an EC zone, or pointing to a file/dir
+      // in an EC zone. Therefore if a symlink is encountered, the dir
+      // shouldn't have EC
+      // TODO: properly support symlinks in EC zones
+      if (inode.isSymlink()) {
+        return false;
+      }
       final List<XAttr> xAttrs = inode.getXAttrFeature() == null ?
           new ArrayList<XAttr>(0)
           : inode.getXAttrFeature().getXAttrs();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58d9f264/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 53d2040..d68128a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -468,8 +468,8 @@ public class FSDirectory implements Closeable {
     try {
       INodesInPath iip = addINode(existing, newNode);
       if (iip != null) {
-        // TODO: we will no longer use storage policy for "Erasure Coding Zone"
-        if (newNode.isStriped()) {
+        // check if the file is in an EC zone
+        if (getECPolicy(iip)) {
           newNode.addStripedBlocksFeature();
         }
         if (aclEntries != null) {
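
The new isSymlink() guard bails out of the xattr scan before touching
symlinks, and FSDirectory now decides striped-ness from the zone of the
resolved path rather than from the inode itself. A hedged sketch of the
ancestor walk this zone check appears to perform, with stand-in names
(Node, hasEcXAttr) rather than the actual HDFS types:

    import java.util.List;

    class EcZoneWalk {
      // Hypothetical stand-in for an inode; not the HDFS INode API.
      interface Node {
        boolean isSymlink();
        boolean hasEcXAttr();
      }

      /** @param pathNodes inodes from root to leaf, as INodesInPath gives them */
      static boolean isInEcZone(List<Node> pathNodes) {
        for (Node n : pathNodes) {
          if (n == null) {
            continue; // path components not yet created
          }
          if (n.isSymlink()) {
            return false; // symlinks are not supported inside EC zones
          }
          if (n.hasEcXAttr()) {
            return true; // some ancestor declares the EC zone
          }
        }
        return false;
      }
    }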
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58d9f264/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 89cfe05..f530772 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -417,7 +417,7 @@ public class FSEditLogLoader {
       newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
       newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
       // TODO whether the file is striped should later be retrieved from iip
-      updateBlocks(fsDir, addCloseOp, iip, newFile, newFile.isStriped());
+      updateBlocks(fsDir, addCloseOp, iip, newFile, fsDir.getECPolicy(iip));
       break;
     }
     case OP_CLOSE: {
@@ -438,7 +438,7 @@ public class FSEditLogLoader {
       file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
       file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
       // TODO whether the file is striped should later be retrieved from iip
-      updateBlocks(fsDir, addCloseOp, iip, file, file.isStriped());
+      updateBlocks(fsDir, addCloseOp, iip, file, fsDir.getECPolicy(iip));

       // Now close the file
       if (!file.isUnderConstruction() &&
@@ -497,7 +497,7 @@ public class FSEditLogLoader {
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
       // Update in-memory data structures
       // TODO whether the file is striped should later be retrieved from iip
-      updateBlocks(fsDir, updateOp, iip, oldFile, oldFile.isStriped());
+      updateBlocks(fsDir, updateOp, iip, oldFile, fsDir.getECPolicy(iip));

       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(updateOp.rpcClientId, updateOp.rpcCallId);
@@ -511,10 +511,11 @@ public class FSEditLogLoader {
         FSNamesystem.LOG.debug(op.opCode + ": " + path +
             " new block id : " + addBlockOp.getLastBlock().getBlockId());
       }
-      INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path), path);
+      INodesInPath iip = fsDir.getINodesInPath(path, true);
+      INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
       // add the new block to the INodeFile
       // TODO whether the file is striped should later be retrieved from iip
-      addNewBlock(addBlockOp, oldFile, oldFile.isStriped());
+      addNewBlock(addBlockOp, oldFile, fsDir.getECPolicy(iip));
       break;
     }
     case OP_SET_REPLICATION: {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58d9f264/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 1e58858..58244e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -207,7 +207,7 @@ public class FSImageSerialization {
     out.writeLong(cons.getModificationTime());
     out.writeLong(cons.getPreferredBlockSize());
     // whether the file has striped blocks
-    out.writeBoolean(cons.isWithStripedBlocks());
+    out.writeBoolean(cons.isStriped());
     writeBlocks(cons.getBlocks(), out);
     cons.getPermissionStatus().write(out);

@@ -233,7 +233,7 @@ public class FSImageSerialization {
     out.writeLong(file.getAccessTime());
     out.writeLong(file.getPreferredBlockSize());
     // whether the file has striped blocks
-    out.writeBoolean(file.isWithStripedBlocks());
+    out.writeBoolean(file.isStriped());
     writeBlocks(file.getBlocks(), out);
     SnapshotFSImageFormat.saveFileDiffList(file, out);
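
The recurring substitution in FSEditLogLoader is file.isStriped() ->
fsDir.getECPolicy(iip): during edit-log replay an inode may be rebuilt
before its striped-blocks feature is attached, so the inode-level flag can
read false even inside an EC zone, while the zone derived from the resolved
path stays reliable. A small stand-alone illustration of that gap (Dir and
File are stand-ins, not the HDFS classes):

    public class ReplayStripedDecision {
      // Hypothetical stand-in for FSDirectory's zone lookup.
      static class Dir {
        final boolean pathInEcZone;
        Dir(boolean pathInEcZone) { this.pathInEcZone = pathInEcZone; }
        boolean getECPolicy() { return pathInEcZone; }
      }
      // Hypothetical stand-in for INodeFile during replay.
      static class File {
        boolean stripedFeatureAttached = false; // not yet set while replaying
        boolean isStriped() { return stripedFeatureAttached; }
      }

      public static void main(String[] args) {
        Dir fsDir = new Dir(true); // the path lives in an EC zone
        File file = new File();    // inode rebuilt by replay, feature missing

        System.out.println("inode says striped: " + file.isStriped());    // false (wrong)
        System.out.println("zone says striped:  " + fsDir.getECPolicy()); // true (right)
      }
    }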
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58d9f264/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index f95e54e..b5c510e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -185,17 +185,13 @@ public class INodeFile extends INodeWithAdditionalFields
   public FileWithStripedBlocksFeature addStripedBlocksFeature() {
     assert blocks == null || blocks.length == 0:
         "The file contains contiguous blocks";
-    assert !isWithStripedBlocks();
+    assert !isStriped();
     this.setFileReplication((short) 0);
     FileWithStripedBlocksFeature sb = new FileWithStripedBlocksFeature();
     addFeature(sb);
     return sb;
   }

-  public boolean isWithStripedBlocks() {
-    return getStripedBlocksFeature() != null;
-  }
-
   /** Used to make sure there is no contiguous block related info */
   private boolean hasNoContiguousBlock() {
     return (blocks == null || blocks.length == 0) && getFileReplication() == 0;
@@ -431,7 +427,7 @@ public class INodeFile extends INodeWithAdditionalFields
   /** Set the replication factor of this file. */
   public final INodeFile setFileReplication(short replication,
       int latestSnapshotId) throws QuotaExceededException {
-    Preconditions.checkState(!isWithStripedBlocks(),
+    Preconditions.checkState(!isStriped(),
         "Cannot set replication to a file with striped blocks");
     recordModification(latestSnapshotId);
     setFileReplication(replication);
@@ -653,7 +649,7 @@ public class INodeFile extends INodeWithAdditionalFields
     long nsDelta = 1;
     final long ssDeltaNoReplication;
     short replication;
-    if (isWithStripedBlocks()) {
+    if (isStriped()) {
       return computeQuotaUsageWithStriped(bsps, counts);
     }
     FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
@@ -695,11 +691,6 @@ public class INodeFile extends INodeWithAdditionalFields
   /**
    * Compute quota of striped file
-   * @param bsps
-   * @param counts
-   * @param useCache
-   * @param lastSnapshotId
-   * @return quota counts
    */
   public final QuotaCounts computeQuotaUsageWithStriped(
       BlockStoragePolicySuite bsps, QuotaCounts counts) {
@@ -828,7 +819,7 @@ public class INodeFile extends INodeWithAdditionalFields
    * Use preferred block size for the last block if it is under construction.
    */
   public final long storagespaceConsumed() {
-    if (isWithStripedBlocks()) {
+    if (isStriped()) {
      return storagespaceConsumedWithStriped();
     } else {
       return storagespaceConsumedNoReplication() * getBlockReplication();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58d9f264/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 407d07e..0eeb7f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -472,7 +472,7 @@ public class TestFSEditLogLoader {
       INodeFile inodeLoaded = (INodeFile)fns.getFSDirectory()
           .getINode(testFilePath);

-      assertTrue(inodeLoaded.isWithStripedBlocks());
+      assertTrue(inodeLoaded.isStriped());

       BlockInfoStriped[] blks = (BlockInfoStriped[])inodeLoaded.getBlocks();
       assertEquals(1, blks.length);
@@ -551,7 +551,7 @@ public class TestFSEditLogLoader {
       INodeFile inodeLoaded = (INodeFile)fns.getFSDirectory()
           .getINode(testFilePath);

-      assertTrue(inodeLoaded.isWithStripedBlocks());
+      assertTrue(inodeLoaded.isStriped());

       BlockInfoStriped[] blks = (BlockInfoStriped[])inodeLoaded.getBlocks();
       assertEquals(1, blks.length);
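
The INodeFile change folds the removed isWithStripedBlocks() into the
single isStriped() predicate; the removed body was simply a null check on
the striped-blocks feature. A sketch of the consolidated shape, assuming
isStriped() is (or can be) backed by that same feature lookup:

    // Stand-in for INodeFile; FileWithStripedBlocksFeature is modeled as a
    // plain Object since only its presence matters here.
    class INodeFileSketch {
      private Object stripedBlocksFeature;

      Object getStripedBlocksFeature() { return stripedBlocksFeature; }

      // Single source of truth after the patch; callers no longer choose
      // between two predicates that could drift apart.
      boolean isStriped() {
        return getStripedBlocksFeature() != null;
      }
    }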
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58d9f264/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 83f01c6..a456cad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -199,7 +199,7 @@ public class TestFSImage {
     assertEquals(mtime, fileByLoaded.getModificationTime());
     assertEquals(isUC ? mtime : atime, fileByLoaded.getAccessTime());
     assertEquals(0, fileByLoaded.getContiguousBlocks().length);
-    assertEquals(0, fileByLoaded.getBlockReplication());
+    assertEquals(0, fileByLoaded.getFileReplication());
     assertEquals(preferredBlockSize, fileByLoaded.getPreferredBlockSize());

     //check the BlockInfoStriped

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58d9f264/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java
index b2ff6c8..4292f9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java
@@ -84,7 +84,7 @@ public class TestRecoverStripedBlocks {
     final INodeFile fileNode = cluster.getNamesystem().getFSDirectory()
         .getINode4Write(filePath.toString()).asFile();
     assertFalse(fileNode.isUnderConstruction());
-    assertTrue(fileNode.isWithStripedBlocks());
+    assertTrue(fileNode.isStriped());
     BlockInfo[] blocks = fileNode.getBlocks();
     assertEquals(numBlocks, blocks.length);
     for (BlockInfo blk : blocks) {
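
TestFSImage now asserts on getFileReplication() instead of
getBlockReplication(). The patch pins the file-level replication of striped
files to 0 (see setFileReplication((short) 0) in addStripedBlocksFeature
above), whereas a block-level notion of replication for a striped group
could legitimately be nonzero. A toy model of the distinction; the 6+3
layout and both method bodies are illustrative stand-ins, not taken from
this patch:

    class StripedFileSketch {
      static final short DATA_BLOCKS = 6, PARITY_BLOCKS = 3;

      // Striped files carry file-level replication 0 after the patch.
      short getFileReplication() { return 0; }

      // A striped block group still occupies data + parity storages, so a
      // block-level "replication" need not be 0.
      short getBlockReplication() {
        return (short) (DATA_BLOCKS + PARITY_BLOCKS);
      }

      public static void main(String[] args) {
        StripedFileSketch f = new StripedFileSketch();
        System.out.println("file replication:  " + f.getFileReplication());  // 0
        System.out.println("block replication: " + f.getBlockReplication()); // 9
      }
    }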