From: szetszwo@apache.org
To: hdfs-commits@hadoop.apache.org
Subject: svn commit: r1450477 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/ src/test/java/org/apache/hadoop/hdfs/ s...
Date: Tue, 26 Feb 2013 22:04:36 -0000

Author: szetszwo
Date: Tue Feb 26 22:04:35 2013
New Revision: 1450477

URL: http://svn.apache.org/r1450477
Log:
HDFS-4523. Fix INodeFile replacement, TestQuota and javac errors from trunk merge.
Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1450477&r1=1450476&r2=1450477&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt Tue Feb 26 22:04:35 2013
@@ -176,3 +176,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4514. Add CLI for supporting snapshot rename, diff report, and
   snapshottable directory listing. (Jing Zhao via szetszwo)
+
+  HDFS-4523. Fix INodeFile replacement, TestQuota and javac errors from trunk
+  merge. (szetszwo)

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1450477&r1=1450476&r2=1450477&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Tue Feb 26 22:04:35 2013
@@ -335,7 +335,7 @@ public class FSDirectory implements Clos
         INodeFileUnderConstruction.valueOf(inodesInPath.getLastINode(), path);
 
     // check quota limits and updated space consumed
-    updateCount(inodesInPath, 0, 0, fileINode.getBlockDiskspace(), true);
+    updateCount(inodesInPath, 0, fileINode.getBlockDiskspace(), true);
 
     // associate new last block for the file
     BlockInfoUnderConstruction blockInfo =
@@ -426,7 +426,7 @@ public class FSDirectory implements Clos
 
     // update space consumed
     final INodesInPath iip = rootDir.getINodesInPath4Write(path, true);
-    updateCount(iip, 0, 0, -fileNode.getBlockDiskspace(), true);
+    updateCount(iip, 0, -fileNode.getBlockDiskspace(), true);
   }
 
   /**
@@ -969,15 +969,11 @@ public class FSDirectory implements Clos
     INodeDirectory trgParent = (INodeDirectory)trgINodes[trgINodes.length-2];
     final Snapshot trgLatestSnapshot = trgINodesInPath.getLatestSnapshot();
 
-    INodeFile [] allSrcInodes = new INodeFile[srcs.length];
-    int i = 0;
-    int totalBlocks = 0;
-    for(String src : srcs) {
-      INodeFile srcInode = (INodeFile)getINode(src);
-      allSrcInodes[i++] = srcInode;
-      totalBlocks += srcInode.numBlocks();
+    final INodeFile [] allSrcInodes = new INodeFile[srcs.length];
+    for(int i = 0; i < srcs.length; i++) {
+      allSrcInodes[i] = (INodeFile)getINode4Write(srcs[i]);
     }
-    trgInode.appendBlocks(allSrcInodes, totalBlocks); // copy the blocks
+    trgInode.concatBlocks(allSrcInodes);
 
     // since we are in the same dir - we can use same parent to remove files
     int count = 0;
@@ -1908,17 +1904,16 @@ public class FSDirectory implements Clos
   private INode removeLastINode(final INodesInPath inodesInPath) {
     final INode[] inodes = inodesInPath.getINodes();
     final int pos = inodes.length - 1;
-    INode removedNode = ((INodeDirectory)inodes[pos-1]).removeChild(
+    final boolean removed = ((INodeDirectory)inodes[pos-1]).removeChild(
         inodes[pos], inodesInPath.getLatestSnapshot());
-    if (removedNode != null) {
-      Preconditions.checkState(removedNode == inodes[pos]);
-
-      inodesInPath.setINode(pos - 1, removedNode.getParent());
-      final Quota.Counts counts = removedNode.computeQuotaUsage();
+    if (removed) {
+      inodesInPath.setINode(pos - 1, inodes[pos].getParent());
+      final Quota.Counts counts = inodes[pos].computeQuotaUsage();
       updateCountNoQuotaCheck(inodesInPath, pos,
           -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
+      return inodes[pos];
     }
-    return removedNode;
+    return null;
   }
 
   /**
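
The removeLastINode(..) hunk above reflects the new removeChild(..) contract in this commit: the callee reports success as a boolean, and the caller returns the child reference it already holds instead of a node handed back by the callee. A self-contained toy sketch of that contract (plain Java stand-ins, not the HDFS classes):

    import java.util.ArrayList;
    import java.util.List;

    // Toy sketch (not the HDFS classes) of the removeChild(..) contract change:
    // remove reports success as a boolean; a caller like removeLastINode(..)
    // already holds the child reference, so nothing needs to be returned.
    public class RemoveChildSketch {
      static final List<Object> children = new ArrayList<>();

      // New-style remove: true iff exactly this child instance was removed.
      static boolean removeChild(Object child) {
        final int i = children.indexOf(child);
        if (i < 0) {
          return false;
        }
        final Object removed = children.remove(i);
        if (removed != child) {
          throw new IllegalStateException("removed a different instance");
        }
        return true;
      }

      static Object removeLast(Object last) {
        // Mirrors the new removeLastINode(..): use the boolean, return `last`.
        return removeChild(last) ? last : null;
      }

      public static void main(String[] args) {
        Object child = new Object();
        children.add(child);
        System.out.println(removeLast(child) == child); // true
      }
    }
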
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1450477&r1=1450476&r2=1450477&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Feb 26 22:04:35 2013
@@ -1556,7 +1556,7 @@ public class FSNamesystem implements Nam
       }
 
       // check replication and blocks size
-      if(repl != srcInode.getFileReplication()) {
+      if(repl != srcInode.getBlockReplication()) {
         throw new HadoopIllegalArgumentException("concat: the soruce file "
             + src + " and the target file " + target
             + " should have the same replication: source replication is "
@@ -5722,6 +5722,7 @@ public class FSNamesystem implements Nam
 
   /** Allow snapshot on a directroy. */
   public void allowSnapshot(String path) throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -5729,7 +5730,7 @@ public class FSNamesystem implements Nam
         throw new SafeModeException("Cannot allow snapshot for " + path, safeMode);
       }
-      checkOwner(path);
+      checkOwner(pc, path);
 
       snapshotManager.setSnapshottable(path);
       getEditLog().logAllowSnapshot(path);
@@ -5749,6 +5750,7 @@ public class FSNamesystem implements Nam
 
   /** Disallow snapshot on a directory. */
   public void disallowSnapshot(String path) throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -5756,7 +5758,7 @@ public class FSNamesystem implements Nam
         throw new SafeModeException("Cannot disallow snapshot for " + path, safeMode);
       }
-      checkOwner(path);
+      checkOwner(pc, path);
 
       snapshotManager.resetSnapshottable(path);
       getEditLog().logDisallowSnapshot(path);
@@ -5780,6 +5782,7 @@ public class FSNamesystem implements Nam
    */
   public void createSnapshot(String snapshotRoot, String snapshotName)
       throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -5787,7 +5790,7 @@ public class FSNamesystem implements Nam
         throw new SafeModeException("Cannot create snapshot for " + snapshotRoot, safeMode);
       }
-      checkOwner(snapshotRoot);
+      checkOwner(pc, snapshotRoot);
 
       dir.writeLock();
       try {
@@ -5819,6 +5822,7 @@ public class FSNamesystem implements Nam
    */
   public void renameSnapshot(String path, String snapshotOldName,
       String snapshotNewName) throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -5826,7 +5830,7 @@ public class FSNamesystem implements Nam
         throw new SafeModeException("Cannot rename snapshot for " + path, safeMode);
       }
-      checkOwner(path);
+      checkOwner(pc, path);
 
       // TODO: check if the new name is valid. May also need this for
       // creationSnapshot
@@ -5863,8 +5867,8 @@ public class FSNamesystem implements Nam
       checkOperation(OperationCategory.READ);
       FSPermissionChecker checker = new FSPermissionChecker(
           fsOwner.getShortUserName(), supergroup);
-      status = snapshotManager
-          .getSnapshottableDirListing(checker.isSuper ? null : checker.user);
+      final String user = checker.isSuperUser()?
+          null : checker.getUser();
+      status = snapshotManager.getSnapshottableDirListing(user);
     } finally {
       readUnlock();
     }
@@ -5919,6 +5923,7 @@ public class FSNamesystem implements Nam
    */
   public void deleteSnapshot(String snapshotRoot, String snapshotName)
       throws SafeModeException, IOException {
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -5926,7 +5931,7 @@ public class FSNamesystem implements Nam
         throw new SafeModeException(
             "Cannot delete snapshot for " + snapshotRoot, safeMode);
       }
-      checkOwner(snapshotRoot);
+      checkOwner(pc, snapshotRoot);
 
       BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
       dir.writeLock();
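
Each snapshot operation above now constructs the FSPermissionChecker before taking the namesystem write lock and hands it to checkOwner(pc, ..). A rough self-contained sketch of that locking pattern (toy types; not the actual FSNamesystem API):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Toy sketch (not the FSNamesystem API) of the pattern applied to the
    // snapshot operations above: build the permission checker before taking
    // the write lock, then use it inside the locked region.
    public class CheckOwnerSketch {
      static class PermissionChecker {
        final String user = "alice";  // stands in for FSPermissionChecker state
      }

      static final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      static PermissionChecker getPermissionChecker() {
        return new PermissionChecker();          // work done outside the lock
      }

      static void checkOwner(PermissionChecker pc, String path) {
        System.out.println(pc.user + " checked as owner of " + path);
      }

      static void allowSnapshot(String path) {
        final PermissionChecker pc = getPermissionChecker(); // before lock()
        lock.writeLock().lock();
        try {
          checkOwner(pc, path);                  // no construction under lock
        } finally {
          lock.writeLock().unlock();
        }
      }

      public static void main(String[] args) {
        allowSnapshot("/dir");
      }
    }
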
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1450477&r1=1450476&r2=1450477&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Tue Feb 26 22:04:35 2013
@@ -123,18 +123,41 @@ public class INodeDirectory extends INod
    *
    * @param child the child inode to be removed
    * @param latest See {@link INode#recordModification(Snapshot)}.
-   * @return the removed child inode.
    */
-  public INode removeChild(INode child, Snapshot latest) {
-    assertChildrenNonNull();
-
+  public boolean removeChild(INode child, Snapshot latest) {
     if (isInLatestSnapshot(latest)) {
       return replaceSelf4INodeDirectoryWithSnapshot()
           .removeChild(child, latest);
     }
+    return removeChild(child);
+  }
+
+  /**
+   * Remove the specified child from this directory.
+   * The basic remove method which actually calls children.remove(..).
+   *
+   * @param child the child inode to be removed
+   *
+   * @return true if the child is removed; false if the child is not found.
+   */
+  protected final boolean removeChild(final INode child) {
+    assertChildrenNonNull();
     final int i = searchChildren(child.getLocalNameBytes());
-    return i >= 0? children.remove(i): null;
+    if (i < 0) {
+      return false;
+    }
+
+    final INode removed = children.remove(i);
+    Preconditions.checkState(removed == child);
+    return true;
+  }
+
+  /**
+   * Remove the specified child and all its snapshot copies from this directory.
+   */
+  public boolean removeChildAndAllSnapshotCopies(INode child) {
+    return removeChild(child);
   }
 
   /**
@@ -197,13 +220,18 @@ public class INodeDirectory extends INod
     oldChild.clearReferences();
   }
 
+  private void replaceChildFile(final INodeFile oldChild, final INodeFile newChild) {
+    replaceChild(oldChild, newChild);
+    newChild.updateBlockCollection();
+  }
+
   /** Replace a child {@link INodeFile} with an {@link INodeFileWithSnapshot}. */
   INodeFileWithSnapshot replaceChild4INodeFileWithSnapshot(
       final INodeFile child) {
     Preconditions.checkArgument(!(child instanceof INodeFileWithSnapshot),
         "Child file is already an INodeFileWithSnapshot, child=" + child);
     final INodeFileWithSnapshot newChild = new INodeFileWithSnapshot(child);
-    replaceChild(child, newChild);
+    replaceChildFile(child, newChild);
     return newChild;
   }
 
@@ -214,7 +242,7 @@ public class INodeDirectory extends INod
         "Child file is already an INodeFileUnderConstructionWithSnapshot, child=" + child);
     final INodeFileUnderConstructionWithSnapshot newChild
         = new INodeFileUnderConstructionWithSnapshot(child, null);
-    replaceChild(child, newChild);
+    replaceChildFile(child, newChild);
     return newChild;
   }
 
@@ -840,7 +868,11 @@ public class INodeDirectory extends INod
   public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
       final Snapshot snapshot) {
     super.dumpTreeRecursively(out, prefix, snapshot);
-    out.println(", childrenSize=" + getChildrenList(snapshot).size());
+    out.print(", childrenSize=" + getChildrenList(snapshot).size());
+    if (this instanceof INodeDirectoryWithQuota) {
+//      out.print(((INodeDirectoryWithQuota)this).quotaString());
+    }
+    out.println();
 
     if (prefix.length() >= 2) {
       prefix.setLength(prefix.length() - 2);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java?rev=1450477&r1=1450476&r2=1450477&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java Tue Feb 26 22:04:35 2013
@@ -173,4 +173,14 @@ public class INodeDirectoryWithQuota ext
       throw new DSQuotaExceededException(dsQuota, diskspace + dsDelta);
     }
   }
+
+  String namespaceString() {
+    return "namespace: " + (nsQuota < 0? "-": namespace + "/" + nsQuota);
+  }
+  String diskspaceString() {
+    return "diskspace: " + (dsQuota < 0? "-": diskspace + "/" + dsQuota);
+  }
+  String quotaString() {
+    return ", Quota[" + namespaceString() + ", " + diskspaceString() + "]";
+  }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1450477&r1=1450476&r2=1450477&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Tue Feb 26 22:04:35 2013
@@ -196,11 +196,23 @@ public class INodeFile extends INode imp
     return this.blocks;
   }
 
+  void updateBlockCollection() {
+    if (blocks != null) {
+      for(BlockInfo b : blocks) {
+        b.setBlockCollection(this);
+      }
+    }
+  }
+
   /**
    * append array of blocks to this.blocks
    */
-  void appendBlocks(INodeFile [] inodes, int totalAddedBlocks) {
+  void concatBlocks(INodeFile[] inodes) {
     int size = this.blocks.length;
+    int totalAddedBlocks = 0;
+    for(INodeFile f : inodes) {
+      totalAddedBlocks += f.blocks.length;
+    }
 
     BlockInfo[] newlist = new BlockInfo[size + totalAddedBlocks];
     System.arraycopy(this.blocks, 0, newlist, 0, size);
@@ -209,11 +221,9 @@ public class INodeFile extends INode imp
       System.arraycopy(in.blocks, 0, newlist, size, in.blocks.length);
       size += in.blocks.length;
     }
-
-    for(BlockInfo bi: newlist) {
-      bi.setBlockCollection(this);
-    }
+
     setBlocks(newlist);
+    updateBlockCollection();
   }
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1450477&r1=1450476&r2=1450477&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Tue Feb 26 22:04:35 2013
@@ -29,9 +29,9 @@ import org.apache.hadoop.hdfs.protocol.S
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INode.Content.CountsMap.Key;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
-import org.apache.hadoop.hdfs.server.namenode.INode.Content.CountsMap.Key;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.Container;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.UndoInfo;
@@ -60,6 +60,16 @@ public class INodeDirectoryWithSnapshot
       return getCreatedList().set(c, newChild);
     }
 
+    private final boolean removeCreatedChild(final int c, final INode child) {
+      final List<INode> created = getCreatedList();
+      if (created.get(c) == child) {
+        final INode removed = created.remove(c);
+        Preconditions.checkState(removed == child);
+        return true;
+      }
+      return false;
+    }
+
     /** clear the created list */
     private int destroyCreatedList(
         final INodeDirectoryWithSnapshot currentINode,
@@ -499,28 +509,49 @@ public class INodeDirectoryWithSnapshot
   }
 
   @Override
-  public INode removeChild(INode child, Snapshot latest) {
+  public boolean removeChild(INode child, Snapshot latest) {
     ChildrenDiff diff = null;
     UndoInfo<INode> undoInfo = null;
     if (latest != null) {
       diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
       undoInfo = diff.delete(child);
     }
-    final INode removed = super.removeChild(child, null);
+    final boolean removed = removeChild(child);
     if (undoInfo != null) {
-      if (removed == null) {
+      if (!removed) {
        //remove failed, undo
        diff.undoDelete(child, undoInfo);
      }
    }
    return removed;
  }
+
+  @Override
+  public boolean removeChildAndAllSnapshotCopies(INode child) {
+    if (!removeChild(child)) {
+      return false;
+    }
+
+    // remove same child from the created list, if there is any.
+    final byte[] name = child.getLocalNameBytes();
+    final List<DirectoryDiff> diffList = diffs.asList();
+    for(int i = diffList.size() - 1; i >= 0; i--) {
+      final ChildrenDiff diff = diffList.get(i).diff;
+      final int c = diff.searchCreatedIndex(name);
+      if (c >= 0) {
+        if (diff.removeCreatedChild(c, child)) {
+          return true;
+        }
+      }
+    }
+    return true;
+  }
 
   @Override
   public void replaceChild(final INode oldChild, final INode newChild) {
     super.replaceChild(oldChild, newChild);
 
-    // replace the created child, if there is any.
+    // replace same child in the created list, if there is any.
     final byte[] name = oldChild.getLocalNameBytes();
     final List<DirectoryDiff> diffList = diffs.asList();
     for(int i = diffList.size() - 1; i >= 0; i--) {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java?rev=1450477&r1=1450476&r2=1450477&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java Tue Feb 26 22:04:35 2013
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.util.Iterator;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
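
removeChildAndAllSnapshotCopies(..) above first removes the child from the current tree, then walks the snapshot diff list from newest to oldest and deletes the matching created-list entry if one exists. A toy sketch of that walk (simple lists standing in for ChildrenDiff and the diff list; not the HDFS code):

    import java.util.ArrayList;
    import java.util.List;

    // Toy sketch (not the HDFS classes) of the removeChildAndAllSnapshotCopies
    // walk: after removing the child from the current children list, scan the
    // per-snapshot diffs newest-first and drop the matching created-list entry.
    public class CreatedListSketch {
      static class Diff { final List<String> created = new ArrayList<>(); }

      static final List<String> children = new ArrayList<>();
      static final List<Diff> diffs = new ArrayList<>();

      static boolean removeChildAndAllSnapshotCopies(String child) {
        if (!children.remove(child)) {
          return false;                            // not in the current tree
        }
        for (int i = diffs.size() - 1; i >= 0; i--) {  // newest snapshot first
          final List<String> created = diffs.get(i).created;
          final int c = created.indexOf(child);
          if (c >= 0 && created.get(c) == child) {     // same reference only
            created.remove(c);
            return true;                               // at most one copy
          }
        }
        return true;                                   // removed, no copy found
      }

      public static void main(String[] args) {
        String child = "file1";
        children.add(child);
        Diff d = new Diff();
        d.created.add(child);
        diffs.add(d);
        System.out.println(removeChildAndAllSnapshotCopies(child)); // true
      }
    }
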
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1450477&r1=1450476&r2=1450477&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Tue Feb 26 22:04:35 2013
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.tools.DFSA
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
 import org.junit.Test;
 
 /** A class for testing quota-related commands */
@@ -171,15 +172,13 @@ public class TestQuota {
       fout = dfs.create(childFile1, replication);
 
       // 10.s: but writing fileLen bytes should result in an quota exception
-      hasException = false;
       try {
         fout.write(new byte[fileLen]);
         fout.close();
+        Assert.fail();
       } catch (QuotaExceededException e) {
-        hasException = true;
         IOUtils.closeStream(fout);
       }
-      assertTrue(hasException);
 
       //delete the file
       dfs.delete(childFile1, false);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1450477&r1=1450476&r2=1450477&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Tue Feb 26 22:04:35 2013
@@ -205,27 +205,14 @@ public class TestINodeFile {
   }
 
   @Test
-  public void testAppendBlocks() {
+  public void testConcatBlocks() {
     INodeFile origFile = createINodeFiles(1, "origfile")[0];
     assertEquals("Number of blocks didn't match", origFile.numBlocks(), 1L);
 
     INodeFile[] appendFiles = createINodeFiles(4, "appendfile");
-    origFile.appendBlocks(appendFiles, getTotalBlocks(appendFiles));
+    origFile.concatBlocks(appendFiles);
     assertEquals("Number of blocks didn't match", origFile.numBlocks(), 5L);
   }
-
-  /**
-   * Gives the count of blocks for a given number of files
-   * @param files Array of INode files
-   * @return total count of blocks
-   */
-  private int getTotalBlocks(INodeFile[] files) {
-    int nBlocks=0;
-    for(int i=0; i < files.length; i++) {
-      nBlocks += files[i].numBlocks();
-    }
-    return nBlocks;
-  }
 
   /**
    * Creates the required number of files with one block each
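
The TestQuota hunk above replaces the hasException flag with the usual JUnit idiom: put Assert.fail() immediately after the statement that must throw, so the test fails when no exception is raised. A hypothetical standalone test (not the HDFS one) showing the idiom:

    import static org.junit.Assert.fail;

    import org.junit.Test;

    // Hypothetical JUnit 4 test showing the expected-exception idiom used
    // above: if parseInt(..) does not throw, fail() aborts the test; if it
    // throws, the catch block swallows the expected exception and the test
    // passes.
    public class ExpectedExceptionIdiomTest {
      @Test
      public void testThrows() {
        try {
          Integer.parseInt("not a number");
          fail("expected NumberFormatException");
        } catch (NumberFormatException e) {
          // expected
        }
      }
    }
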
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java?rev=1450477&r1=1450476&r2=1450477&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java Tue Feb 26 22:04:35 2013
@@ -17,13 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -31,14 +32,13 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -46,6 +46,8 @@ import org.junit.Test;
  * Test cases for snapshot-related information in blocksMap.
  */
 public class TestSnapshotBlocksMap {
+  // TODO: fix concat for snapshot
+  private static final boolean runConcatTest = false;
 
   private static final long seed = 0;
   private static final short REPLICATION = 3;
@@ -57,8 +59,10 @@ public class TestSnapshotBlocksMap {
   protected Configuration conf;
   protected MiniDFSCluster cluster;
   protected FSNamesystem fsn;
+  FSDirectory fsdir;
+  BlockManager blockmanager;
   protected DistributedFileSystem hdfs;
-  
+
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
@@ -68,6 +72,8 @@ public class TestSnapshotBlocksMap {
     cluster.waitActive();
 
     fsn = cluster.getNamesystem();
+    fsdir = fsn.getFSDirectory();
+    blockmanager = fsn.getBlockManager();
     hdfs = cluster.getFileSystem();
   }
 
@@ -77,7 +83,39 @@ public class TestSnapshotBlocksMap {
       cluster.shutdown();
     }
   }
-  
+
+  void assertAllNull(INodeFile inode, Path path, String[] snapshots) throws Exception {
+    Assert.assertNull(inode.getBlocks());
+    assertINodeNull(path.toString());
+    assertINodeNullInSnapshots(path, snapshots);
+  }
+
+  void assertINodeNull(String path) throws Exception {
+    Assert.assertNull(fsdir.getINode(path));
+  }
+
+  void assertINodeNullInSnapshots(Path path, String... snapshots) throws Exception {
+    for(String s : snapshots) {
+      assertINodeNull(SnapshotTestHelper.getSnapshotPath(
+          path.getParent(), s, path.getName()).toString());
+    }
+  }
+
+  INodeFile assertBlockCollection(String path, int numBlocks) throws Exception {
+    final INodeFile file = INodeFile.valueOf(fsdir.getINode(path), path);
+    assertEquals(numBlocks, file.getBlocks().length);
+    for(BlockInfo b : file.getBlocks()) {
+      assertBlockCollection(file, b);
+    }
+    return file;
+  }
+
+  void assertBlockCollection(final INodeFile file, final BlockInfo b) {
+    Assert.assertSame(b, blockmanager.getStoredBlock(b));
+    Assert.assertSame(file, blockmanager.getBlockCollection(b));
+    Assert.assertSame(file, b.getBlockCollection());
+  }
+
   /**
    * Test deleting a file with snapshots. Need to check the blocksMap to make
    * sure the corresponding record is updated correctly.
@@ -87,87 +125,107 @@ public class TestSnapshotBlocksMap {
     Path file0 = new Path(sub1, "file0");
     Path file1 = new Path(sub1, "file1");
-    Path subsub1 = new Path(sub1, "sub1");
-    Path subfile0 = new Path(subsub1, "file0");
+    Path sub2 = new Path(sub1, "sub2");
+    Path file2 = new Path(sub2, "file2");
+
+    Path file3 = new Path(sub1, "file3");
+    Path file4 = new Path(sub1, "file4");
+    Path file5 = new Path(sub1, "file5");
 
     // Create file under sub1
-    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
-    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
-    DFSTestUtil.createFile(hdfs, subfile0, BLOCKSIZE, REPLICATION, seed);
-
-    BlockManager bm = fsn.getBlockManager();
-    FSDirectory dir = fsn.getFSDirectory();
-    
-    INodeFile inodeForDeletedFile = INodeFile.valueOf(
-        dir.getINode(subfile0.toString()), subfile0.toString());
-    BlockInfo[] blocksForDeletedFile = inodeForDeletedFile.getBlocks();
-    assertEquals(blocksForDeletedFile.length, 1);
-    BlockCollection bcForDeletedFile = bm
-        .getBlockCollection(blocksForDeletedFile[0]);
-    assertNotNull(bcForDeletedFile);
+    DFSTestUtil.createFile(hdfs, file0, 4*BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file1, 2*BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file2, 3*BLOCKSIZE, REPLICATION, seed);
+
     // Normal deletion
-    hdfs.delete(subsub1, true);
-    bcForDeletedFile = bm.getBlockCollection(blocksForDeletedFile[0]);
-    // The INode should have been removed from the blocksMap
-    assertNull(bcForDeletedFile);
+    {
+      final INodeFile f2 = assertBlockCollection(file2.toString(), 3);
+      BlockInfo[] blocks = f2.getBlocks();
+      hdfs.delete(sub2, true);
+      // The INode should have been removed from the blocksMap
+      for(BlockInfo b : blocks) {
+        assertNull(blockmanager.getBlockCollection(b));
+      }
+    }
 
     // Create snapshots for sub1
-    for (int i = 0; i < 2; i++) {
-      SnapshotTestHelper.createSnapshot(hdfs, sub1, "s" + i);
+    final String[] snapshots = {"s0", "s1", "s2"};
+    DFSTestUtil.createFile(hdfs, file3, 5*BLOCKSIZE, REPLICATION, seed);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[0]);
+    DFSTestUtil.createFile(hdfs, file4, 1*BLOCKSIZE, REPLICATION, seed);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[1]);
+    DFSTestUtil.createFile(hdfs, file5, 7*BLOCKSIZE, REPLICATION, seed);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[2]);
+
+    // set replication so that the inode should be replaced for snapshots
+    {
+      INodeFile f1 = assertBlockCollection(file1.toString(), 2);
+      Assert.assertSame(INodeFile.class, f1.getClass());
+      hdfs.setReplication(file1, (short)2);
+      f1 = assertBlockCollection(file1.toString(), 2);
+      Assert.assertSame(INodeFileWithSnapshot.class, f1.getClass());
     }
 
     // Check the block information for file0
-    // Get the INode for file0
-    INodeFile inode = INodeFile.valueOf(dir.getINode(file0.toString()),
-        file0.toString());
-    BlockInfo[] blocks = inode.getBlocks();
-    // Get the INode for the first block from blocksMap
-    BlockCollection bc = bm.getBlockCollection(blocks[0]);
-    // The two INode should be the same one
-    assertTrue(bc == inode);
+    final INodeFile f0 = assertBlockCollection(file0.toString(), 4);
+    BlockInfo[] blocks0 = f0.getBlocks();
 
     // Also check the block information for snapshot of file0
     Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",
         file0.getName());
-    INodeFile ssINode0 = INodeFile.valueOf(dir.getINode(snapshotFile0.toString()),
-        snapshotFile0.toString());
-    BlockInfo[] ssBlocks = ssINode0.getBlocks();
-    // The snapshot of file1 should contain 1 block
-    assertEquals(1, ssBlocks.length);
+    assertBlockCollection(snapshotFile0.toString(), 4);
 
     // Delete file0
     hdfs.delete(file0, true);
-    // Make sure the first block of file0 is still in blocksMap
-    BlockInfo blockInfoAfterDeletion = bm.getStoredBlock(blocks[0]);
-    assertNotNull(blockInfoAfterDeletion);
-    // Check the INode information
-    BlockCollection bcAfterDeletion = blockInfoAfterDeletion
-        .getBlockCollection();
+    // Make sure the blocks of file0 is still in blocksMap
+    for(BlockInfo b : blocks0) {
+      assertNotNull(blockmanager.getBlockCollection(b));
+    }
+    assertBlockCollection(snapshotFile0.toString(), 4);
 
     // Compare the INode in the blocksMap with INodes for snapshots
-    Path snapshot1File0 = SnapshotTestHelper.getSnapshotPath(sub1, "s1",
-        file0.getName());
-    INodeFile ssINode1 = INodeFile.valueOf(
-        dir.getINode(snapshot1File0.toString()), snapshot1File0.toString());
-    assertTrue(bcAfterDeletion == ssINode0 || bcAfterDeletion == ssINode1);
-    assertEquals(1, bcAfterDeletion.getBlocks().length);
+    String s1f0 = SnapshotTestHelper.getSnapshotPath(sub1, "s1",
        file0.getName()).toString();
+    assertBlockCollection(s1f0, 4);
 
     // Delete snapshot s1
     hdfs.deleteSnapshot(sub1, "s1");
+
     // Make sure the first block of file0 is still in blocksMap
-    BlockInfo blockInfoAfterSnapshotDeletion = bm.getStoredBlock(blocks[0]);
-    assertNotNull(blockInfoAfterSnapshotDeletion);
-    BlockCollection bcAfterSnapshotDeletion = blockInfoAfterSnapshotDeletion
-        .getBlockCollection();
-    assertTrue(bcAfterSnapshotDeletion == ssINode0);
-    assertEquals(1, bcAfterSnapshotDeletion.getBlocks().length);
+    for(BlockInfo b : blocks0) {
+      assertNotNull(blockmanager.getBlockCollection(b));
+    }
+    assertBlockCollection(snapshotFile0.toString(), 4);
+
     try {
-      ssINode1 = INodeFile.valueOf(
-          dir.getINode(snapshot1File0.toString()), snapshot1File0.toString());
+      INodeFile.valueOf(fsdir.getINode(s1f0), s1f0);
       fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
     } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains("File does not exist: "
-          + snapshot1File0.toString(), e);
+      assertExceptionContains("File does not exist: " + s1f0, e);
     }
+
+    // concat file1, file3 and file5 to file4
+    if (runConcatTest) {
+      final INodeFile f1 = assertBlockCollection(file1.toString(), 2);
+      final BlockInfo[] f1blocks = f1.getBlocks();
+      final INodeFile f3 = assertBlockCollection(file3.toString(), 5);
+      final BlockInfo[] f3blocks = f3.getBlocks();
+      final INodeFile f5 = assertBlockCollection(file5.toString(), 7);
+      final BlockInfo[] f5blocks = f5.getBlocks();
+      assertBlockCollection(file4.toString(), 1);
+
+      hdfs.concat(file4, new Path[]{file1, file3, file5});
+
+      final INodeFile f4 = assertBlockCollection(file4.toString(), 15);
+      final BlockInfo[] blocks4 = f4.getBlocks();
+      for(BlockInfo[] blocks : Arrays.asList(f1blocks, f3blocks, blocks4, f5blocks)) {
+        for(BlockInfo b : blocks) {
+          assertBlockCollection(f4, b);
+        }
+      }
+      assertAllNull(f1, file1, snapshots);
+      assertAllNull(f3, file3, snapshots);
+      assertAllNull(f5, file5, snapshots);
    }
  }