Subject: svn commit: r1405648 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/
Date: Sun, 04 Nov 2012 22:00:14 -0000
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
From: szetszwo@apache.org
Message-Id: <20121104220014.815C323888E3@eris.apache.org>

Author: szetszwo
Date: Sun Nov  4 22:00:13 2012
New Revision: 1405648

URL: http://svn.apache.org/viewvc?rev=1405648&view=rev
Log:
HDFS-4146. Use getter and setter in INodeFileWithLink to access blocks and
initialize root directory as snapshottable.

Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1405648&r1=1405647&r2=1405648&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt Sun Nov  4 22:00:13 2012
@@ -47,3 +47,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4141. Support directory diff - the difference between the current state
   and a previous snapshot of an INodeDirectory.  (szetszwo)
+
+  HDFS-4146. Use getter and setter in INodeFileWithLink to access blocks and
+  initialize root directory as snapshottable.  (szetszwo)
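For readers skimming the diffs that follow: the commit has two independent halves. The first is a plain encapsulation move in INodeFile, where the blocks field goes from protected to private, so subclasses such as INodeFileWithLink must use accessors. A minimal, self-contained sketch of the pattern (the accessor bodies are assumptions; only the field change and the names getBlocks/setBlocks appear in the diffs below):

// Minimal sketch, not the real HDFS classes. Only the visibility change
// and the accessor names are taken from the diffs below.
class BlockInfo { /* block metadata, elided */ }

class INodeFile {
  // Was "protected BlockInfo[] blocks"; making it private forces
  // subclasses to go through the accessors.
  private BlockInfo[] blocks;

  BlockInfo[] getBlocks() {
    return blocks;
  }

  void setBlocks(BlockInfo[] blocks) {
    this.blocks = blocks;
  }
}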
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1405648&r1=1405647&r2=1405648&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Sun Nov  4 22:00:13 2012
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.util.ByteArray;
 
 import com.google.common.base.Preconditions;
@@ -120,9 +121,10 @@ public class FSDirectory implements Clos
   FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
     this.dirLock = new ReentrantReadWriteLock(true); // fair
     this.cond = dirLock.writeLock().newCondition();
-    rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
-        ns.createFsOwnerPermissions(new FsPermission((short)0755)),
-        Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
+
+    this.namesystem = ns;
+    reset();
+
     this.fsImage = fsImage;
     int configuredLimit = conf.getInt(
         DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
@@ -143,7 +145,6 @@ public class FSDirectory implements Clos
       NameNode.LOG.info("Caching file names occuring more than " + threshold
           + " times");
     nameCache = new NameCache<ByteArray>(threshold);
-    namesystem = ns;
   }
 
   private FSNamesystem getFSNamesystem() {
@@ -2030,9 +2031,11 @@ public class FSDirectory implements Clos
    * Reset the entire namespace tree.
    */
   void reset() {
-    rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
+    final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
+        INodeDirectory.ROOT_NAME,
         getFSNamesystem().createFsOwnerPermissions(new FsPermission((short)0755)),
-        Integer.MAX_VALUE, -1);
+        Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
+    rootDir = INodeDirectorySnapshottable.newInstance(r, 0);
   }
 
   /**
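The reset() hunk above is the second half of the change: the root is first built as an ordinary INodeDirectoryWithQuota and then wrapped as an INodeDirectorySnapshottable through a static factory, so every namespace now starts with a snapshottable root. A hedged sketch of that call shape follows; the factory internals and the state-copying constructor are assumptions, and only the newInstance(r, 0) call is from the diff:

// Self-contained sketch of the wrap-at-reset pattern, using stand-in
// types rather than the real org.apache.hadoop.hdfs.server.namenode classes.
class INodeDirectoryWithQuota {
  final String name;
  final long nsQuota, dsQuota;

  INodeDirectoryWithQuota(String name, long nsQuota, long dsQuota) {
    this.name = name;
    this.nsQuota = nsQuota;
    this.dsQuota = dsQuota;
  }
}

class INodeDirectorySnapshottable extends INodeDirectoryWithQuota {
  final int snapshotQuota;

  private INodeDirectorySnapshottable(INodeDirectoryWithQuota dir,
      int snapshotQuota) {
    super(dir.name, dir.nsQuota, dir.dsQuota); // copy the wrapped state
    this.snapshotQuota = snapshotQuota;
  }

  // Mirrors the INodeDirectorySnapshottable.newInstance(r, 0) call in reset().
  static INodeDirectorySnapshottable newInstance(INodeDirectoryWithQuota dir,
      int snapshotQuota) {
    return new INodeDirectorySnapshottable(dir, snapshotQuota);
  }
}

A side effect visible in the constructor hunk: routing root construction through reset() removes the duplicated root-building code that previously lived in both the FSDirectory constructor and reset(), which is why the constructor now just sets namesystem and calls reset().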
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1405648&r1=1405647&r2=1405648&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Sun Nov  4 22:00:13 2012
@@ -55,7 +55,7 @@ public class INodeFile extends INode imp
 
   private long header;
 
-  protected BlockInfo[] blocks;
+  private BlockInfo[] blocks;
 
   INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
       short replication, long modificationTime,

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java?rev=1405648&r1=1405647&r2=1405648&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java Sun Nov  4 22:00:13 2012
@@ -107,35 +107,36 @@ public class INodeFileWithLink extends I
   }
 
   private void collectBlocksBeyondMaxAndClear(final long max, final List<Block> v) {
-    if (blocks != null) {
+    final BlockInfo[] oldBlocks = getBlocks();
+    if (oldBlocks != null) {
       //find the minimum n such that the size of the first n blocks > max
       int n = 0;
-      for(long size = 0; n < blocks.length && max > size; n++) {
-        size += blocks[n].getNumBytes();
+      for(long size = 0; n < oldBlocks.length && max > size; n++) {
+        size += oldBlocks[n].getNumBytes();
       }
 
-      //starting from block[n], the data is beyond max.
-      if (n < blocks.length) {
+      //starting from block n, the data is beyond max.
+      if (n < oldBlocks.length) {
         //resize the array.
         final BlockInfo[] newBlocks;
         if (n == 0) {
           newBlocks = null;
         } else {
           newBlocks = new BlockInfo[n];
-          System.arraycopy(blocks, 0, newBlocks, 0, n);
+          System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
         }
         for(INodeFileWithLink i = next; i != this; i = i.getNext()) {
-          i.blocks = newBlocks;
+          i.setBlocks(newBlocks);
         }
 
         //collect the blocks beyond max.
         if (v != null) {
-          for(; n < blocks.length; n++) {
-            v.add(blocks[n]);
+          for(; n < oldBlocks.length; n++) {
+            v.add(oldBlocks[n]);
           }
         }
       }
-      blocks = null;
+      setBlocks(null);
     }
   }
 }
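To make the truncation logic in collectBlocksBeyondMaxAndClear easier to follow, here is a hedged, standalone sketch of the same computation with block sizes as plain longs instead of BlockInfo objects. The class and method names and the main() driver are hypothetical; the final setBlocks(null) on the current file and the circular-link update are omitted:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Standalone sketch of the prefix/collect computation above; not HDFS code.
public class TruncateSketch {
  /** Returns the kept prefix (null if empty) and adds the rest to v. */
  static long[] truncateBeyondMax(long[] blocks, long max, List<Long> v) {
    if (blocks == null) {
      return null;
    }
    // Find the minimum n such that the size of the first n blocks > max.
    int n = 0;
    for (long size = 0; n < blocks.length && max > size; n++) {
      size += blocks[n];
    }
    if (n >= blocks.length) {
      return blocks; // nothing lies beyond max
    }
    // Resize the array; starting from block n, the data is beyond max.
    final long[] newBlocks = (n == 0) ? null : Arrays.copyOf(blocks, n);
    for (int i = n; i < blocks.length; i++) {
      v.add(blocks[i]); // collect the blocks beyond max
    }
    return newBlocks;
  }

  public static void main(String[] args) {
    final List<Long> beyond = new ArrayList<Long>();
    final long[] kept = truncateBeyondMax(new long[] {64, 64, 64, 64}, 100, beyond);
    // The first two blocks (128 bytes) already cover max=100, so two are
    // kept and the last two are collected: prints "kept=2 beyond=[64, 64]".
    System.out.println("kept=" + kept.length + " beyond=" + beyond);
  }
}

Note that in the real method the resized array is also propagated around the circular chain of linked files (the loop over getNext()), so after the change every snapshot copy observes the same truncated block list through setBlocks() rather than by writing the once-protected field directly.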