hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1405648 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/
Date: Sun, 04 Nov 2012 22:00:14 GMT
Author: szetszwo
Date: Sun Nov  4 22:00:13 2012
New Revision: 1405648

URL: http://svn.apache.org/viewvc?rev=1405648&view=rev
Log:
HDFS-4146. Use getter and setter in INodeFileWithLink to access blocks and initialize root
directory as snapshottable.

Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1405648&r1=1405647&r2=1405648&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt Sun Nov  4 22:00:13 2012
@@ -47,3 +47,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4141. Support directory diff - the difference between the current state
   and a previous snapshot of an INodeDirectory. (szetszwo)
+
+  HDFS-4146. Use getter and setter in INodeFileWithLink to access blocks and
+  initialize root directory as snapshottable. (szetszwo)

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1405648&r1=1405647&r2=1405648&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Sun Nov  4 22:00:13 2012
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.util.ByteArray;
 
 import com.google.common.base.Preconditions;
@@ -120,9 +121,10 @@ public class FSDirectory implements Clos
   FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
     this.dirLock = new ReentrantReadWriteLock(true); // fair
     this.cond = dirLock.writeLock().newCondition();
-    rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
-        ns.createFsOwnerPermissions(new FsPermission((short)0755)),
-        Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
+
+    this.namesystem = ns;
+    reset();
+
     this.fsImage = fsImage;
     int configuredLimit = conf.getInt(
         DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
@@ -143,7 +145,6 @@ public class FSDirectory implements Clos
     NameNode.LOG.info("Caching file names occuring more than " + threshold
         + " times");
     nameCache = new NameCache<ByteArray>(threshold);
-    namesystem = ns;
   }
     
   private FSNamesystem getFSNamesystem() {
@@ -2030,9 +2031,11 @@ public class FSDirectory implements Clos
    * Reset the entire namespace tree.
    */
   void reset() {
-    rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
+    final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
+        INodeDirectory.ROOT_NAME,
         getFSNamesystem().createFsOwnerPermissions(new FsPermission((short)0755)),
-        Integer.MAX_VALUE, -1);
+        Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
+    rootDir = INodeDirectorySnapshottable.newInstance(r, 0);
   }
 
   /**

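In short, the FSDirectory change above moves root construction out of the constructor: the constructor now just stores the namesystem reference and calls reset(), and reset() wraps the quota-carrying root directory in a snapshottable node via INodeDirectorySnapshottable.newInstance(r, 0). Below is a minimal standalone sketch of that wrap-the-root pattern; the class names (DirectoryWithQuota, SnapshottableDirectory, MiniDirectoryTree) and the quota values are simplified stand-ins, not the real Hadoop types.

// Simplified stand-ins for illustration only (not the real Hadoop classes).
class DirectoryWithQuota {
  final String name;
  final long nsQuota;
  final long dsQuota;

  DirectoryWithQuota(String name, long nsQuota, long dsQuota) {
    this.name = name;
    this.nsQuota = nsQuota;
    this.dsQuota = dsQuota;
  }
}

class SnapshottableDirectory extends DirectoryWithQuota {
  final int snapshotQuota;

  private SnapshottableDirectory(DirectoryWithQuota dir, int snapshotQuota) {
    super(dir.name, dir.nsQuota, dir.dsQuota);
    this.snapshotQuota = snapshotQuota;
  }

  // Mirrors the INodeDirectorySnapshottable.newInstance(r, 0) call in the patch.
  static SnapshottableDirectory newInstance(DirectoryWithQuota dir, int snapshotQuota) {
    return new SnapshottableDirectory(dir, snapshotQuota);
  }
}

class MiniDirectoryTree {
  private SnapshottableDirectory rootDir;

  MiniDirectoryTree() {
    reset();  // constructor delegates to reset(), as FSDirectory now does
  }

  void reset() {
    // placeholder name and quota values stand in for ROOT_NAME,
    // Long.MAX_VALUE and UNKNOWN_DISK_SPACE
    final DirectoryWithQuota r = new DirectoryWithQuota("", Long.MAX_VALUE, -1L);
    rootDir = SnapshottableDirectory.newInstance(r, 0);
  }
}
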
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1405648&r1=1405647&r2=1405648&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Sun Nov  4 22:00:13 2012
@@ -55,7 +55,7 @@ public class INodeFile extends INode imp
 
   private long header;
 
-  protected BlockInfo[] blocks;
+  private BlockInfo[] blocks;
 
   INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
                       short replication, long modificationTime,

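This hunk narrows blocks from protected to private, so subclasses have to read and write the array through accessors instead of touching the field, which is what the INodeFileWithLink change below relies on. A rough sketch of that encapsulation pattern, assuming getBlocks()/setBlocks() style accessors and using simplified stand-in classes rather than the real INodeFile hierarchy:

// Simplified stand-ins for illustration only (not the real INodeFile classes).
class FileNode {
  // was effectively visible to subclasses before the patch; now private
  private long[] blocks;

  long[] getBlocks() { return blocks; }

  void setBlocks(long[] blocks) { this.blocks = blocks; }
}

class LinkedFileNode extends FileNode {
  // Truncate the block array to its first n entries, going through the
  // accessors rather than the (now private) field.
  void truncateTo(int n) {
    final long[] old = getBlocks();
    if (old == null || n >= old.length) {
      return;
    }
    final long[] shorter = new long[n];
    System.arraycopy(old, 0, shorter, 0, n);
    setBlocks(shorter);
  }
}
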
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java?rev=1405648&r1=1405647&r2=1405648&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java Sun Nov  4 22:00:13 2012
@@ -107,35 +107,36 @@ public class INodeFileWithLink extends I
   }
 
  private void collectBlocksBeyondMaxAndClear(final long max, final List<Block> v) {
-    if (blocks != null) {
+    final BlockInfo[] oldBlocks = getBlocks();
+    if (oldBlocks != null) {
       //find the minimum n such that the size of the first n blocks > max
       int n = 0;
-      for(long size = 0; n < blocks.length && max > size; n++) {
-        size += blocks[n].getNumBytes();
+      for(long size = 0; n < oldBlocks.length && max > size; n++) {
+        size += oldBlocks[n].getNumBytes();
       }
 
-      //starting from block[n], the data is beyond max.
-      if (n < blocks.length) {
+      //starting from block n, the data is beyond max.
+      if (n < oldBlocks.length) {
         //resize the array.  
         final BlockInfo[] newBlocks;
         if (n == 0) {
           newBlocks = null;
         } else {
           newBlocks = new BlockInfo[n];
-          System.arraycopy(blocks, 0, newBlocks, 0, n);
+          System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
         }
         for(INodeFileWithLink i = next; i != this; i = i.getNext()) {
-          i.blocks = newBlocks;
+          i.setBlocks(newBlocks);
         }
 
         //collect the blocks beyond max.  
         if (v != null) {
-          for(; n < blocks.length; n++) {
-            v.add(blocks[n]);
+          for(; n < oldBlocks.length; n++) {
+            v.add(oldBlocks[n]);
           }
         }
       }
-      blocks = null;
+      setBlocks(null);
     }
   }
 }
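
The rewritten collectBlocksBeyondMaxAndClear keeps the smallest prefix of blocks whose total size exceeds max, points the linked files at the shortened array, and collects the blocks beyond that prefix. A standalone sketch of just the prefix computation, using plain block sizes instead of BlockInfo objects; the helper class and method names here are hypothetical, not part of the patch:

import java.util.ArrayList;
import java.util.List;

// Hypothetical helper that mirrors the loop above: keep the minimum prefix of
// blocks whose total size exceeds max, collect everything after that prefix.
final class BlockTruncationSketch {
  static long[] keepUpTo(long[] blockSizes, long max, List<Long> collected) {
    // find the minimum n such that the size of the first n blocks > max
    int n = 0;
    for (long size = 0; n < blockSizes.length && max > size; n++) {
      size += blockSizes[n];
    }
    if (n == blockSizes.length) {
      return blockSizes;                 // nothing is beyond max
    }
    for (int i = n; i < blockSizes.length; i++) {
      collected.add(blockSizes[i]);      // blocks beyond max
    }
    final long[] kept = new long[n];
    System.arraycopy(blockSizes, 0, kept, 0, n);
    return kept;
  }

  public static void main(String[] args) {
    final List<Long> beyond = new ArrayList<Long>();
    final long[] kept = keepUpTo(new long[] {64, 64, 64, 64}, 100, beyond);
    // prints: kept=2 blocks, beyond=[64, 64]
    System.out.println("kept=" + kept.length + " blocks, beyond=" + beyond);
  }
}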


