hadoop-common-commits mailing list archives

From dhr...@apache.org
Subject svn commit: r610478 - in /lucene/hadoop/trunk: ./ conf/ src/java/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/dfs/ src/webapps/dfs/
Date Wed, 09 Jan 2008 17:39:53 GMT
Author: dhruba
Date: Wed Jan  9 09:39:25 2008
New Revision: 610478

URL: http://svn.apache.org/viewvc?rev=610478&view=rev
Log:
HADOOP-2447. HDFS can be configured to limit the total number of
objects (inodes and blocks) in the file system. (dhruba)
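
For reference, an operator would enable this limit by overriding the new
dfs.max.objects property (added to hadoop-default.xml below) in
hadoop-site.xml. A minimal sketch; the value shown is illustrative:

    <property>
      <name>dfs.max.objects</name>
      <value>1000000</value>
    </property>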


Added:
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileLimit.java   (with props)
Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/conf/hadoop-default.xml
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
    lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=610478&r1=610477&r2=610478&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Wed Jan  9 09:39:25 2008
@@ -64,6 +64,9 @@
     default.  Enable with dfs.permissions=true.
     (Tsz Wo (Nicholas) & taton via cutting)
 
+    HADOOP-2447. HDFS can be configured to limit the total number of 
+    objects (inodes and blocks) in the file system. (dhruba)
+
   IMPROVEMENTS
 
     HADOOP-2045.  Change committer list on website to a table, so that

Modified: lucene/hadoop/trunk/conf/hadoop-default.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/conf/hadoop-default.xml?rev=610478&r1=610477&r2=610478&view=diff
==============================================================================
--- lucene/hadoop/trunk/conf/hadoop-default.xml (original)
+++ lucene/hadoop/trunk/conf/hadoop-default.xml Wed Jan  9 09:39:25 2008
@@ -435,6 +435,15 @@
 </property> 
 
 <property>
+  <name>dfs.max.objects</name>
+  <value>0</value>
+  <description>The maximum number of files, directories and blocks
+  dfs supports. A value of zero indicates no limit to the number
+  of objects that dfs supports.
+  </description>
+</property>
+
+<property>
   <name>fs.s3.block.size</name>
   <value>67108864</value>
   <description>Block size to use when writing files to S3.</description>

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java?rev=610478&r1=610477&r2=610478&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java Wed Jan  9 09:39:25 2008
@@ -45,6 +45,8 @@
   boolean ready = false;
   // Metrics record
   private MetricsRecord directoryMetrics = null;
+
+  volatile private long totalInodes = 1;   // number of inodes; starts at 1 for the root dir
     
   /** Access an existing dfs name directory. */
   public FSDirectory(FSNamesystem ns, Configuration conf) throws IOException {
@@ -141,6 +143,9 @@
       } catch (FileNotFoundException e) {
         newNode = null;
       }
+      if (newNode != null) {
+        totalInodes++;
+      }
     }
     if (newNode == null) {
       NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
@@ -183,6 +188,9 @@
       } catch (FileNotFoundException e) {
         return null;
       }
+      if (newNode != null) {
+        totalInodes++;
+      }
       return newNode;
     }
   }
@@ -461,6 +469,7 @@
           ArrayList<Block> v = new ArrayList<Block>();
           int filesRemoved = targetNode.collectSubtreeBlocks(v);
           incrDeletedFileCount(filesRemoved);
+          totalInodes -= filesRemoved;
           for (Block b : v) {
             namesystem.blocksMap.removeINode(b);
           }
@@ -597,7 +606,7 @@
    * Create directory entries for every item
    */
   boolean mkdirs(String src, PermissionStatus permissions,
-      boolean inheritPermission, long now) {
+      boolean inheritPermission, long now) throws IOException {
     src = normalizePath(src);
 
     // Use this to collect all the dirs we need to construct
@@ -619,8 +628,15 @@
     for (int i = numElts - 1; i >= 0; i--) {
       String cur = v.get(i);
       try {
-        INode inserted = unprotectedMkdir(cur, permissions,
-            inheritPermission || i != 0, now);
+        INode inserted = null;
+        synchronized (rootDir) {
+          inserted = rootDir.addNode(cur, 
+                             new INodeDirectory(permissions, now),
+                             inheritPermission || i != 0);
+          if (inserted != null) {
+            totalInodes++;
+          }
+        }
         if (inserted != null) {
           NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
                                         +"created directory "+cur);
@@ -643,11 +659,15 @@
 
   /**
    */
-  INodeDirectory unprotectedMkdir(String src, PermissionStatus permissions,
+  INode unprotectedMkdir(String src, PermissionStatus permissions,
       boolean inheritPermission, long timestamp) throws FileNotFoundException {
     synchronized (rootDir) {
-      return rootDir.addNode(src, new INodeDirectory(permissions, timestamp),
-          inheritPermission);
+      INode newNode = rootDir.addNode(src, new INodeDirectory(permissions, 
+                                      timestamp), inheritPermission);
+      if (newNode != null) {
+        totalInodes++;
+      }
+      return newNode;
     }
   }
 
@@ -672,6 +692,12 @@
       else {
         return targetNode.computeContentsLength();
       }
+    }
+  }
+
+  long totalInodes() {
+    synchronized (rootDir) {
+      return totalInodes;
     }
   }
 }
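
The FSDirectory changes above all follow one pattern: each successful
addNode() bumps a counter while holding the rootDir lock, and deletes
subtract the number of inodes removed. A minimal standalone sketch of
that discipline (simplified names, not the committed class):

    // Sketch of the inode-counting discipline used in FSDirectory above.
    // The counter starts at 1 to account for the root directory.
    class InodeCounterSketch {
      private final Object lock = new Object();
      private long totalInodes = 1;   // root directory

      // Called after a successful insert of a file or directory inode.
      void onInsert() {
        synchronized (lock) { totalInodes++; }
      }

      // Called after a delete that removed 'removed' inodes.
      void onDelete(long removed) {
        synchronized (lock) { totalInodes -= removed; }
      }

      long totalInodes() {
        synchronized (lock) { return totalInodes; }
      }
    }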

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=610478&r1=610477&r2=610478&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Wed Jan  9 09:39:25 2008
@@ -213,6 +213,8 @@
   private static final SimpleDateFormat DATE_FORM =
     new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
 
+  private long maxFsObjects = 0;          // maximum number of fs objects
+
   /**
    * FSNamesystem constructor.
    */
@@ -347,6 +349,7 @@
                                                    "dfs.namenode.decommission.interval",
                                                    5 * 60 * 1000);    
     this.defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    this.maxFsObjects = conf.getLong("dfs.max.objects", 0);
   }
 
   /** Return the FSNamesystem object
@@ -1000,6 +1003,7 @@
       // Now we can add the name to the filesystem. This file has no
       // blocks associated with it.
       //
+      checkFsObjectLimit();
       INode newNode = dir.addFile(src, permissions,
           replication, blockSize, holder, clientMachine, clientNode);
       if (newNode == null) {
@@ -1043,6 +1047,9 @@
         throw new SafeModeException("Cannot add block to " + src, safeMode);
       }
 
+      // have we exceeded the configured limit of fs objects?
+      checkFsObjectLimit();
+
       INodeFileUnderConstruction pendingFile  = checkLease(src, clientName);
 
       //
@@ -1496,6 +1503,11 @@
     }
     checkAncestorAccess(src, FsAction.WRITE);
 
+    // validate that we have enough inodes. This is, at best, a 
+    // heuristic because the mkdirs() operation might need to 
+    // create multiple inodes.
+    checkFsObjectLimit();
+
     if (!dir.mkdirs(src, permissions, false, now())) {
       throw new IOException("Invalid directory name: " + src);
     }
@@ -3701,6 +3713,13 @@
   }
 
   /**
+   * Get the total number of blocks in the system. 
+   */
+  long getBlockTotal() {
+    return blocksMap.size();
+  }
+
+  /**
    * Enter safe mode manually.
    * @throws IOException
    */
@@ -3879,5 +3898,24 @@
           ancestorAccess, parentAccess, access, subAccess);
     }
     return pc;
+  }
+
+  /*
+   * Check to see if we have exceeded the limit on the number
+   * of inodes.
+   */
+  void checkFsObjectLimit() throws IOException {
+    if (maxFsObjects != 0 &&
+        maxFsObjects <= dir.totalInodes() + getBlockTotal()) {
+      throw new IOException("Exceeded the configured number of objects " +
+                             maxFsObjects + " in the filesystem.");
+    }
+  }
+
+  /**
+   * Get the maximum number of objects allowed in the system. 
+   */
+  long getMaxObjects() {
+    return maxFsObjects;
   }
 }
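
The enforcement added to FSNamesystem boils down to a single comparison
against the sum of inodes and blocks. A minimal standalone sketch of the
same check (simplified names, not the committed class):

    import java.io.IOException;

    // Sketch of checkFsObjectLimit(): reject an operation once the current
    // inode + block count has reached the configured ceiling (0 = no limit).
    class ObjectLimitSketch {
      private final long maxFsObjects;

      ObjectLimitSketch(long maxFsObjects) {
        this.maxFsObjects = maxFsObjects;
      }

      void check(long totalInodes, long blockTotal) throws IOException {
        if (maxFsObjects != 0 &&
            maxFsObjects <= totalInodes + blockTotal) {
          throw new IOException("Exceeded the configured number of objects " +
                                maxFsObjects + " in the filesystem.");
        }
      }
    }

Note that the check uses <= rather than <, so an operation is refused as
soon as the existing count reaches the limit; this is why the test below
can create exactly maxObjects/2 files on top of the root inode.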

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java?rev=610478&r1=610477&r2=610478&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java Wed Jan  9 09:39:25 2008
@@ -34,6 +34,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.dfs.FSConstants.UpgradeAction;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.net.NetUtils;
 
 public class JspHelper {
@@ -171,6 +172,28 @@
     if (!fsn.isInSafeMode())
       return "";
     return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
+  }
+
+  public String getInodeLimitText() {
+    long inodes = fsn.dir.totalInodes();
+    long blocks = fsn.getBlockTotal();
+    long maxobjects = fsn.getMaxObjects();
+    long totalMemory = Runtime.getRuntime().totalMemory();   
+    long maxMemory = Runtime.getRuntime().maxMemory();   
+
+    long used = (totalMemory * 100)/maxMemory;
+ 
+    String str = inodes + " files and directories, " +
+                 blocks + " blocks = " +
+                 (inodes + blocks) + " total";
+    if (maxobjects != 0) {
+      long pct = ((inodes + blocks) * 100)/maxobjects;
+      str += " / " + maxobjects + " (" + pct + "%)";
+    }
+    str += ".  Heap Size is " + FsShell.byteDesc(totalMemory) + " / " + 
+           FsShell.byteDesc(maxMemory) + 
+           " (" + used + "%) <br>";
+    return str;
   }
 
   public String getUpgradeStatusText() {
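
With this change, the cluster summary on dfshealth.jsp gains a line built
by getInodeLimitText(); given the string construction above, it would
render along these lines (numbers are illustrative, and the heap figures
depend on the JVM):

    7 files and directories, 4 blocks = 11 total / 1000000 (0%).  Heap Size is 64 MB / 512 MB (12%)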

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileLimit.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileLimit.java?rev=610478&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileLimit.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileLimit.java Wed Jan  9 09:39:25 2008
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.net.*;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
+
+
+/**
+ * This class tests that a file system adheres to the limit of
+ * maximum number of files that is configured.
+ */
+public class TestFileLimit extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 8192;
+  static final int numBlocks = 2;
+  static final int fileSize = numBlocks * blockSize;
+  boolean simulatedStorage = false;
+
+  // The test file is 2 times the blocksize. This means that when
+  // the entire file is written, both blocks definitely get flushed
+  // to the datanodes.
+
+  private static String TEST_ROOT_DIR =
+    new Path(System.getProperty("test.build.data","/tmp"))
+    .toString().replace(' ', '+');
+  
+  //
+  // creates a zero-length file.
+  //
+  private void createFile(FileSystem fileSys, Path name)
+    throws IOException {
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)1, (long)blockSize);
+    stm.close();
+  }
+
+  private void waitForLimit(FSNamesystem namesys, long num)
+  {
+    // wait for the total object count to reach the expected value
+    while (true) {
+      long total = namesys.getBlockTotal() + namesys.dir.totalInodes();
+      System.out.println("Comparing current nodes " + total +
+                         " to become " + num);
+      if (total == num) {
+        break;
+      }
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+      }
+    }
+  }
+
+  /**
+   * Test that the file system adheres to the configured object limit.
+   */
+  public void testFileLimit() throws IOException {
+    Configuration conf = new Configuration();
+    int maxObjects = 5;
+    conf.setLong("dfs.max.objects", maxObjects);
+    conf.setLong("dfs.blockreport.intervalMsec", 1000L);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    int currentNodes = 0;
+    
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    FSNamesystem namesys = FSNamesystem.fsNamesystemObject;
+    NameNode namenode = cluster.getNameNode();
+    try {
+
+      //
+      // check that / exists
+      //
+      Path path = new Path("/");
+      assertTrue("/ should be a directory", 
+                 fs.getFileStatus(path).isDir() == true);
+      currentNodes = 1;          // root inode
+
+      // verify that we can create the specified number of files. We leave
+      // one for the "/". Each file takes an inode and a block.
+      //
+      for (int i = 0; i < maxObjects/2; i++) {
+        Path file = new Path("/filestatus" + i);
+        createFile(fs, file);
+        System.out.println("Created file " + file);
+        currentNodes += 2;      // two more objects for this creation.
+      }
+
+      // verify that creating another file fails
+      boolean hitException = false;
+      try {
+        Path file = new Path("/filestatus");
+        createFile(fs, file);
+        System.out.println("Created file " + file);
+      } catch (IOException e) {
+        hitException = true;
+      }
+      assertTrue("Was able to exceed file limit", hitException);
+
+      // delete one file
+      Path file0 = new Path("/filestatus0");
+      fs.delete(file0);
+      System.out.println("Deleted file " + file0);
+      currentNodes -= 2;
+
+      // wait for number of blocks to decrease
+      waitForLimit(namesys, currentNodes);
+
+      // now, we should be able to create a new file
+      createFile(fs, file0);
+      System.out.println("Created file " + file0 + " again.");
+      currentNodes += 2;
+
+      // delete the file again
+      file0 = new Path("/filestatus0");
+      fs.delete(file0);
+      System.out.println("Deleted file " + file0 + " again.");
+      currentNodes -= 2;
+
+      // wait for number of blocks to decrease
+      waitForLimit(namesys, currentNodes);
+
+      // create two directories in place of the file that we deleted
+      Path dir = new Path("/dir0/dir1");
+      fs.mkdirs(dir);
+      System.out.println("Created directories " + dir);
+      currentNodes += 2;
+      waitForLimit(namesys, currentNodes);
+
+      // verify that creating another directory fails
+      hitException = false;
+      try {
+        fs.mkdirs(new Path("dir.fail"));
+        System.out.println("Directory creation should not have succeeded.");
+      } catch (IOException e) {
+        hitException = true;
+      }
+      assertTrue("Was able to exceed dir limit", hitException);
+
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  public void testFileLimitSimulated() throws IOException {
+    simulatedStorage = true;
+    testFileLimit();
+    simulatedStorage = false;
+  }
+}

Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileLimit.java
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileLimit.java
------------------------------------------------------------------------------
    svn:keywords = Id Revision HeadURL

Modified: lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp?rev=610478&r1=610477&r2=610478&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp (original)
+++ lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp Wed Jan  9 09:39:25 2008
@@ -239,6 +239,7 @@
 <hr>
 <h3>Cluster Summary</h3>
 <b> <%= jspHelper.getSafeModeText()%> </b>
+<b> <%= jspHelper.getInodeLimitText()%> </b>
 
 <% 
     generateDFSHealthReport(out, request); 


