hadoop-hdfs-commits mailing list archives

From: sur...@apache.org
Subject: svn commit: r931675 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/server/namenode/metrics/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/
Date: Wed, 07 Apr 2010 20:39:15 GMT
Author: suresh
Date: Wed Apr  7 20:39:15 2010
New Revision: 931675

URL: http://svn.apache.org/viewvc?rev=931675&view=rev
Log:
HDFS-132. Fix namenode to not report files deleted metrics for deletions done while replaying
edits during startup. Contributed by Suresh Srinivas.
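
The fix follows a simple pattern: the shared "unprotected" mutation methods are invoked both by live client operations and by edit-log replay at startup, so they must not update metrics themselves; they report what happened, and only the live code path (which also writes the edit log) bumps the counter. Below is a minimal, self-contained Java sketch of that pattern, using hypothetical class and method names rather than the real HDFS types.

    import java.util.concurrent.atomic.AtomicInteger;

    class DirectorySketch {
        // Stand-in for NameNodeMetrics.numFilesDeleted (hypothetical).
        static final AtomicInteger filesDeletedMetric = new AtomicInteger();

        // Shared by client ops and edit replay: returns the number of
        // inodes removed instead of incrementing the metric directly.
        int unprotectedDelete(String src) {
            return doRemove(src);  // caller decides whether to count it
        }

        // Live client path: updates the metric and logs the edit.
        boolean delete(String src) {
            int filesRemoved = unprotectedDelete(src);
            if (filesRemoved <= 0) {
                return false;
            }
            filesDeletedMetric.addAndGet(filesRemoved); // counted once, live only
            logEdit("DELETE " + src);                   // pretend edit logging
            return true;
        }

        // Edit replay at startup calls unprotectedDelete() directly, so
        // replayed deletions never inflate the files-deleted metric.
        void replayEdit(String edit) {
            unprotectedDelete(edit.substring("DELETE ".length()));
        }

        private int doRemove(String src) { return 1; } // pretend removal
        private void logEdit(String record) { }

        public static void main(String[] args) {
            DirectorySketch dir = new DirectorySketch();
            dir.delete("/live/file");           // metric becomes 1
            dir.replayEdit("DELETE /old/file"); // metric stays 1
            System.out.println("FilesDeleted = " + filesDeletedMetric.get());
        }
    }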


Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=931675&r1=931674&r2=931675&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Apr  7 20:39:15 2010
@@ -138,6 +138,9 @@ Trunk (unreleased changes)
 
   BUG FIXES
 
+    HDFS-132. Fix namenode to not report files deleted metrics for deletions
+    done while replaying edits during startup. (suresh)
+
     HDFS-913. Rename fault injection test TestRename.java to TestFiRename.java
     to include it in tests run by ant target run-test-hdfs-fault-inject.
     (suresh)

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=931675&r1=931674&r2=931675&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Apr  7 20:39:15 2010
@@ -45,9 +45,6 @@ import org.apache.hadoop.hdfs.protocol.Q
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /*************************************************
@@ -64,7 +61,6 @@ class FSDirectory implements Closeable {
   INodeDirectoryWithQuota rootDir;
   FSImage fsImage;  
   private volatile boolean ready = false;
-  private MetricsRecord directoryMetrics = null;
   private static final long UNKNOWN_DISK_SPACE = -1;
   private final int lsLimit;  // max list limit
   
@@ -90,7 +86,6 @@ class FSDirectory implements Closeable {
         DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
     this.lsLimit = configuredLimit>0 ?
         configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;
-    initialize(conf);
   }
     
   private FSNamesystem getFSNamesystem() {
@@ -101,12 +96,6 @@ class FSDirectory implements Closeable {
     return getFSNamesystem().blockManager;
   }
 
-  private void initialize(Configuration conf) {
-    MetricsContext metricsContext = MetricsUtil.getContext("dfs");
-    directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
-    directoryMetrics.setTag("sessionId", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY));
-  }
-
   void loadFSImage(Collection<URI> dataDirs,
                    Collection<URI> editsDirs,
                    StartupOption startOpt) 
@@ -135,8 +124,8 @@ class FSDirectory implements Closeable {
   }
 
   private void incrDeletedFileCount(int count) {
-    directoryMetrics.incrMetric("files_deleted", count);
-    directoryMetrics.update();
+    if (getFSNamesystem() != null)
+      NameNode.getNameNodeMetrics().numFilesDeleted.inc(count);
   }
     
   /**
@@ -418,7 +407,9 @@ class FSDirectory implements Closeable {
     }
     waitForReady();
     long now = FSNamesystem.now();
-    unprotectedRenameTo(src, dst, now, options);
+    if (unprotectedRenameTo(src, dst, now, options)) {
+      incrDeletedFileCount(1);
+    }
     fsImage.getEditLog().logRename(src, dst, now, options);
   }
 
@@ -538,9 +529,11 @@ class FSDirectory implements Closeable {
    * @param timestamp modification time
    * @param options Rename options
    * @throws IOException if the operation violates any quota limit
+   * @return true if rename overwrites {@code dst}
    */
-  void unprotectedRenameTo(String src, String dst, long timestamp,
-      Options.Rename... options) throws IOException, UnresolvedLinkException {
+  boolean unprotectedRenameTo(String src, String dst, long timestamp,
+      Options.Rename... options) throws IOException,
+      UnresolvedLinkException {
     boolean overwrite = false;
     if (null != options) {
       for (Rename option : options) {
@@ -569,7 +562,7 @@ class FSDirectory implements Closeable {
 
       // validate of the destination
       if (dst.equals(src)) {
-        return;
+        return false;
       }
       // dst cannot be a directory or a file under src
       if (dst.startsWith(src) && 
@@ -652,6 +645,7 @@ class FSDirectory implements Closeable {
         dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1,
             removedSrc, UNKNOWN_DISK_SPACE, false);
 
+        int filesDeleted = 0;
         if (dstChild != null) {
           removedSrc = null;
           if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -667,11 +661,10 @@ class FSDirectory implements Closeable {
             INode rmdst = removedDst;
             removedDst = null;
             List<Block> collectedBlocks = new ArrayList<Block>();
-            int filecount = rmdst.collectSubtreeBlocksAndClear(collectedBlocks);
-            incrDeletedFileCount(filecount);
+            filesDeleted = rmdst.collectSubtreeBlocksAndClear(collectedBlocks);
             getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
           }
-          return;
+          return filesDeleted >0;
         }
       } finally {
         if (removedSrc != null) {
@@ -843,7 +836,7 @@ class FSDirectory implements Closeable {
   /**
    * Concat all the blocks from srcs to trg
    * and delete the srcs files
-   * @param trg target file to move the blocks to
+   * @param target target file to move the blocks to
    * @param srcs list of file to move the blocks from
    * Must be public because also called from EditLogs
    * NOTE: - it does not update quota (not needed for concat)
@@ -900,10 +893,11 @@ class FSDirectory implements Closeable {
     }
     waitForReady();
     long now = FSNamesystem.now();
-    INode removedNode = unprotectedDelete(src, collectedBlocks, now);
-    if (removedNode == null) {
+    int filesRemoved = unprotectedDelete(src, collectedBlocks, now);
+    if (filesRemoved <= 0) {
       return false;
     }
+    incrDeletedFileCount(filesRemoved);
     // Blocks will be deleted later by the caller of this method
     getFSNamesystem().removePathAndBlocks(src, null);
     fsImage.getEditLog().logDelete(src, now);
@@ -944,14 +938,14 @@ class FSDirectory implements Closeable {
    * <br>
    * @param src a string representation of a path to an inode
    * @param mtime the time the inode is removed
-   * @return deleted inode if deletion succeeds; else null
    */ 
-  INode unprotectedDelete(String src, long mtime) 
+  void unprotectedDelete(String src, long mtime) 
     throws UnresolvedLinkException {
     List<Block> collectedBlocks = new ArrayList<Block>();
-    INode removedNode = unprotectedDelete(src, collectedBlocks, mtime);
-    getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
-    return removedNode;
+    int filesRemoved = unprotectedDelete(src, collectedBlocks, mtime);
+    if (filesRemoved > 0) {
+      getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
+    }
   }
   
   /**
@@ -960,9 +954,9 @@ class FSDirectory implements Closeable {
    * @param src a string representation of a path to an inode
    * @param collectedBlocks blocks collected from the deleted path
    * @param mtime the time the inode is removed
-   * @return deleted inode if deletion succeeds; else null
+   * @return the number of inodes deleted; 0 if no inodes are deleted.
    */ 
-  INode unprotectedDelete(String src, List<Block> collectedBlocks, 
+  int unprotectedDelete(String src, List<Block> collectedBlocks, 
       long mtime) throws UnresolvedLinkException {
     src = normalizePath(src);
 
@@ -973,29 +967,28 @@ class FSDirectory implements Closeable {
       if (targetNode == null) { // non-existent src
         NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
             +"failed to remove "+src+" because it does not exist");
-        return null;
+        return 0;
       }
       if (inodes.length == 1) { // src is the root
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " +
             "failed to remove " + src +
             " because the root is not allowed to be deleted");
-        return null;
+        return 0;
       }
       int pos = inodes.length - 1;
       // Remove the node from the namespace
       targetNode = removeChild(inodes, pos);
       if (targetNode == null) {
-        return null;
+        return 0;
       }
       // set the parent's modification time
       inodes[pos-1].setModificationTime(mtime);
       int filesRemoved = targetNode.collectSubtreeBlocksAndClear(collectedBlocks);
-      incrDeletedFileCount(filesRemoved);
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
           +src+" is removed");
       }
-      return targetNode;
+      return filesRemoved;
     }
   }
 
@@ -1343,7 +1336,7 @@ class FSDirectory implements Closeable {
           return false;
         }
         // Directory creation also count towards FilesCreated
-        // to match count of files_deleted metric. 
+        // to match count of FilesDeleted metric.
         if (getFSNamesystem() != null)
           NameNode.getNameNodeMetrics().numFilesCreated.inc();
         fsImage.getEditLog().logMkDir(cur, inodes[i]);
@@ -1585,7 +1578,6 @@ class FSDirectory implements Closeable {
    * @param dir the root of the tree that represents the directory
    * @param counters counters for name space and disk space
    * @param nodesInPath INodes for the each of components in the path.
-   * @return the size of the tree
    */
   private static void updateCountForINodeWithQuota(INodeDirectory dir, 
                                                INode.DirCounts counts,

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java?rev=931675&r1=931674&r2=931675&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java Wed Apr  7 20:39:15 2010
@@ -49,6 +49,8 @@ public class NameNodeMetrics implements 
     
     private NameNodeActivityMBean namenodeActivityMBean;
     
+    public MetricsTimeVaryingInt numCreateFileOps = 
+                    new MetricsTimeVaryingInt("CreateFileOps", registry);
     public MetricsTimeVaryingInt numFilesCreated =
                           new MetricsTimeVaryingInt("FilesCreated", registry);
     public MetricsTimeVaryingInt numFilesAppended =
@@ -59,10 +61,11 @@ public class NameNodeMetrics implements 
                     new MetricsTimeVaryingInt("FilesRenamed", registry);
     public MetricsTimeVaryingInt numGetListingOps = 
                     new MetricsTimeVaryingInt("GetListingOps", registry);
-    public MetricsTimeVaryingInt numCreateFileOps = 
-                    new MetricsTimeVaryingInt("CreateFileOps", registry);
     public MetricsTimeVaryingInt numDeleteFileOps = 
                           new MetricsTimeVaryingInt("DeleteFileOps", registry);
+    public MetricsTimeVaryingInt numFilesDeleted = new MetricsTimeVaryingInt(
+        "FilesDeleted", registry, 
+        "Number of files and directories deleted by delete or rename operation");
     public MetricsTimeVaryingInt numFileInfoOps =
                           new MetricsTimeVaryingInt("FileInfoOps", registry);
     public MetricsTimeVaryingInt numAddBlockOps = 
@@ -72,12 +75,13 @@ public class NameNodeMetrics implements 
     public MetricsTimeVaryingInt numgetLinkTargetOps = 
                           new MetricsTimeVaryingInt("GetLinkTargetOps", registry);
 
-    public MetricsTimeVaryingRate transactions =
-                    new MetricsTimeVaryingRate("Transactions", registry, "Journal Transaction");
+    public MetricsTimeVaryingRate transactions = new MetricsTimeVaryingRate(
+      "Transactions", registry, "Journal Transaction");
     public MetricsTimeVaryingRate syncs =
                     new MetricsTimeVaryingRate("Syncs", registry, "Journal Sync");
-    public MetricsTimeVaryingInt transactionsBatchedInSync = 
-                    new MetricsTimeVaryingInt("JournalTransactionsBatchedInSync", registry, "Journal Transactions Batched In Sync");
+    public MetricsTimeVaryingInt transactionsBatchedInSync = new MetricsTimeVaryingInt(
+      "JournalTransactionsBatchedInSync", registry,
+      "Journal Transactions Batched In Sync");
     public MetricsTimeVaryingRate blockReport =
                     new MetricsTimeVaryingRate("blockReport", registry, "Block Report");
     public MetricsIntValue safeModeTime =
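
For context on the test changes below: time-varying counters in this metrics framework accumulate increments into the current interval and expose the last completed interval separately, which is why the tests call doUpdates(...) before asserting on getPreviousIntervalValue(). The following toy model is hypothetical (not the real org.apache.hadoop.metrics.util.MetricsTimeVaryingInt) and only illustrates that roll-over behavior.

    class TimeVaryingIntSketch {
        private int currentValue;           // accumulates between ticks
        private int previousIntervalValue;  // snapshot of the last full interval

        synchronized void inc(int delta) { currentValue += delta; }

        // Corresponds to the roll-over performed when metrics are pushed.
        synchronized void intervalHeartBeat() {
            previousIntervalValue = currentValue;
            currentValue = 0;
        }

        synchronized int getPreviousIntervalValue() { return previousIntervalValue; }

        public static void main(String[] args) {
            TimeVaryingIntSketch filesDeleted = new TimeVaryingIntSketch();
            filesDeleted.inc(1);              // e.g. one file removed by delete()
            filesDeleted.intervalHeartBeat(); // what the test's updateMetrics() triggers
            System.out.println(filesDeleted.getPreviousIntervalValue()); // prints 1
        }
    }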

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=931675&r1=931674&r2=931675&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Wed Apr  7 20:39:15 2010
@@ -26,6 +26,7 @@ import junit.framework.TestCase;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -36,7 +37,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.mortbay.log.Log;
 
 /**
  * Test for metrics published by the Namenode
@@ -64,7 +64,6 @@ public class TestNameNodeMetrics extends
   private Random rand = new Random();
   private FSNamesystem namesystem;
   private NameNodeMetrics nnMetrics;
-  private NameNode nn;
 
   private static Path getTestPath(String fileName) {
     return new Path(TEST_ROOT_DIR_PATH, fileName);
@@ -77,8 +76,7 @@ public class TestNameNodeMetrics extends
     namesystem = cluster.getNamesystem();
     fs = (DistributedFileSystem) cluster.getFileSystem();
     metrics = namesystem.getFSNamesystemMetrics();
-    nn = cluster.getNameNode();
-    nnMetrics = nn.getNameNodeMetrics();
+    nnMetrics = NameNode.getNameNodeMetrics();
   }
   
   @Override
@@ -96,19 +94,14 @@ public class TestNameNodeMetrics extends
     // for some block related metrics to get updated)
     Thread.sleep(1000);
     metrics.doUpdates(null);
-  }
-
-  private void updateNNMetrics() throws Exception {
-    //Wait for nnmetrics update
-    Thread.sleep(1000);
     nnMetrics.doUpdates(null);
   }
-  
+
   private void readFile(FileSystem fileSys,Path name) throws IOException {
     //Just read file so that getNumBlockLocations are incremented
     DataInputStream stm = fileSys.open(name);
     byte [] buffer = new byte[4];
-    int bytesRead =  stm.read(buffer,0,4);
+    stm.read(buffer,0,4);
     stm.close();
   }
   
@@ -121,6 +114,11 @@ public class TestNameNodeMetrics extends
     int blockCapacity = namesystem.getBlockCapacity();
     updateMetrics();
     assertEquals(blockCapacity, metrics.blockCapacity.get());
+    
+    // File create operations is 1
+    // Number of files created is depth of <code>file</code> path
+    assertEquals(1, nnMetrics.numCreateFileOps.getPreviousIntervalValue());
+    assertEquals(file.depth(), nnMetrics.numFilesCreated.getPreviousIntervalValue());
 
     // Blocks are stored in a hashmap. Compute its capacity, which
     // doubles every time the number of entries reach the threshold.
@@ -143,6 +141,10 @@ public class TestNameNodeMetrics extends
     assertEquals(filesTotal, metrics.filesTotal.get());
     assertEquals(0, metrics.blocksTotal.get());
     assertEquals(0, metrics.pendingDeletionBlocks.get());
+    
+    // Delete file operations and number of files deleted must be 1
+    assertEquals(1, nnMetrics.numDeleteFileOps.getPreviousIntervalValue());
+    assertEquals(1, nnMetrics.numFilesDeleted.getPreviousIntervalValue());
   }
   
   /** Corrupt a block and ensure metrics reflects it */
@@ -197,6 +199,17 @@ public class TestNameNodeMetrics extends
     assertEquals(0, metrics.underReplicatedBlocks.get());
   }
   
+  public void testRenameMetrics() throws Exception {
+    Path src = getTestPath("src");
+    createFile(src, 100, (short)1);
+    Path target = getTestPath("target");
+    createFile(target, 100, (short)1);
+    fs.rename(src, target, Rename.OVERWRITE);
+    updateMetrics();
+    assertEquals(1, nnMetrics.numFilesRenamed.getPreviousIntervalValue());
+    assertEquals(1, nnMetrics.numFilesDeleted.getPreviousIntervalValue());
+  }
+  
   /**
    * Test numGetBlockLocations metric   
    * 
@@ -210,9 +223,6 @@ public class TestNameNodeMetrics extends
    * @throws IOException in case of an error
    */
   public void testGetBlockLocationMetric() throws Exception{
-    final String METHOD_NAME = "TestGetBlockLocationMetric";
-    Log.info("Running test "+METHOD_NAME);
-  
     Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "file1.dat");
 
     // When cluster starts first time there are no file  (read,create,open)
@@ -226,8 +236,7 @@ public class TestNameNodeMetrics extends
 
     //Perform create file operation
     createFile(file1_Path,100,(short)2);
-    // Update NameNode metrics
-    updateNNMetrics();
+    updateMetrics();
   
     //Create file does not change numGetBlockLocations metric
     //expect numGetBlockLocations = 0 for previous and current interval 
@@ -240,8 +249,7 @@ public class TestNameNodeMetrics extends
     // Open and read file operation increments numGetBlockLocations
     // Perform read file operation on earlier created file
     readFile(fs, file1_Path);
-    // Update NameNode metrics
-    updateNNMetrics();
+    updateMetrics();
     // Verify read file operation has incremented numGetBlockLocations by 1
     assertEquals("numGetBlockLocations for previous interval is incorrect",
     1,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
@@ -252,7 +260,7 @@ public class TestNameNodeMetrics extends
     // opening and reading file  twice will increment numGetBlockLocations by 2
     readFile(fs, file1_Path);
     readFile(fs, file1_Path);
-    updateNNMetrics();
+    updateMetrics();
     assertEquals("numGetBlockLocations for previous interval is incorrect",
     2,nnMetrics.numGetBlockLocations.getPreviousIntervalValue());
     // Verify numGetBlockLocations for current interval is 0


