hadoop-common-commits mailing list archives

From: jiten...@apache.org
Subject: svn commit: r1200009 - in /hadoop/common/branches/branch-0.20-security-205: ./ src/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/test/org/apache/hadoop/hdfs/
Date: Wed, 09 Nov 2011 22:22:46 GMT
Author: jitendra
Date: Wed Nov  9 22:22:45 2011
New Revision: 1200009

URL: http://svn.apache.org/viewvc?rev=1200009&view=rev
Log:
Merged r1199990 from branch-0.20-security for HDFS-611.

Added:
    hadoop/common/branches/branch-0.20-security-205/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
      - copied unchanged from r1199990, hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
    hadoop/common/branches/branch-0.20-security-205/src/test/org/apache/hadoop/hdfs/TestDFSRemove.java
      - copied unchanged from r1199990, hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestDFSRemove.java
Modified:
    hadoop/common/branches/branch-0.20-security-205/CHANGES.txt
    hadoop/common/branches/branch-0.20-security-205/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java

Modified: hadoop/common/branches/branch-0.20-security-205/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-205/CHANGES.txt?rev=1200009&r1=1200008&r2=1200009&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-205/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.20-security-205/CHANGES.txt Wed Nov  9 22:22:45 2011
@@ -90,6 +90,9 @@ Release 0.20.205.1 - unreleased
     HDFS-1257. Race condition on FSNamesystem#recentInvalidateSets introduced 
     by HADOOP-5124. (Eric Payne via jitendra)
 
+    HDFS-611. Heartbeat times from Datanodes increase when there are plenty of
+    blocks to delete. (Zheng Shao via jitendra)
+
 Release 0.20.205.0 - 2011.10.06
 
   NEW FEATURES

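HDFS-611 fixes a heartbeat stall: block invalidation used to delete block files synchronously on the DataNode's service thread, so a large batch of deletions could hold up heartbeats to the NameNode. The fix moves deletion onto per-volume worker threads. FSDatasetAsyncDiskService.java and TestDFSRemove.java are copied unchanged from r1199990 and therefore do not appear in the diff below; for orientation, here is a minimal, self-contained sketch of the pattern the service implements, assuming one small ThreadPoolExecutor per volume root. Only the File[] roots constructor and the deleteAsync/shutdown entry points come from this diff; the class name, pool sizes, and error handling are illustrative, not the actual 0.20 source. The real deleteAsync also receives the FSVolume and a byte count so the finished task can decrement dfsUsed; that half is sketched after the FSDataset.java diff below.

import java.io.File;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Sketch only: one small executor per volume root, so a slow or failing disk
// delays only its own deletions and never the DataNode heartbeat thread.
class AsyncDiskServiceSketch {

  private final Map<File, ThreadPoolExecutor> executors =
      new HashMap<File, ThreadPoolExecutor>();

  // The File[] roots constructor mirrors the call site added in this diff.
  AsyncDiskServiceSketch(File[] volumeRoots) {
    for (File root : volumeRoots) {
      executors.put(root, new ThreadPoolExecutor(
          1, 4,                                  // pool sizes are illustrative
          60L, TimeUnit.SECONDS,
          new LinkedBlockingQueue<Runnable>()));
    }
  }

  // Queue a block file and its meta file for deletion on the owning volume.
  void deleteAsync(final File volumeRoot, final File blockFile,
                   final File metaFile, final String blockName) {
    executors.get(volumeRoot).execute(new Runnable() {
      public void run() {
        if (!blockFile.delete() || (!metaFile.delete() && metaFile.exists())) {
          System.err.println("Unexpected error deleting block " + blockName);
        }
      }
    });
  }

  // Shut the pools down so queued deletions stop when the dataset closes.
  void shutdown() {
    for (ThreadPoolExecutor e : executors.values()) {
      e.shutdown();
    }
  }
}
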
Modified: hadoop/common/branches/branch-0.20-security-205/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-205/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1200009&r1=1200008&r2=1200009&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-205/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/branches/branch-0.20-security-205/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Wed Nov  9 22:22:45 2011
@@ -354,6 +354,7 @@ public class FSDataset implements FSCons
   }
 
   class FSVolume {
+    private File currentDir;
     private FSDir dataDir;
     private File tmpDir;
     private File blocksBeingWritten;     // clients write here
@@ -366,6 +367,7 @@ public class FSDataset implements FSCons
     FSVolume(File currentDir, Configuration conf) throws IOException {
       this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
       this.dataDir = new FSDir(currentDir);
+      this.currentDir = currentDir;
       boolean supportAppends = conf.getBoolean("dfs.support.append", false);
       File parent = currentDir.getParentFile();
 
@@ -413,8 +415,16 @@ public class FSDataset implements FSCons
       this.dfsUsage.start();
     }
 
+    File getCurrentDir() {
+      return currentDir;
+    }
+    
     void decDfsUsed(long value) {
-      dfsUsage.decDfsUsed(value);
+      // The caller of this method (BlockFileDeleteTask.run()) does
+      // not hold the lock on FSDataset.this yet.
+      synchronized(FSDataset.this) {
+        dfsUsage.decDfsUsed(value);
+      }
     }
     
     long getDfsUsed() throws IOException {
@@ -913,7 +923,8 @@ public class FSDataset implements FSCons
   HashMap<Block,DatanodeBlockInfo> volumeMap = new HashMap<Block, DatanodeBlockInfo>();
   static  Random random = new Random();
   private int validVolsRequired;
-  
+  FSDatasetAsyncDiskService asyncDiskService;
+
   /**
    * An FSDataset has a directory where it loads its data files.
    */
@@ -954,6 +965,11 @@ public class FSDataset implements FSCons
     }
     volumes = new FSVolumeSet(volArray);
     volumes.getVolumeMap(volumeMap);
+    File[] roots = new File[storage.getNumStorageDirs()];
+    for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
+      roots[idx] = storage.getStorageDir(idx).getCurrentDir();
+    }
+    asyncDiskService = new FSDatasetAsyncDiskService(roots);
     registerMBean(storage.getStorageID());
   }
 
@@ -1799,22 +1815,10 @@ public class FSDataset implements FSCons
         volumeMap.remove(invalidBlks[i]);
       }
       File metaFile = getMetaFile( f, invalidBlks[i] );
-      long blockSize = f.length()+metaFile.length();
-      if ( !f.delete() || ( !metaFile.delete() && metaFile.exists() ) ) {
-        DataNode.LOG.warn("Unexpected error trying to delete block "
-                          + invalidBlks[i] + " at file " + f);
-        error = true;
-        continue;
-      }
-      v.decDfsUsed(blockSize);
-      DataNode.LOG.info("Deleting block " + invalidBlks[i] + " file " + f);
-      if (f.exists()) {
-        //
-        // This is a temporary check especially for hadoop-1220. 
-        // This will go away in the future.
-        //
-        DataNode.LOG.info("File " + f + " was deleted but still exists!");
-      }
+      long dfsBytes = f.length() + metaFile.length();
+      
+      // Delete the block asynchronously to make sure we can do it fast enough
+      asyncDiskService.deleteAsync(v, f, metaFile, dfsBytes, invalidBlks[i].toString());
     }
     if (error) {
       throw new IOException("Error in deleting blocks.");
@@ -1918,6 +1922,10 @@ public class FSDataset implements FSCons
     if (mbeanName != null)
       MBeans.unregister(mbeanName);
     
+    if (asyncDiskService != null) {
+      asyncDiskService.shutdown();
+    }
+
     if(volumes != null) {
       for (FSVolume volume : volumes.volumes) {
         if(volume != null) {

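Two details of the FSDataset.java hunks above are worth spelling out. First, invalidate() computes dfsBytes = f.length() + metaFile.length() before handing the files to deleteAsync, since the lengths are no longer readable once the task has deleted them. Second, decDfsUsed() now synchronizes on FSDataset.this itself, because the deletion task runs on an executor thread that holds no FSDataset lock. A self-contained sketch of that task follows; the real BlockFileDeleteTask lives in the copied FSDatasetAsyncDiskService.java and is not shown in this diff, so everything here beyond the decDfsUsed(dfsBytes) call is a reconstruction, not a quotation.

import java.io.File;

// Sketch of the per-block deletion task named in the decDfsUsed() comment
// above; Volume stands in for the FSVolume inner class.
class BlockFileDeleteTaskSketch implements Runnable {

  interface Volume { void decDfsUsed(long bytes); }  // stand-in for FSVolume

  private final Volume volume;
  private final File blockFile;
  private final File metaFile;
  private final long dfsBytes;   // block + meta length, captured before deletion
  private final String blockName;

  BlockFileDeleteTaskSketch(Volume volume, File blockFile, File metaFile,
                            long dfsBytes, String blockName) {
    this.volume = volume;
    this.blockFile = blockFile;
    this.metaFile = metaFile;
    this.dfsBytes = dfsBytes;
    this.blockName = blockName;
  }

  public void run() {
    // Runs on an executor thread that does not hold the FSDataset monitor,
    // which is why decDfsUsed() in the diff now synchronizes internally.
    if (!blockFile.delete() || (!metaFile.delete() && metaFile.exists())) {
      System.err.println("Unexpected error trying to delete block "
          + blockName + " at file " + blockFile);
    } else {
      volume.decDfsUsed(dfsBytes);
    }
  }
}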

