hadoop-common-commits mailing list archives

From: s..@apache.org
Subject: svn commit: r614301 - in /lucene/hadoop/trunk: CHANGES.txt src/java/org/apache/hadoop/dfs/DataNode.java src/java/org/apache/hadoop/dfs/FSDataset.java src/test/org/apache/hadoop/dfs/SimulatedFSDataset.java
Date: Tue, 22 Jan 2008 20:02:27 GMT
Author: shv
Date: Tue Jan 22 12:02:26 2008
New Revision: 614301

URL: http://svn.apache.org/viewvc?rev=614301&view=rev
Log:
HADOOP-2549. Correct detection of a full disk for data-nodes. Contributed by Hairong Kuang.

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/SimulatedFSDataset.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=614301&r1=614300&r2=614301&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Tue Jan 22 12:02:26 2008
@@ -528,6 +528,9 @@
     the datanodes. The periodicity of this computation is now configurable.
     (dhruba)
 
+    HADOOP-2549. Correct disk size computation so that data-nodes can switch
+    to other local drives if the current one is full. (Hairong Kuang via shv)
+
 Release 0.15.3 - 2008-01-18
 
   BUG FIXES

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?rev=614301&r1=614300&r2=614301&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Tue Jan 22 12:02:26 2008
@@ -35,7 +35,6 @@
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.dfs.BlockCommand;
 import org.apache.hadoop.dfs.DatanodeProtocol;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.dfs.FSDatasetInterface.MetaDataInputStream;
 
 import java.io.*;
@@ -123,6 +122,16 @@
   private DataBlockScanner blockScanner;
   private Daemon blockScannerThread;
 
+  /**
+   * We need an estimate of the block size to check whether a disk partition
+   * has enough space. For now we set it to the default block size from the
+   * server-side configuration, which is not ideal because the default block
+   * size should be a client-side configuration.
+   * A better solution would be to include the estimated block size in the
+   * header, i.e. either the actual block size or the default block size.
+   */
+  private long estimateBlockSize;
+  
   // The following three fields are to support balancing
   final static short MAX_BALANCING_THREADS = 5;
   private Semaphore balancingSem = new Semaphore(MAX_BALANCING_THREADS);
@@ -265,6 +274,7 @@
     
     this.defaultBytesPerChecksum = 
        Math.max(conf.getInt("io.bytes.per.checksum", 512), 1); 
+    this.estimateBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     this.socketTimeout =  conf.getInt("dfs.socket.timeout",
                                       FSConstants.READ_TIMEOUT);
     String address = conf.get("dfs.datanode.bindAddress", "0.0.0.0:50010");
@@ -1080,7 +1090,7 @@
       //
       // Read in the header
       //
-      Block block = new Block(in.readLong(), 0);
+      Block block = new Block(in.readLong(), estimateBlockSize);
       LOG.info("Receiving block " + block + " from " + s.getInetAddress());
       int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
       boolean isRecovery = in.readBoolean(); // is this part of recovery?
@@ -1330,7 +1340,7 @@
       balancingSem.acquireUninterruptibly();
 
       /* read header */
-      Block block = new Block(in.readLong(), 0); // block id & len
+      Block block = new Block(in.readLong(), estimateBlockSize); // block id & len
       String sourceID = Text.readString(in);
 
       short opStatus = OP_STATUS_SUCCESS;
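
The effect of these two DataNode.java hunks: the receiver used to construct
each Block with length 0, so any later free-space check compared against zero
and a full partition still looked writable. Below is a minimal sketch of the
corrected header read, using simplified stand-ins for the real
org.apache.hadoop.dfs classes (BlockSketch and ReceiverSketch are illustrative
names, not the actual Hadoop API):

  import java.io.DataInputStream;
  import java.io.IOException;

  // Stand-in for org.apache.hadoop.dfs.Block: an id plus a length that is
  // only an estimate until the block has been fully received.
  class BlockSketch {
    final long id;
    final long len;
    BlockSketch(long id, long len) { this.id = id; this.len = len; }
  }

  class ReceiverSketch {
    // analogous to estimateBlockSize, taken from dfs.block.size
    private final long estimateBlockSize;

    ReceiverSketch(long configuredBlockSize) {
      this.estimateBlockSize = configuredBlockSize;
    }

    BlockSketch readHeader(DataInputStream in) throws IOException {
      // Before this change the second argument was a hard-coded 0.
      return new BlockSketch(in.readLong(), estimateBlockSize);
    }
  }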

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java?rev=614301&r1=614300&r2=614301&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java Tue Jan 22 12:02:26 2008
@@ -272,8 +272,6 @@
     
     FSVolume(File currentDir, Configuration conf) throws IOException {
       this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
-      // add block size to the configured reserved space
-      this.reserved += conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
       this.usableDiskPct = conf.getFloat("dfs.datanode.du.pct",
                                          (float) USABLE_DISK_PCT_DEFAULT);
       File parent = currentDir.getParentFile();
@@ -309,8 +307,7 @@
       if (remaining>available) {
         remaining = available;
       }
-      remaining = (long)(remaining * usableDiskPct); 
-      return (remaining > 0) ? remaining : 0;
+      return (remaining > 0) ? (long)(remaining * usableDiskPct) : 0;
     }
       
     String getMount() throws IOException {
@@ -387,7 +384,7 @@
       while (true) {
         FSVolume volume = volumes[curVolume];
         curVolume = (curVolume + 1) % volumes.length;
-        if (volume.getAvailable() >= blockSize) { return volume; }
+        if (volume.getAvailable() > blockSize) { return volume; }
         if (curVolume == startVolume) {
           throw new DiskOutOfSpaceException("Insufficient space for an additional block");
         }
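
Taken together, the FSDataset.java hunks move the block-size allowance out of
the per-volume reserved space and into the comparison itself: getAvailable()
no longer pads "reserved" with a block size, and getNextVolume() demands
strictly more free space than the estimated block size. A compact sketch of
that round-robin selection, with illustrative names, a plain long in place of
the real DF-based usage query, and IOException standing in for
DiskOutOfSpaceException:

  import java.io.IOException;

  // Stand-in for FSVolume: capacity/used are plain longs here, where the
  // real class asks the filesystem via a DF helper.
  class VolumeSketch {
    private final long capacity;
    private final long reserved;        // dfs.datanode.du.reserved
    private final float usableDiskPct;  // dfs.datanode.du.pct
    private long used;

    VolumeSketch(long capacity, long reserved, float usableDiskPct) {
      this.capacity = capacity;
      this.reserved = reserved;
      this.usableDiskPct = usableDiskPct;
    }

    void use(long bytes) { used += bytes; }

    // Mirrors the patched getAvailable(): no block-size padding is folded
    // into 'reserved' any more, and a non-positive remainder yields 0.
    long getAvailable() {
      long remaining = capacity - used - reserved;
      return (remaining > 0) ? (long) (remaining * usableDiskPct) : 0;
    }
  }

  class VolumeSetSketch {
    private final VolumeSketch[] volumes;
    private int curVolume;

    VolumeSetSketch(VolumeSketch[] volumes) { this.volumes = volumes; }

    // Mirrors the patched getNextVolume(): strictly more free space than
    // the estimated block size is required, so an exactly-full volume is
    // skipped and the datanode moves on to the next local drive.
    synchronized VolumeSketch getNextVolume(long blockSize) throws IOException {
      int startVolume = curVolume;
      while (true) {
        VolumeSketch volume = volumes[curVolume];
        curVolume = (curVolume + 1) % volumes.length;
        if (volume.getAvailable() > blockSize) { return volume; }
        if (curVolume == startVolume) {
          throw new IOException("Insufficient space for an additional block");
        }
      }
    }
  }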

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/SimulatedFSDataset.java?rev=614301&r1=614300&r2=614301&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/SimulatedFSDataset.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/SimulatedFSDataset.java Tue Jan 22 12:02:26 2008
@@ -141,7 +141,7 @@
           throw new IOException("Creating block, no free space available");
         }
       } else {
-        storage.free(extraLen);
+        storage.free(-extraLen);
       }
       theBlock.len = finalSize;  
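
The SimulatedFSDataset fix is a sign error in the fake storage accounting.
From the surrounding context, extraLen appears to be finalSize minus the
length previously charged for the block; when the block comes up short, the
surplus must be handed back as a positive amount. A sketch of that
bookkeeping, with illustrative names and alloc/free semantics assumed from
this hunk:

  import java.io.IOException;

  class SimulatedStorageSketch {
    private final long capacity;
    private long usedBytes;

    SimulatedStorageSketch(long capacity) { this.capacity = capacity; }

    synchronized boolean alloc(long amount) {
      if (amount > capacity - usedBytes) { return false; }
      usedBytes += amount;
      return true;
    }

    // free(n) returns n bytes to the pool.
    synchronized void free(long amount) { usedBytes -= amount; }

    synchronized void finalizeBlockLen(long oldLen, long finalSize)
        throws IOException {
      long extraLen = finalSize - oldLen;
      if (extraLen > 0) {
        if (!alloc(extraLen)) {
          throw new IOException("Creating block, no free space available");
        }
      } else {
        // extraLen is <= 0 here: the block ended up shorter than the space
        // charged for it, so hand back -extraLen, a non-negative amount.
        // Passing extraLen itself, as the old code did, would have *grown*
        // usedBytes instead of shrinking it.
        free(-extraLen);
      }
    }
  }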
 


