hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From szets...@apache.org
Subject svn commit: r1150067 - in /hadoop/common/trunk/hdfs: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
Date Sat, 23 Jul 2011 08:45:56 GMT
Author: szetszwo
Date: Sat Jul 23 08:45:55 2011
New Revision: 1150067

URL: http://svn.apache.org/viewvc?rev=1150067&view=rev
Log:
HDFS-1739.  Add available volume size to the error message when datanode throws DiskOutOfSpaceException.
 Contributed by Uma Maheswara Rao G

Modified:
    hadoop/common/trunk/hdfs/CHANGES.txt
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java

Modified: hadoop/common/trunk/hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/CHANGES.txt?rev=1150067&r1=1150066&r2=1150067&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hdfs/CHANGES.txt Sat Jul 23 08:45:55 2011
@@ -593,6 +593,9 @@ Trunk (unreleased changes)
     HDFS-2112.  Move ReplicationMonitor to block management.  (Uma Maheswara
     Rao G via szetszwo)
 
+    HDFS-1739.  Add available volume size to the error message when datanode
+    throws DiskOutOfSpaceException.  (Uma Maheswara Rao G via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java?rev=1150067&r1=1150066&r2=1150067&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java Sat Jul 23 08:45:55 2011
@@ -41,13 +41,24 @@ public class RoundRobinVolumesPolicy imp
     }
     
     int startVolume = curVolume;
+    long maxAvailable = 0;
     
     while (true) {
       FSVolume volume = volumes.get(curVolume);
       curVolume = (curVolume + 1) % volumes.size();
-      if (volume.getAvailable() > blockSize) { return volume; }
+      long availableVolumeSize = volume.getAvailable();
+      if (availableVolumeSize > blockSize) { return volume; }
+      
+      if (availableVolumeSize > maxAvailable) {
+        maxAvailable = availableVolumeSize;
+      }
+      
       if (curVolume == startVolume) {
-        throw new DiskOutOfSpaceException("Insufficient space for an additional block");
+        throw new DiskOutOfSpaceException(
+            "Insufficient space for an additional block. Volume with the most available space has "
+                + maxAvailable
+                + " bytes free, configured block size is "
+                + blockSize);
       }
     }
   }

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java?rev=1150067&r1=1150066&r2=1150067&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java Sat Jul 23 08:45:55 2011
@@ -24,6 +24,7 @@ import java.util.List;
 import junit.framework.Assert;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -63,5 +64,33 @@ public class TestRoundRobinVolumesPolicy
       // Passed.
     }
   }
+  
+  // ChooseVolume should throw DiskOutOfSpaceException with volume and block sizes in exception message.
+  @Test
+  public void testRRPolicyExceptionMessage()
+      throws Exception {
+    final List<FSVolume> volumes = new ArrayList<FSVolume>();
+
+    // First volume, with 500 bytes of space.
+    volumes.add(Mockito.mock(FSVolume.class));
+    Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);
+
+    // Second volume, with 600 bytes of space.
+    volumes.add(Mockito.mock(FSVolume.class));
+    Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
+
+    RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy();
+    int blockSize = 700;
+    try {
+      policy.chooseVolume(volumes, blockSize);
+      Assert.fail("expected to throw DiskOutOfSpaceException");
+    } catch (DiskOutOfSpaceException e) {
+      Assert
+          .assertEquals(
+              "Not returnig the expected message",
              "Insufficient space for an additional block. Volume with the most available space has 600 bytes free, configured block size is " + blockSize, e
+                  .getMessage());
+    }
+  }
 
 }



Mime
View raw message