hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hair...@apache.org
Subject svn commit: r898467 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
Date Tue, 12 Jan 2010 19:04:25 GMT
Author: hairong
Date: Tue Jan 12 19:04:24 2010
New Revision: 898467

URL: http://svn.apache.org/viewvc?rev=898467&view=rev
Log:
HDFS-145. Cleanup inconsistent block length handling code in FSNameSystem#addStoredBlock.
Contributed by Hairong Kuang.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=898467&r1=898466&r2=898467&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue Jan 12 19:04:24 2010
@@ -418,6 +418,9 @@
 
     HDFS-813. Enable the append test in TestReadWhileWriting.  (szetszwo)
 
+    HDFS-145. Cleanup inconsistent block length handling code in
+    FSNameSystem#addStoredBlock. (hairong)
+
   BUG FIXES
 
     HDFS-76. Better error message to users when commands fail because of 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=898467&r1=898466&r2=898467&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Tue Jan 12 19:04:24 2010
@@ -265,6 +265,21 @@
       "commitBlock length is less than the stored one "
       + commitBlock.getNumBytes() + " vs. " + lastBlock.getNumBytes();
     ((BlockInfoUnderConstruction)lastBlock).commitBlock(commitBlock);
+    
+    // Adjust disk space consumption if required
+    long diff = fileINode.getPreferredBlockSize() - commitBlock.getNumBytes();    
+    if (diff > 0) {
+      try {
+        String path = /* For finding parents */
+        namesystem.leaseManager.findPath(fileINode);
+        namesystem.dir.updateSpaceConsumed(path, 0, -diff
+            * fileINode.getReplication());
+      } catch (IOException e) {
+        FSNamesystem.LOG
+            .warn("Unexpected exception while updating disk space : "
+                + e.getMessage());
+      }
+    }
   }
 
   /**
@@ -1023,73 +1038,6 @@
     // add block to the data-node
     boolean added = node.addBlock(storedBlock);
 
-    if (block != storedBlock) {
-      long cursize = storedBlock.getNumBytes();
-      long newsize = block.getNumBytes();
-      if (newsize >= 0) {
-        if (cursize == 0) {
-          storedBlock.setNumBytes(newsize);
-        } else if (cursize != newsize) {
-          FSNamesystem.LOG.warn("Inconsistent size for block " + block +
-                   " reported from " + node.getName() +
-                   " current size is " + cursize +
-                   " reported size is " + newsize);
-          try {
-            if (cursize > newsize) {
-              // new replica is smaller in size than existing block.
-              // Mark the new replica as corrupt.
-              FSNamesystem.LOG.warn("Mark new replica "
-                  + block + " from " + node.getName() + " as corrupt "
-                  + "because length is shorter than existing ones");
-              markBlockAsCorrupt(storedBlock, node);
-            } else {
-              // new replica is larger in size than existing block.
-              // Mark pre-existing replicas as corrupt.
-              int numNodes = storedBlock.numNodes();
-              int count = 0;
-              DatanodeDescriptor nodes[] = new DatanodeDescriptor[numNodes];
-              Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(storedBlock);
-              while (it.hasNext()) {
-                DatanodeDescriptor dd = it.next();
-                if (!dd.equals(node)) {
-                  nodes[count++] = dd;
-                }
-              }
-              for (int j = 0; j < count; j++) {
-                FSNamesystem.LOG.warn("Mark existing replica "
-                        + block + " from " + node.getName() + " as corrupt "
-                        + "because its length is shorter than the new one");
-                markBlockAsCorrupt(storedBlock, nodes[j]);
-              }
-              //
-              // change the size of block in blocksMap
-              //
-              storedBlock.setNumBytes(newsize);
-            }
-          } catch (IOException e) {
-            FSNamesystem.LOG.warn("Error in deleting bad block " + block + e);
-          }
-        }
-
-        // Updated space consumed if required.
-        long diff = fileINode.getPreferredBlockSize() - storedBlock.getNumBytes();
-        
-        if (diff > 0 && fileINode.isUnderConstruction() &&
-            cursize < storedBlock.getNumBytes()) {
-          try {
-            String path = /* For finding parents */
-            namesystem.leaseManager.findPath((INodeFileUnderConstruction)fileINode);
-            namesystem.dir.updateSpaceConsumed(path, 0, -diff
-                * fileINode.getReplication());
-          } catch (IOException e) {
-            FSNamesystem.LOG
-                .warn("Unexpected exception while updating disk space : "
-                    + e.getMessage());
-          }
-        }
-      }
-    }
-
     int curReplicaDelta = 0;
     if (added) {
       curReplicaDelta = 1;

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=898467&r1=898466&r2=898467&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Tue Jan 12 19:04:24 2010
@@ -52,8 +52,7 @@
 
 
 /**
- * This class tests that a file need not be closed before its
- * data can be read by another client.
+ * This class tests various cases during file creation.
  */
 public class TestFileCreation extends junit.framework.TestCase {
   static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/";
@@ -71,10 +70,6 @@
   static final int fileSize = numBlocks * blockSize + 1;
   boolean simulatedStorage = false;
 
-  // The test file is 2 times the blocksize plus one. This means that when the
-  // entire file is written, the first two blocks definitely get flushed to
-  // the datanodes.
-
   // creates a file but does not close it
   public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
     throws IOException {
@@ -100,49 +95,6 @@
     stm.write(buffer, 0, size);
   }
 
-  //
-  // verify that the data written to the full blocks are sane
-  // 
-  private void checkFile(FileSystem fileSys, Path name, int repl)
-    throws IOException {
-    boolean done = false;
-
-    // wait till all full blocks are confirmed by the datanodes.
-    while (!done) {
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {}
-      done = true;
-      BlockLocation[] locations = fileSys.getFileBlockLocations(
-          fileSys.getFileStatus(name), 0, fileSize);
-      if (locations.length < numBlocks) {
-        done = false;
-        continue;
-      }
-      for (int idx = 0; idx < locations.length; idx++) {
-        if (locations[idx].getHosts().length < repl) {
-          done = false;
-          break;
-        }
-      }
-    }
-    FSDataInputStream stm = fileSys.open(name);
-    final byte[] expected;
-    if (simulatedStorage) {
-      expected = new byte[numBlocks * blockSize];
-      for (int i= 0; i < expected.length; i++) {  
-        expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
-      }
-    } else {
-      expected = AppendTestUtil.randomBytes(seed, numBlocks*blockSize);
-    }
-    // do a sanity check. Read the file
-    byte[] actual = new byte[numBlocks * blockSize];
-    stm.readFully(0, actual);
-    stm.close();
-    checkData(actual, 0, expected, "Read 1");
-  }
-
  static private void checkData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
       assertEquals(message+" byte "+(from+idx)+" differs. expected "+
@@ -201,7 +153,7 @@
   }
 
   /**
-   * Test that file data becomes available before file is closed.
+   * Test if file creation and disk space consumption works right
    */
   public void testFileCreation() throws IOException {
     Configuration conf = new HdfsConfiguration();
@@ -240,6 +192,10 @@
       // create a new file in home directory. Do not close it.
       //
       Path file1 = new Path("filestatus.dat");
+      Path parent = file1.getParent();
+      fs.mkdirs(parent);
+      DistributedFileSystem dfs = (DistributedFileSystem)fs;
+      dfs.setQuota(file1.getParent(), 100L, blockSize*5);
       FSDataOutputStream stm = createFile(fs, file1, 1);
 
       // verify that file exists in FS namespace
@@ -250,23 +206,18 @@
       // write to file
       writeFile(stm);
 
-      // Make sure a client can read it before it is closed.
-      checkFile(fs, file1, 1);
-
-      // verify that file size has changed
-      long len = fs.getFileStatus(file1).getLen();
-      assertTrue(file1 + " should be of size " + (numBlocks * blockSize) +
-                 " but found to be of size " + len, 
-                  len == numBlocks * blockSize);
-
       stm.close();
 
       // verify that file size has changed to the full size
-      len = fs.getFileStatus(file1).getLen();
+      long len = fs.getFileStatus(file1).getLen();
       assertTrue(file1 + " should be of size " + fileSize +
                  " but found to be of size " + len, 
                   len == fileSize);
       
+      // verify the disk space the file occupied
+      long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
+      assertEquals(file1 + " should take " + fileSize + " bytes disk space " +
+          "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
       
       // Check storage usage 
       // can't check capacities for real storage since the OS file system may be changing
under us.



Mime
View raw message