hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From dhr...@apache.org
Subject svn commit: r630992 - in /hadoop/core/trunk: CHANGES.txt src/java/org/apache/hadoop/dfs/FSConstants.java src/java/org/apache/hadoop/dfs/FSImage.java src/java/org/apache/hadoop/dfs/FSNamesystem.java src/test/org/apache/hadoop/dfs/TestFileCreation.java
Date Mon, 25 Feb 2008 21:07:40 GMT
Author: dhruba
Date: Mon Feb 25 13:07:34 2008
New Revision: 630992

URL: http://svn.apache.org/viewvc?rev=630992&view=rev
Log:
HADOOP-2345.  Fixed the bad on-disk format introduced by the original HADOOP-2345 patch.
Disk layout version changed from -12 to -13. (dhruba)


Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSConstants.java
    hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java
    hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=630992&r1=630991&r2=630992&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Feb 25 13:07:34 2008
@@ -16,6 +16,9 @@
     HADOOP-1902. "dfs du" command without any arguments operates on the
     current working directory.  (Mahadev Konar via dhruba)
 
+    HADOOP-2345.  Fixed bad disk format introduced by HADOOP-2345.
+    Disk layout version changed from -12 to -13. (dhruba)
+
   NEW FEATURES
 
     HADOOP-1398.  Add HBase in-memory block cache.  (tomwhite)

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSConstants.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSConstants.java?rev=630992&r1=630991&r2=630992&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSConstants.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSConstants.java Mon Feb 25 13:07:34 2008
@@ -183,7 +183,7 @@
   // Version is reflected in the data storage file.
   // Versions are negative.
   // Decrement LAYOUT_VERSION to define a new version.
-  public static final int LAYOUT_VERSION = -12;
+  public static final int LAYOUT_VERSION = -13;
   // Current version: 
-  // Introduce OPEN, CLOSE and GENSTAMP transactions for supporting appends
+  // Fix bug introduced by OPEN, CLOSE and GENSTAMP transactions for supporting appends
 }

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java?rev=630992&r1=630991&r2=630992&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSImage.java Mon Feb 25 13:07:34 2008
@@ -911,7 +911,7 @@
                                   FSNamesystem fs) throws IOException {
 
     FSDirectory fsDir = fs.dir;
-    if (version > -12) // pre lease image version
+    if (version > -13) // pre lease image version
       return;
     int size = in.readInt();
 
@@ -930,10 +930,6 @@
       INodeFile oldnode = (INodeFile) old;
       fsDir.replaceNode(path, oldnode, cons);
       fs.addLease(path, cons.getClientName()); 
-    }
-    if (fs.countLease() != size) {
-      throw new IOException("Created " + size + " leases but found " +
-                            fs.countLease());
     }
   }
 

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=630992&r1=630991&r2=630992&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Mon Feb 25 13:07:34 2008
@@ -4202,9 +4202,12 @@
    */
   void saveFilesUnderConstruction(DataOutputStream out) throws IOException {
     synchronized (sortedLeases) {
-      out.writeInt(sortedLeases.size()); // write the size
-      for (Iterator<Lease> it = sortedLeases.iterator(); it.hasNext();) {
-        Lease lease = it.next();        
+      int count = 0;
+      for (Lease lease : sortedLeases) {
+        count += lease.getPaths().size();
+      }
+      out.writeInt(count); // write the size
+      for (Lease lease : sortedLeases) {
         Collection<StringBytesWritable> files = lease.getPaths();
         for (Iterator<StringBytesWritable> i = files.iterator(); i.hasNext();){
           String path = i.next().getString();

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java?rev=630992&r1=630991&r2=630992&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java Mon Feb 25 13:07:34 2008
@@ -358,7 +358,27 @@
                          + "Created file filestatus.dat with one "
                          + " replicas.");
 
+      // create another new file.
+      //
+      Path file2 = new Path("/filestatus2.dat");
+      FSDataOutputStream stm2 = createFile(fs, file2, 1);
+      System.out.println("testFileCreationNamenodeRestart: "
+                         + "Created file filestatus2.dat with one "
+                         + " replicas.");
+
       // restart cluster with the same namenode port as before.
+      // This ensures that leases are persisted in fsimage.
+      cluster.shutdown();
+      try {
+        Thread.sleep(5000);
+      } catch (InterruptedException e) {
+      }
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+
+      // restart cluster yet again. This triggers the code to read in
+      // persistent leases from fsimage.
       cluster.shutdown();
       try {
         Thread.sleep(5000);
@@ -375,13 +395,22 @@
       rand.nextBytes(buffer);
       stm.write(buffer);
       stm.close();
+      stm2.write(buffer);
+      stm2.close();
 
       // verify that new block is associated with this file
       DFSClient client = new DFSClient(addr, conf);
       LocatedBlocks locations = client.namenode.getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
-      assertTrue("Error blocks were not cleaned up",
+      assertTrue("Error blocks were not cleaned up for file " + file1,
+                 locations.locatedBlockCount() == 1);
+
+      // verify filestatus2.dat
+      locations = client.namenode.getBlockLocations(
+                                  file2.toString(), 0, Long.MAX_VALUE);
+      System.out.println("locations = " + locations.locatedBlockCount());
+      assertTrue("Error blocks were not cleaned up for file " + file2,
                  locations.locatedBlockCount() == 1);
     } finally {
       fs.close();



Mime
View raw message