hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cutt...@apache.org
Subject svn commit: r543207 - in /lucene/hadoop/trunk: ./ src/java/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/dfs/
Date Thu, 31 May 2007 18:33:19 GMT
Author: cutting
Date: Thu May 31 11:33:18 2007
New Revision: 543207

URL: http://svn.apache.org/viewvc?view=rev&rev=543207
Log:
HADOOP-1242.  Improve handling of DFS upgrades.  Contributed by Konstantin.

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataStorage.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=543207&r1=543206&r2=543207
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Thu May 31 11:33:18 2007
@@ -505,6 +505,9 @@
      AlreadyBeingCreatedException when wrapped as a RemoteException.
      (Hairong Kuang via tomwhite)
 
+129. HADOOP-1242.  Improve handling of DFS upgrades.
+     (Konstantin Shvachko via cutting)
+
 
 Release 0.12.3 - 2007-04-06
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataStorage.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataStorage.java?view=diff&rev=543207&r1=543206&r2=543207
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataStorage.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataStorage.java Thu May 31 11:33:18 2007
@@ -11,7 +11,6 @@
 
 import org.apache.hadoop.dfs.FSConstants.StartupOption;
 import org.apache.hadoop.dfs.FSConstants.NodeType;
-import org.apache.hadoop.dfs.FSImage.NameNodeFile;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.fs.FileUtil.HardLink;
 
@@ -164,6 +163,21 @@
     File oldF = new File(sd.root, "storage");
     if (!oldF.exists())
       return false;
+    // check the layout version inside the storage file
+    // Lock and Read old storage file
+    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
+    if (oldFile == null)
+      throw new IOException("Cannot read file: " + oldF);
+    FileLock oldLock = oldFile.getChannel().tryLock();
+    try {
+      oldFile.seek(0);
+      int odlVersion = oldFile.readInt();
+      if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
+        return false;
+    } finally {
+      oldLock.release();
+      oldFile.close();
+    }
     // check consistency of the old storage
     File oldDataDir = new File(sd.root, "data");
     if (!oldDataDir.exists()) 
@@ -206,13 +220,14 @@
     FileLock oldLock = oldFile.getChannel().tryLock();
     if (oldLock == null)
       throw new IOException("Cannot lock file: " + oldF);
+    String odlStorageID = "";
     try {
       oldFile.seek(0);
       int odlVersion = oldFile.readInt();
       if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
         throw new IncorrectVersionException(odlVersion, "file " + oldF,
                                             LAST_PRE_UPGRADE_LAYOUT_VERSION);
-      String odlStorageID = org.apache.hadoop.io.UTF8.readString(oldFile);
+      odlStorageID = org.apache.hadoop.io.UTF8.readString(oldFile);
   
       // check new storage
       File newDataDir = sd.getCurrentDir();
@@ -221,14 +236,8 @@
         throw new IOException("Version file already exists: " + versionF);
       if (newDataDir.exists()) // somebody created current dir manually
         deleteDir(newDataDir);
-      // Write new layout
+      // move "data" to "current"
       rename(oldDataDir, newDataDir);
-  
-      this.layoutVersion = FSConstants.LAYOUT_VERSION;
-      this.namespaceID = nsInfo.getNamespaceID();
-      this.cTime = 0;
-      this.storageID = odlStorageID;
-      sd.write();
       // close and unlock old file
     } finally {
       oldLock.release();
@@ -236,6 +245,13 @@
     }
     // move old storage file into current dir
     rename(oldF, new File(sd.getCurrentDir(), "storage"));
+
+    // Write new version file
+    this.layoutVersion = FSConstants.LAYOUT_VERSION;
+    this.namespaceID = nsInfo.getNamespaceID();
+    this.cTime = 0;
+    this.storageID = odlStorageID;
+    sd.write();
     LOG.info("Conversion of " + oldF + " is complete.");
   }
 
@@ -408,5 +424,23 @@
     
     for(int i = 0; i < blockNames.length; i++)
       linkBlocks(new File(from, blockNames[i]), new File(to, blockNames[i]));
+  }
+
+  protected void corruptPreUpgradeStorage(File rootDir) throws IOException {
+    File oldF = new File(rootDir, "storage");
+    if (oldF.exists())
+      return;
+    // recreate old storage file to let pre-upgrade versions fail
+    if (!oldF.createNewFile())
+      throw new IOException("Cannot create file " + oldF);
+    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
+    if (oldFile == null)
+      throw new IOException("Cannot read file: " + oldF);
+    // write new version into old storage file
+    try {
+      writeCorruptedData(oldFile);
+    } finally {
+      oldFile.close();
+    }
   }
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java?view=diff&rev=543207&r1=543206&r2=543207
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java Thu May 31 11:33:18 2007
@@ -27,6 +27,7 @@
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.RandomAccessFile;
 import java.util.AbstractList;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -435,7 +436,21 @@
   boolean isConversionNeeded(StorageDirectory sd) throws IOException {
     File oldImageDir = new File(sd.root, "image");
     if (!oldImageDir.exists())
-      return false;
+      throw new InconsistentFSStateException(sd.root,
+          oldImageDir + " does not exist.");
+    // check the layout version inside the image file
+    File oldF = new File(oldImageDir, "fsimage");
+    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
+    if (oldFile == null)
+      throw new IOException("Cannot read file: " + oldF);
+    try {
+      oldFile.seek(0);
+      int odlVersion = oldFile.readInt();
+      if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
+        return false;
+    } finally {
+      oldFile.close();
+    }
     // check consistency of the old storage
     if (!oldImageDir.isDirectory())
       throw new InconsistentFSStateException(sd.root,
@@ -492,8 +507,8 @@
       needReformat = true;
     } else {
       sd.write();
-      LOG.info("Conversion of " + oldImage + " is complete.");
     }
+    LOG.info("Conversion of " + oldImage + " is complete.");
     return needReformat;
   }
 
@@ -958,6 +973,27 @@
       node.setRemaining(remaining);
       node.setLastUpdate(lastUpdate);
       node.setXceiverCount(xceiverCount);
+    }
+  }
+
+  protected void corruptPreUpgradeStorage(File rootDir) throws IOException {
+    File oldImageDir = new File(rootDir, "image");
+    if (!oldImageDir.exists())
+      if (!oldImageDir.mkdir())
+        throw new IOException("Cannot create directory " + oldImageDir);
+    File oldImage = new File(oldImageDir, "fsimage");
+    if (!oldImage.exists())
+      // recreate old image file to let pre-upgrade versions fail
+      if (!oldImage.createNewFile())
+        throw new IOException("Cannot create file " + oldImage);
+    RandomAccessFile oldFile = new RandomAccessFile(oldImage, "rws");
+    if (oldFile == null)
+      throw new IOException("Cannot read file: " + oldImage);
+    // write new version into old image file
+    try {
+      writeCorruptedData(oldFile);
+    } finally {
+      oldFile.close();
     }
   }
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java?view=diff&rev=543207&r1=543206&r2=543207
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java Thu May 31 11:33:18 2007
@@ -157,6 +157,7 @@
      * @throws IOException
      */
     void write() throws IOException {
+      corruptPreUpgradeStorage(root);
       write(getVersionFile());
     }
 
@@ -520,5 +521,21 @@
     return "NS-" + Integer.toString(storage.getNamespaceID())
       + "-" + Integer.toString(storage.getLayoutVersion())
       + "-" + Long.toString(storage.getCTime());
+  }
+
+  // Pre-upgrade version compatibility
+  protected abstract void corruptPreUpgradeStorage(File rootDir) throws IOException;
+
+  protected void writeCorruptedData(RandomAccessFile file) throws IOException {
+    final String messageForPreUpgradeVersion =
+      "\nThis file is INTENTIONALLY CORRUPTED so that versions\n"
+      + "of Hadoop prior to 0.13 (which are incompatible\n"
+      + "with this directory layout) will fail to start.\n";
+  
+    file.seek(0);
+    file.writeInt(FSConstants.LAYOUT_VERSION);
+    org.apache.hadoop.io.UTF8.writeString(file, "");
+    file.writeBytes(messageForPreUpgradeVersion);
+    file.getFD().sync();
   }
 }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java?view=diff&rev=543207&r1=543206&r2=543207
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java Thu May 31 11:33:18 2007
@@ -23,11 +23,9 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.dfs.FSConstants.NodeType;
 import static org.apache.hadoop.dfs.FSConstants.NodeType.NAME_NODE;
 import static org.apache.hadoop.dfs.FSConstants.NodeType.DATA_NODE;
 import org.apache.hadoop.dfs.FSConstants.StartupOption;
-import org.apache.hadoop.fs.Path;
 
 /**
  * This test ensures the appropriate response from the system when 
@@ -82,7 +80,6 @@
    * This test attempts to finalize the NameNode and DataNode.
    */
   public void testFinalize() throws Exception {
-    File[] baseDirs;
     UpgradeUtilities.initialize();
     
     for (int numDirs = 1; numDirs <= 2; numDirs++) {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java?view=diff&rev=543207&r1=543206&r2=543207
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java Thu May 31 11:33:18 2007
@@ -249,16 +249,26 @@
       LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
       switch (nodeType) {
       case NAME_NODE:
-        localFS.copyToLocalFile(
-                                new Path(namenodeStorage.toString(), "current"),
+        localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                                 new Path(newDir.toString()),
                                 false);
+        Path newImgDir = new Path(newDir.getParent(), "image");
+        if (!localFS.exists(newImgDir))
+          localFS.copyToLocalFile(
+              new Path(namenodeStorage.toString(), "image"),
+              newImgDir,
+              false);
         break;
       case DATA_NODE:
-        localFS.copyToLocalFile(
-                                new Path(datanodeStorage.toString(), "current"),
+        localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                                 new Path(newDir.toString()),
                                 false);
+        Path newStorageFile = new Path(newDir.getParent(), "storage");
+        if (!localFS.exists(newStorageFile))
+          localFS.copyToLocalFile(
+              new Path(datanodeStorage.toString(), "storage"),
+              newStorageFile,
+              false);
         break;
       }
       retVal[i] = newDir;



Mime
View raw message