hadoop-hdfs-commits mailing list archives

From t...@apache.org
Subject svn commit: r1102556 - in /hadoop/hdfs/branches/HDFS-1073: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/util/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/util/
Date Fri, 13 May 2011 03:33:43 GMT
Author: todd
Date: Fri May 13 03:33:42 2011
New Revision: 1102556

URL: http://svn.apache.org/viewvc?rev=1102556&view=rev
Log:
HDFS-1800. Extend image checksumming to function with multiple fsimage files per directory.
Contributed by Todd Lipcon.
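
In brief: instead of recording a single MD5 for the image in each directory's VERSION
file, the digest is now written to a per-image sidecar file (fsimage.md5, in md5sum
format), which allows a storage directory to hold more than one checksummed fsimage
at a time.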

Added:
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
Modified:
    hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java

Modified: hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt?rev=1102556&r1=1102555&r2=1102556&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt (original)
+++ hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt Fri May 13 03:33:42 2011
@@ -23,3 +23,5 @@ HDFS-1799. Refactor log rolling and file
            (Ivan Kelly and Todd Lipcon via todd)
 HDFS-1801. Remove use of timestamps to identify checkpoints and logs (todd)
 HDFS-1930. TestDFSUpgrade failing in HDFS-1073 branch (todd)
+HDFS-1800. Extend image checksumming to function with multiple fsimage files
+           per directory. (todd)

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java?rev=1102556&r1=1102555&r2=1102556&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java Fri May 13 03:33:42 2011
@@ -174,7 +174,8 @@ public class BackupImage extends FSImage
 
       getFSDirectoryRootLock().writeLock();
       try { // load image under rootDir lock
-        loadFSImage(NNStorage.getStorageFile(sdName, NameNodeFile.IMAGE));
+        loadFSImage(NNStorage.getStorageFile(sdName, NameNodeFile.IMAGE),
+            sig.getImageDigest());
       } finally {
         getFSDirectoryRootLock().writeUnlock();
       }

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1102556&r1=1102555&r2=1102556&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Fri May 13 03:33:42 2011
@@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 
@@ -629,8 +630,9 @@ public class FSImage implements NNStorag
    // TODO need to discuss what the correct logic is for determining which
     // storage directory to read properties from
     sdForProperties.read();
-
-    loadFSImage(loadPlan.getImageFile());
+    File imageFile = loadPlan.getImageFile();
+    MD5Hash expectedMD5 = MD5FileUtils.readStoredMd5ForFile(imageFile);
+    loadFSImage(imageFile, expectedMD5);
     needToSave |= loadEdits(loadPlan.getEditsFiles());
 
     /* TODO(todd) Need to discuss whether we should force a re-save
@@ -674,7 +676,7 @@ public class FSImage implements NNStorag
    * filenames and blocks.  Return whether we should
    * "re-save" and consolidate the edit-logs
    */
-  void loadFSImage(File curFile) throws IOException {
+  void loadFSImage(File curFile, MD5Hash expectedMd5) throws IOException {
     FSImageFormat.Loader loader = new FSImageFormat.Loader(
         conf, getFSNamesystem());
     loader.load(curFile);
@@ -683,13 +685,14 @@ public class FSImage implements NNStorag
     // Check that the image digest we loaded matches up with what
     // we expected
     MD5Hash readImageMd5 = loader.getLoadedImageMd5();
-    if (storage.getImageDigest() == null) {
-      storage.setImageDigest(readImageMd5); // set this fsimage's checksum
-    } else if (!storage.getImageDigest().equals(readImageMd5)) {
+    if (expectedMd5 != null &&
+        !expectedMd5.equals(readImageMd5)) {
       throw new IOException("Image file " + curFile +
           " is corrupt with MD5 checksum of " + readImageMd5 +
-          " but expecting " + storage.getImageDigest());
+          " but expecting " + expectedMd5);
     }
+    
+    storage.setImageDigest(readImageMd5); // set this fsimage's checksum
     storage.setCheckpointTxId(loader.getLoadedImageTxId());
   }
 
@@ -701,6 +704,7 @@ public class FSImage implements NNStorag
     FSImageFormat.Saver saver = new FSImageFormat.Saver();
     FSImageCompression compression = FSImageCompression.createCompression(conf);
     saver.save(newFile, getFSNamesystem(), compression);
+    MD5FileUtils.saveMD5File(newFile, saver.getSavedDigest());
     storage.setImageDigest(saver.getSavedDigest());
     storage.setCheckpointTxId(editLog.getLastWrittenTxId());
   }
@@ -950,12 +954,23 @@ public class FSImage implements NNStorag
                                 + editsFile.getCanonicalPath());
       }
      // delete the old fsimage if sd is an edits-only directory
-      if (!sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
-        File imageFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE);
+      File imageFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE);
+      if (!sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {        
         if(imageFile.exists() && !imageFile.delete())
           throw new IOException("Cannot delete image file " 
                                 + imageFile.getCanonicalPath());
+      } else {
+        try {
+          MD5FileUtils.saveMD5File(imageFile, newImageDigest);
+        } catch (IOException ioe) {
+          LOG.error("Cannot save image md5 in " + sd, ioe);
+          
+          if(al == null) al = new ArrayList<StorageDirectory> (1);
+          al.add(sd);
+          continue;
+        }
       }
+      
       try {
         sd.write();
       } catch (IOException e) {
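
Condensed from the hunk above, the per-directory logic when rolling in a new image
is now roughly the following (a sketch that reuses only names appearing in this diff):

    File imageFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE);
    if (sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      // Image directories get an fsimage.md5 sidecar. A failure to write it
      // marks this storage directory as errored (added to al) and moves on,
      // rather than aborting the whole save.
      MD5FileUtils.saveMD5File(imageFile, newImageDigest);
    } else if (imageFile.exists() && !imageFile.delete()) {
      // Edits-only directories must not retain a stale fsimage.
      throw new IOException("Cannot delete image file "
          + imageFile.getCanonicalPath());
    }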

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1102556&r1=1102555&r2=1102556&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java Fri May 13 03:33:42 2011
@@ -687,20 +687,6 @@ public class NNStorage extends Storage i
         sDUS == null? false : Boolean.parseBoolean(sDUS),
         sDUV == null? getLayoutVersion() : Integer.parseInt(sDUV));
 
-    String sMd5 = props.getProperty(MESSAGE_DIGEST_PROPERTY);
-    if (layoutVersion <= -26) {
-      if (sMd5 == null) {
-        throw new InconsistentFSStateException(sd.getRoot(),
-            "file " + STORAGE_FILE_VERSION
-            + " does not have MD5 image digest.");
-      }
-      this.imageDigest = new MD5Hash(sMd5);
-    } else if (sMd5 != null) {
-      throw new InconsistentFSStateException(sd.getRoot(),
-          "file " + STORAGE_FILE_VERSION +
-          " has image MD5 digest when version is " + layoutVersion);
-    }
-
     String sCheckpointId = props.getProperty(CHECKPOINT_TXID_PROPERTY);
     if (layoutVersion <= FSConstants.FIRST_STORED_TXIDS_VERSION) {
       if (sCheckpointId == null) {
@@ -743,12 +729,7 @@ public class NNStorage extends Storage i
       props.setProperty("distributedUpgradeVersion",
                         Integer.toString(uVersion));
     }
-    if (imageDigest == null) {
-      imageDigest = MD5Hash.digest(
-          new FileInputStream(getStorageFile(sd, NameNodeFile.IMAGE)));
-    }
 
-    props.setProperty(MESSAGE_DIGEST_PROPERTY, imageDigest.toString());
     props.setProperty(CHECKPOINT_TXID_PROPERTY, String.valueOf(checkpointTxId));
   }
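
The image digest therefore no longer round-trips through the VERSION properties file
at all; each fsimage carries its own checksum in an adjacent .md5 file using the Unix
md5sum line format. An illustrative sidecar line (the hash value here is made up):

    d41d8cd98f00b204e9800998ecf8427e *fsimage

The first field is the 32-character hex MD5 and the asterisk marks binary mode,
exactly what LINE_REGEX in MD5FileUtils expects.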
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1102556&r1=1102555&r2=1102556&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri May 13 03:33:42 2011
@@ -713,10 +713,9 @@ public class SecondaryNameNode implement
         throw new IOException("Could not locate checkpoint edits");
       
       this.getStorage().setStorageInfo(sig);
-      this.getStorage().setImageDigest(sig.getImageDigest());
       if (loadImage) {
-        getStorage();
-        loadFSImage(NNStorage.getStorageFile(sdName, NameNodeFile.IMAGE));
+        loadFSImage(NNStorage.getStorageFile(sdName, NameNodeFile.IMAGE),
+            sig.getImageDigest());
       }
       List<File> editsFiles =
         FSImageOldStorageInspector.getEditsInStorageDir(sdEdits);

Added: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java?rev=1102556&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java (added)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java Fri May 13 03:33:42 2011
@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.DigestInputStream;
+import java.security.MessageDigest;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * Static functions for dealing with files of the same format
+ * that the Unix "md5sum" utility writes.
+ */
+public abstract class MD5FileUtils {
+  private static final Log LOG = LogFactory.getLog(
+      MD5FileUtils.class);
+
+  private static final String MD5_SUFFIX = ".md5";
+  private static final Pattern LINE_REGEX =
+    Pattern.compile("([0-9a-f]{32}) [ \\*](.+)");
+  
+  /**
+   * Verify that the previously saved md5 for the given file matches
+   * expectedMd5.
+   * @throws IOException 
+   */
+  public static void verifySavedMD5(File dataFile, MD5Hash expectedMD5)
+      throws IOException {
+    MD5Hash storedHash = readStoredMd5ForFile(dataFile);
+    // Check the hash itself
+    if (!expectedMD5.equals(storedHash)) {
+      throw new IOException(
+          "File " + dataFile + " did not match stored MD5 checksum " +
+          " (stored: " + storedHash + ", computed: " + expectedMD5);
+    }
+  }
+  
+  /**
+   * Read the md5 checksum stored alongside the given file.
+   * @param dataFile the file containing data
+   * @return the checksum stored in dataFile.md5
+   */
+  public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
+    File md5File = getDigestFileForFile(dataFile);
+
+    String md5Line;
+    
+    BufferedReader reader =
+      new BufferedReader(new FileReader(md5File));
+    try {
+      String line = reader.readLine();
+      md5Line = (line == null) ? "" : line.trim();
+    } catch (IOException ioe) {
+      throw new IOException("Error reading md5 file at " + md5File, ioe);
+    } finally {
+      IOUtils.cleanup(LOG, reader);
+    }
+    
+    Matcher matcher = LINE_REGEX.matcher(md5Line);
+    if (!matcher.matches()) {
+      throw new IOException("Invalid MD5 file at " + md5File
+          + " (does not match expected pattern)");
+    }
+    String storedHash = matcher.group(1);
+    File referencedFile = new File(matcher.group(2));
+
+    // Sanity check: Make sure that the file referenced in the .md5 file at
+    // least has the same name as the file we expect
+    if (!referencedFile.getName().equals(dataFile.getName())) {
+      throw new IOException(
+          "MD5 file at " + md5File + " references file named " +
+          referencedFile.getName() + " but we expected it to reference " +
+          dataFile);
+    }
+    return new MD5Hash(storedHash);
+  }
+  
+  /**
+   * Read dataFile and compute its MD5 checksum.
+   */
+  public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
+    InputStream in = new FileInputStream(dataFile);
+    try {
+      MessageDigest digester = MD5Hash.getDigester();
+      DigestInputStream dis = new DigestInputStream(in, digester);
+      IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128*1024);
+      
+      return new MD5Hash(digester.digest());
+    } finally {
+      IOUtils.closeStream(in);
+    }
+  }
+
+  /**
+   * Save the ".md5" file that lists the md5sum of another file.
+   * @param dataFile the original file whose md5 was computed
+   * @param digest the computed digest
+   * @throws IOException
+   */
+  public static void saveMD5File(File dataFile, MD5Hash digest)
+      throws IOException {
+    File md5File = getDigestFileForFile(dataFile);
+    String digestString = StringUtils.byteToHexString(
+        digest.getDigest());
+    String md5Line = digestString + " *" + dataFile.getName() + "\n";
+    
+    File md5FileTmp = new File(md5File.getParentFile(),
+        md5File.getName() + ".tmp");
+    
+    boolean success = false;
+    
+    // Write to tmp file
+    FileWriter writer = new FileWriter(md5FileTmp);
+    try {
+      writer.write(md5Line);
+      success = true;
+    } finally {
+      IOUtils.cleanup(LOG, writer);
+      if (!success) {
+        md5FileTmp.delete();
+      }
+    }
+    
+    // Move tmp file into place
+    if (!md5FileTmp.renameTo(md5File)) {
+      if (!md5File.delete() || !md5FileTmp.renameTo(md5File)) {
+        md5FileTmp.delete();
+        throw new IOException(
+            "Unable to rename " + md5FileTmp + " to " + md5File);
+      }
+    }
+    
+    LOG.debug("Saved MD5 " + digest + " to " + md5File);
+  }
+
+  /**
+   * @return a reference to the file with .md5 suffix that will
+   * contain the md5 checksum for the given data file.
+   */
+  public static File getDigestFileForFile(File file) {
+    return new File(file.getParentFile(), file.getName() + MD5_SUFFIX);
+  }
+}
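
Taken together, this gives a small save/verify API. A minimal usage sketch (the data
file path is hypothetical; only methods defined above are used):

    File dataFile = new File("/tmp/example.dat");          // hypothetical path
    // Compute the digest and persist it as /tmp/example.dat.md5.
    MD5Hash digest = MD5FileUtils.computeMd5ForFile(dataFile);
    MD5FileUtils.saveMD5File(dataFile, digest);
    // Later: recompute from the data and compare against the sidecar.
    // verifySavedMD5 throws IOException on mismatch or unreadable sidecar.
    MD5Hash recomputed = MD5FileUtils.computeMd5ForFile(dataFile);
    MD5FileUtils.verifySavedMD5(dataFile, recomputed);

Because saveMD5File writes to a .tmp file and renames it into place, a crash
mid-write should not leave a truncated .md5 behind.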

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1102556&r1=1102555&r2=1102556&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Fri May 13 03:33:42 2011
@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 
@@ -50,6 +51,7 @@ import org.apache.hadoop.hdfs.protocol.F
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
+import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -423,69 +425,57 @@ public class TestStartup extends TestCas
   }
 
   private void testImageChecksum(boolean compress) throws Exception {
-    Configuration conf = new Configuration();
-    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
-    conf.set("dfs.http.address", "127.0.0.1:0");
-    File base_dir = new File(
-        System.getProperty("test.build.data", "build/test/data"), "dfs/");
-    conf.set("dfs.name.dir", new File(base_dir, "name").getPath());
-    conf.setBoolean("dfs.permissions", false);
+    MiniDFSCluster cluster = null;
+    Configuration conf = new HdfsConfiguration();
     if (compress) {
       conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
     }
 
-    GenericTestUtils.formatNamenode(conf);
-
-    // create an image
-    LOG.info("Create an fsimage");
-    NameNode namenode = new NameNode(conf);
-    namenode.getNamesystem().mkdirs("/test",
-        new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
-    assertTrue(namenode.getFileInfo("/test").isDir());
-    namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-    namenode.saveNamespace();
-
-    FSImage image = namenode.getFSImage();
-    image.loadFSImage();
-
-    File versionFile = image.getStorage().getStorageDir(0).getVersionFile();
-
-    RandomAccessFile file = new RandomAccessFile(versionFile, "rws");
-    FileInputStream in = null;
-    FileOutputStream out = null;
     try {
-      // read the property from version file
-      in = new FileInputStream(file.getFD());
-      file.seek(0);
-      Properties props = new Properties();
-      props.load(in);
-
-      // get the MD5 property and change it
-      String sMd5 = props.getProperty(NNStorage.MESSAGE_DIGEST_PROPERTY);
-      MD5Hash md5 = new MD5Hash(sMd5);
-      byte[] bytes = md5.getDigest();
-      bytes[0] += 1;
-      md5 = new MD5Hash(bytes);
-      props.setProperty(NNStorage.MESSAGE_DIGEST_PROPERTY, md5.toString());
-
-      // write the properties back to version file
-      file.seek(0);
-      out = new FileOutputStream(file.getFD());
-      props.store(out, null);
-      out.flush();
-      file.setLength(out.getChannel().position());
-
-      // now load the image again
-      image.loadFSImage();
-
-      fail("Expect to get a checksumerror");
-    } catch(IOException e) {
-        assertTrue(e.getMessage().contains("is corrupt"));
+        LOG.info("\n===========================================\n" +
+                 "Starting empty cluster");
+        
+        cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(0)
+          .format(true)
+          .build();
+        cluster.waitActive();
+        
+        FileSystem fs = cluster.getFileSystem();
+        fs.mkdirs(new Path("/test"));
+        
+        // Directory layout looks like:
+        // test/data/dfs/nameN/current/{fsimage,edits,...}
+        File nameDir = new File(cluster.getNameDirs(0).iterator().next().getPath());
+        File dfsDir = nameDir.getParentFile();
+        assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
+        
+        LOG.info("Shutting down cluster #1");
+        cluster.shutdown();
+        cluster = null;
+
+        // Corrupt the md5 file to all 0s
+        File imageFile = new File(nameDir, "current/fsimage");
+        MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
+        
+        // Try to start a new cluster
+        LOG.info("\n===========================================\n" +
+        "Starting same cluster after simulated crash");
+        try {
+          cluster = new MiniDFSCluster.Builder(conf)
+            .numDataNodes(0)
+            .format(false)
+            .build();
+          fail("Should not have successfully started with corrupt image");
+        } catch (IOException ioe) {
+          if (!ioe.getMessage().contains("is corrupt with MD5")) {
+            throw ioe;
+          }
+        }
     } finally {
-      IOUtils.closeStream(in);
-      IOUtils.closeStream(out);
-      namenode.stop();
-      namenode.join();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 }

Added: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java?rev=1102556&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java (added)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java Fri May 13 03:33:42 2011
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.util.MD5FileUtils;
+import org.apache.hadoop.io.MD5Hash;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestMD5FileUtils {
+  private static final File TEST_DIR_ROOT = new File(
+      System.getProperty("test.build.data","build/test/data"));
+  private static final File TEST_DIR = new File(TEST_DIR_ROOT,
+      "TestMD5FileUtils");
+  private static final File TEST_FILE = new File(TEST_DIR,
+      "testMd5File.dat");
+  
+  private static final int TEST_DATA_LEN = 128 * 1024; // 128KB test data
+  private static final byte[] TEST_DATA =
+    DFSTestUtil.generateSequentialBytes(0, TEST_DATA_LEN);
+  private static final MD5Hash TEST_MD5 = MD5Hash.digest(TEST_DATA);
+  
+  @Before
+  public void setup() throws IOException {
+    FileUtil.fullyDelete(TEST_DIR);
+    assertTrue(TEST_DIR.mkdirs());
+    
+    // Write a file out
+    FileOutputStream fos = new FileOutputStream(TEST_FILE);
+    fos.write(TEST_DATA);
+    fos.close();
+  }
+  
+  @Test
+  public void testComputeMd5ForFile() throws Exception {
+    MD5Hash computedDigest = MD5FileUtils.computeMd5ForFile(TEST_FILE);
+    assertEquals(TEST_MD5, computedDigest);    
+  }
+
+  @Test
+  public void testVerifyMD5FileGood() throws Exception {
+    MD5FileUtils.saveMD5File(TEST_FILE, TEST_MD5);
+    MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
+  }
+
+  /**
+   * Test when .md5 file does not exist at all
+   */
+  @Test(expected=IOException.class)
+  public void testVerifyMD5FileMissing() throws Exception {
+    MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
+  }
+
+  /**
+   * Test when .md5 file exists but incorrect checksum
+   */
+  @Test
+  public void testVerifyMD5FileBadDigest() throws Exception {
+    MD5FileUtils.saveMD5File(TEST_FILE, MD5Hash.digest(new byte[0]));
+    try {
+      MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
+      fail("Did not throw");
+    } catch (IOException ioe) {
+      // Expected
+    }
+  }
+  
+  /**
+   * Test when .md5 file exists but has a bad format
+   */
+  @Test
+  public void testVerifyMD5FileBadFormat() throws Exception {
+    FileWriter writer = new FileWriter(MD5FileUtils.getDigestFileForFile(TEST_FILE));
+    try {
+      writer.write("this is not an md5 file");
+    } finally {
+      writer.close();
+    }
+    
+    try {
+      MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
+      fail("Did not throw");
+    } catch (IOException ioe) {
+      // expected
+    }
+  }  
+}
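
Since the sidecar matches the md5sum line format, a file written by saveMD5File
should also pass an external check (for example, md5sum -c testMd5File.dat.md5 run
from the test directory), though these tests exercise only the Java code paths.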


