hadoop-hdfs-commits mailing list archives

From t...@apache.org
Subject svn commit: r1126286 [3/3] - in /hadoop/hdfs/branches/HDFS-1073: ./ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/server/protocol/ src/java/org/apache/hadoop/hdfs/tools/offlin...
Date Mon, 23 May 2011 01:19:51 GMT
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java?rev=1126286&r1=1126285&r2=1126286&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java Mon May 23 01:19:49 2011
@@ -28,6 +28,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
 /**
  * This class tests various combinations of dfs.name.dir 
@@ -65,10 +72,22 @@ public class TestNameEditsConfigs extend
   }
 
   void checkImageAndEditsFilesExistence(File dir, 
-                                        boolean imageMustExist,
-                                        boolean editsMustExist) {
-    assertTrue(imageMustExist == new File(dir, FILE_IMAGE).exists());
-    assertTrue(editsMustExist == new File(dir, FILE_EDITS).exists());
+                                        boolean shouldHaveImages,
+                                        boolean shouldHaveEdits)
+  throws IOException {
+    FSImageTransactionalStorageInspector ins = inspect(dir);
+
+    if (shouldHaveImages) {
+      assertTrue("Expect images in " + dir, ins.foundImages.size() > 0);
+    } else {
+      assertTrue("Expect no images in " + dir, ins.foundImages.isEmpty());      
+    }
+
+    if (shouldHaveEdits) {
+      assertTrue("Expect edits in " + dir, ins.foundEditLogs.size() > 0);
+    } else {
+      assertTrue("Expect no edits in " + dir, ins.foundEditLogs.isEmpty());
+    }
   }
 
   private void checkFile(FileSystem fileSys, Path name, int repl)
@@ -107,9 +126,10 @@ public class TestNameEditsConfigs extend
    *    do not read any stale image or edits. 
    * All along the test, we create and delete files at reach restart to make
    * sure we are reading proper edits and image.
+   * @throws Exception 
    */
   @SuppressWarnings("deprecation")
-  public void testNameEditsConfigs() throws IOException {
+  public void testNameEditsConfigs() throws Exception {
     Path file1 = new Path("TestNameEditsConfigs1");
     Path file2 = new Path("TestNameEditsConfigs2");
     Path file3 = new Path("TestNameEditsConfigs3");
@@ -117,12 +137,26 @@ public class TestNameEditsConfigs extend
     SecondaryNameNode secondary = null;
     Configuration conf = null;
     FileSystem fileSys = null;
-    File newNameDir = new File(base_dir, "name");
-    File newEditsDir = new File(base_dir, "edits");
-    File nameAndEdits = new File(base_dir, "name_and_edits");
-    File checkpointNameDir = new File(base_dir, "secondname");
-    File checkpointEditsDir = new File(base_dir, "secondedits");
-    File checkpointNameAndEdits = new File(base_dir, "second_name_and_edits");
+    final File newNameDir = new File(base_dir, "name");
+    final File newEditsDir = new File(base_dir, "edits");
+    final File nameAndEdits = new File(base_dir, "name_and_edits");
+    final File checkpointNameDir = new File(base_dir, "secondname");
+    final File checkpointEditsDir = new File(base_dir, "secondedits");
+    final File checkpointNameAndEdits = new File(base_dir, "second_name_and_edits");
+    
+    ImmutableList<File> allCurrentDirs = ImmutableList.of(
+        new File(nameAndEdits, "current"),
+        new File(newNameDir, "current"),
+        new File(newEditsDir, "current"),
+        new File(checkpointNameAndEdits, "current"),
+        new File(checkpointNameDir, "current"),
+        new File(checkpointEditsDir, "current"));
+    ImmutableList<File> imageCurrentDirs = ImmutableList.of(
+        new File(nameAndEdits, "current"),
+        new File(newNameDir, "current"),
+        new File(checkpointNameAndEdits, "current"),
+        new File(checkpointNameDir, "current"));
+    
     
     // Start namenode with same dfs.name.dir and dfs.name.edits.dir
     conf = new HdfsConfiguration();
@@ -188,23 +222,12 @@ public class TestNameEditsConfigs extend
       secondary.shutdown();
     }
 
-    checkImageAndEditsFilesExistence(nameAndEdits, true, true);
-    checkImageAndEditsFilesExistence(newNameDir, true, false);
-    checkImageAndEditsFilesExistence(newEditsDir, false, true);
-    checkImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
-    checkImageAndEditsFilesExistence(checkpointNameDir, true, false);
-    checkImageAndEditsFilesExistence(checkpointEditsDir, false, true);
-
+    FSImageTestUtil.assertParallelFilesAreIdentical(allCurrentDirs,
+        ImmutableSet.of("VERSION"));
+    FSImageTestUtil.assertSameNewestImage(imageCurrentDirs);
+    
     // Now remove common directory both have and start namenode with 
     // separate name and edits dirs
-    new File(nameAndEdits, FILE_EDITS).renameTo(
-        new File(newNameDir, FILE_EDITS));
-    new File(nameAndEdits, FILE_IMAGE).renameTo(
-        new File(newEditsDir, FILE_IMAGE));
-    new File(checkpointNameAndEdits, FILE_EDITS).renameTo(
-        new File(checkpointNameDir, FILE_EDITS));
-    new File(checkpointNameAndEdits, FILE_IMAGE).renameTo(
-        new File(checkpointEditsDir, FILE_IMAGE));
     conf =  new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath());
@@ -234,7 +257,8 @@ public class TestNameEditsConfigs extend
       cluster.shutdown();
       secondary.shutdown();
     }
-
+    
+    // No edit logs in new name dir
     checkImageAndEditsFilesExistence(newNameDir, true, false);
     checkImageAndEditsFilesExistence(newEditsDir, false, true);
     checkImageAndEditsFilesExistence(checkpointNameDir, true, false);
@@ -278,12 +302,18 @@ public class TestNameEditsConfigs extend
     checkImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
   }
 
+  private FSImageTransactionalStorageInspector inspect(File storageDir)
+      throws IOException {
+    return FSImageTestUtil.inspectStorageDirectory(
+        new File(storageDir, "current"), NameNodeDirType.IMAGE_AND_EDITS);
+  }
+
   /**
    * Test various configuration options of dfs.name.dir and dfs.name.edits.dir
    * This test tries to simulate failure scenarios.
    * 1. Start cluster with shared name and edits dir
    * 2. Restart cluster by adding separate name and edits dirs
-   * 3. Restart cluster by removing shared name and edits dir
+   * 3. Restart cluster by removing shared name and edits dir
    * 4. Restart cluster with old shared name and edits dir, but only latest 
 *    name dir. This should fail since we don't have latest edits dir
    * 5. Restart cluster with old shared name and edits dir, but only latest
@@ -311,6 +341,10 @@ public class TestNameEditsConfigs extend
                                 .manageNameDfsDirs(false)
                                 .build();
     cluster.waitActive();
+    
+    // Check that the dir has a VERSION file
+    assertTrue(new File(nameAndEdits, "current/VERSION").exists());
+    
     fileSys = cluster.getFileSystem();
 
     try {
@@ -339,6 +373,12 @@ public class TestNameEditsConfigs extend
                                 .manageNameDfsDirs(false)
                                 .build();
     cluster.waitActive();
+
+    // Check that the dirs have a VERSION file
+    assertTrue(new File(nameAndEdits, "current/VERSION").exists());
+    assertTrue(new File(newNameDir, "current/VERSION").exists());
+    assertTrue(new File(newEditsDir, "current/VERSION").exists());
+
     fileSys = cluster.getFileSystem();
 
     try {
@@ -377,7 +417,7 @@ public class TestNameEditsConfigs extend
       fileSys.close();
       cluster.shutdown();
     }
-
+    
     // Add old shared directory for name and edits along with latest name
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath() + "," + 
@@ -398,13 +438,9 @@ public class TestNameEditsConfigs extend
       cluster = null;
     }
 
-    // Add old shared directory for name and edits along with latest edits
-    // This case is currently disabled, because once we have HDFS-1073 complete
-    // we can easily distinguish between the edits file in the old dir and the
-    // edits file in the new one based on their file names. This part of the
-    // test will be re-enabled to make sure the NN starts with valid edits
-    // in this case. TODO
-    /*    
+    // Add old shared directory for name and edits along with latest edits. 
+    // This is OK, since the latest edits will have segments leading all
+    // the way from the image in name_and_edits.
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath() +
@@ -416,12 +452,17 @@ public class TestNameEditsConfigs extend
                                   .format(false)
                                   .manageNameDfsDirs(false)
                                   .build();
-      assertTrue(false);
+      assertTrue(!fileSys.exists(file1));
+      assertTrue(fileSys.exists(file2));
+      checkFile(fileSys, file2, replication);
+      cleanupFile(fileSys, file2);
+      writeFile(fileSys, file3, replication);
+      checkFile(fileSys, file3, replication);
     } catch (IOException e) { // expect to fail
       System.out.println("cluster start failed due to missing latest name dir");
     } finally {
-      cluster = null;
+      fileSys.close();
+      cluster.shutdown();
     }
-    */
   }
 }
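
For context on the new assertions above: with the transactional layout on this branch, every storage directory's current/ folder should end up holding byte-identical copies of the same files, with the per-directory VERSION file explicitly ignored by the call above. A minimal, self-contained sketch of such a check, assuming only the semantics implied by the call sites in this patch; the class and helper names below are illustrative stand-ins for FSImageTestUtil.assertParallelFilesAreIdentical, not its real implementation.

    import java.io.File;
    import java.io.FileInputStream;
    import java.security.MessageDigest;
    import java.util.List;
    import java.util.Set;
    import java.util.SortedSet;
    import java.util.TreeSet;

    class ParallelDirCheck {
      /** Every non-ignored file name present in any dir must be present,
       *  with identical contents, in all dirs. */
      static void assertParallelFilesAreIdentical(
          List<File> dirs, Set<String> ignoredNames) throws Exception {
        SortedSet<String> names = new TreeSet<String>();
        for (File dir : dirs) {
          for (String name : dir.list()) {
            if (!ignoredNames.contains(name)) {
              names.add(name);
            }
          }
        }
        for (String name : names) {
          String expected = null;
          for (File dir : dirs) {
            // throws FileNotFoundException if a copy is missing in some dir
            String md5 = md5Hex(new File(dir, name));
            if (expected == null) {
              expected = md5;
            } else if (!expected.equals(md5)) {
              throw new AssertionError(name + " differs in " + dir);
            }
          }
        }
      }

      /** Hex MD5 of a file's contents. */
      static String md5Hex(File f) throws Exception {
        MessageDigest md = MessageDigest.getInstance("MD5");
        FileInputStream in = new FileInputStream(f);
        try {
          byte[] buf = new byte[4096];
          int n;
          while ((n = in.read(buf)) != -1) {
            md.update(buf, 0, n);
          }
        } finally {
          in.close();
        }
        StringBuilder sb = new StringBuilder();
        for (byte b : md.digest()) {
          sb.append(String.format("%02x", b & 0xff));
        }
        return sb.toString();
      }
    }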

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java?rev=1126286&r1=1126285&r2=1126286&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java Mon May 23 01:19:49 2011
@@ -34,6 +34,7 @@ import org.apache.hadoop.util.PureJavaCr
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.ArrayList;
@@ -102,16 +103,20 @@ public class TestParallelImageWrite exte
       assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
       rootmtime = fs.getFileStatus(rootpath).getModificationTime();
 
-      final long checkAfterRestart = checkImages(fsn, numNamenodeDirs);
+      final String checkAfterRestart = checkImages(fsn, numNamenodeDirs);
       
       // Modify the system and then perform saveNamespace
       files.cleanup(fs, dir);
       files.createFiles(fs, dir);
       fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
       cluster.getNameNode().saveNamespace();
-      final long checkAfterModify = checkImages(fsn, numNamenodeDirs);
-      assertTrue("Modified namespace doesn't change fsimage contents",
-          checkAfterRestart != checkAfterModify);
+      final String checkAfterModify = checkImages(fsn, numNamenodeDirs);
+      /**
+       * TODO the following assertion is no longer valid since the fsimage
+       * includes a transaction ID in its header.
+      assertFalse("Modified namespace doesn't change fsimage contents",
+          !checkAfterRestart.equals(checkAfterModify));
+       */
       fsn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
       files.cleanup(fs, dir);
     } finally {
@@ -124,42 +129,34 @@ public class TestParallelImageWrite exte
    * and non-empty, and there are the expected number of them.
    * @param fsn - the FSNamesystem being checked.
    * @param numImageDirs - the configured number of StorageDirectory of type IMAGE. 
-   * @return - the checksum of the FSImage files, which must all be the same.
+   * @return - the md5 hash of the most recent FSImage files, which must all be the same.
    * @throws AssertionFailedError if image files are empty or different,
    *     if less than two StorageDirectory are provided, or if the
    *     actual number of StorageDirectory is less than configured.
    */
-  public static long checkImages(FSNamesystem fsn, int numImageDirs) throws Exception {
+  public static String checkImages(
+      FSNamesystem fsn, int numImageDirs)
+  throws Exception {    
     NNStorage stg = fsn.getFSImage().getStorage();
     //any failed StorageDirectory is removed from the storageDirs list
     assertEquals("Some StorageDirectories failed Upgrade",
         numImageDirs, stg.getNumStorageDirs(NameNodeDirType.IMAGE));
     assertTrue("Not enough fsimage copies in MiniDFSCluster " + 
         "to test parallel write", numImageDirs > 1);
-    //checksum the FSImage stored in each storageDir
-    Iterator<StorageDirectory> iter = stg.dirIterator(NameNodeDirType.IMAGE);
-    List<Long> checksums = new ArrayList<Long>();
-    while (iter.hasNext()) {
-      StorageDirectory sd = iter.next();
-      File fsImage = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE);
-      PureJavaCrc32 crc = new PureJavaCrc32();
-      FileInputStream in = new FileInputStream(fsImage);
-      byte[] buff = new byte[4096];
-      int read = 0;
-      while ((read = in.read(buff)) != -1) {
-       crc.update(buff, 0, read);
-      }
-      long val = crc.getValue();
-      checksums.add(val);
-    }
-    assertEquals(numImageDirs, checksums.size());
-    PureJavaCrc32 crc = new PureJavaCrc32();
-    long emptyCrc = crc.getValue();
-    assertTrue("Empty fsimage file", checksums.get(0) != emptyCrc);
-    for (int i = 1; i < numImageDirs; i++) {
-      assertEquals(checksums.get(i - 1), checksums.get(i));
-    }
-    return checksums.get(0);
+
+    // List of "current/" directory from each SD
+    List<File> dirs = FSImageTestUtil.getCurrentDirs(stg, NameNodeDirType.IMAGE);
+
+    // across directories, all files with same names should be identical hashes   
+    FSImageTestUtil.assertParallelFilesAreIdentical(
+        dirs, Collections.<String>emptySet());
+    FSImageTestUtil.assertSameNewestImage(dirs);
+    
+    // Return the hash of the newest image file
+    StorageDirectory firstSd = stg.dirIterator(NameNodeDirType.IMAGE).next();
+    File latestImage = FSImageTestUtil.findLatestImageFile(firstSd);
+    String md5 = FSImageTestUtil.getFileMD5(latestImage);
+    return md5;
   }
 }
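
The checkImages rewrite above leans on txid-based image naming: judging from the paths used throughout this commit (current/fsimage_0, current/fsimage_4), image files are named fsimage_<txid>, so the newest checkpoint is simply the one with the largest numeric suffix. A hedged stand-in for FSImageTestUtil.findLatestImageFile, assuming only that convention:

    import java.io.File;

    class LatestImage {
      static File findLatestImageFile(File currentDir) {
        File latest = null;
        long latestTxId = -1;
        File[] files = currentDir.listFiles();
        if (files == null) {
          return null; // not a directory
        }
        for (File f : files) {
          String name = f.getName();
          if (!name.startsWith("fsimage_")) {
            continue;
          }
          String suffix = name.substring("fsimage_".length());
          if (!suffix.matches("\\d+")) {
            continue; // skip sidecars such as an .md5 file, if present
          }
          long txid = Long.parseLong(suffix);
          if (txid > latestTxId) {
            latestTxId = txid;
            latest = f;
          }
        }
        return latest;
      }
    }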
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1126286&r1=1126285&r2=1126286&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Mon May 23 01:19:49 2011
@@ -18,11 +18,13 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
+
 import static org.junit.Assert.*;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyLong;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
@@ -84,17 +86,17 @@ public class TestSaveNamespace {
 
     public Void answer(InvocationOnMock invocation) throws Throwable {
       Object[] args = invocation.getArguments();
-      File f = (File)args[0];
+      StorageDirectory sd = (StorageDirectory)args[0];
 
       if (count++ == 1) {
-        LOG.info("Injecting fault for file: " + f);
+        LOG.info("Injecting fault for sd: " + sd);
         if (exceptionType) {
           throw new RuntimeException("Injected fault: saveFSImage second time");
         } else {
           throw new IOException("Injected fault: saveFSImage second time");
         }
       }
-      LOG.info("Not injecting fault for file: " + f);
+      LOG.info("Not injecting fault for sd: " + sd);
       return (Void)invocation.callRealMethod();
     }
   }
@@ -121,16 +123,13 @@ public class TestSaveNamespace {
 
     FSImage spyImage = spy(originalImage);
     fsn.dir.fsImage = spyImage;
-    
-    spyImage.getStorage().setStorageDirectories(FSNamesystem.getNamespaceDirs(conf), 
-                                                FSNamesystem.getNamespaceEditsDirs(conf));
 
     // inject fault
     switch(fault) {
     case SAVE_FSIMAGE:
       // The spy throws a RuntimeException when writing to the second directory
       doAnswer(new FaultySaveImage()).
-        when(spyImage).saveFSImage((File)anyObject());
+        when(spyImage).saveFSImage((StorageDirectory)anyObject(), anyLong());
       break;
     case MOVE_CURRENT:
       // The spy throws a RuntimeException when calling moveCurrent()
@@ -191,35 +190,31 @@ public class TestSaveNamespace {
     // Replace the FSImage with a spy
     FSImage originalImage = fsn.dir.fsImage;
     NNStorage storage = originalImage.getStorage();
-    storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
-
-    NNStorage spyStorage = spy(storage);
-    originalImage.storage = spyStorage;
 
     FSImage spyImage = spy(originalImage);
     fsn.dir.fsImage = spyImage;
-
-    spyImage.getStorage().setStorageDirectories(FSNamesystem.getNamespaceDirs(conf), 
-                                                FSNamesystem.getNamespaceEditsDirs(conf));
-
-    // inject fault
-    // The spy throws a IOException when writing to the second directory
-    doAnswer(new FaultySaveImage(false)).
-      when(spyImage).saveFSImage((File)anyObject());
+    
+    File currentDir = storage.getStorageDir(0).getCurrentDir();
+    currentDir.setExecutable(false);
+    currentDir.setReadable(false);
 
     try {
       doAnEdit(fsn, 1);
       fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
 
-      // Save namespace - this  injects a fault and marks one
-      // directory as faulty.
+      // Save namespace - should mark the first storage dir as faulty
+      // since it's not traversable.
       LOG.info("Doing the first savenamespace.");
       fsn.saveNamespace();
-      LOG.warn("First savenamespace sucessful.");
+      LOG.info("First savenamespace sucessful.");      
+      
       assertTrue("Savenamespace should have marked one directory as bad." +
-                 " But found " + spyStorage.getRemovedStorageDirs().size() +
+                 " But found " + storage.getRemovedStorageDirs().size() +
                  " bad directories.", 
-                   spyStorage.getRemovedStorageDirs().size() == 1);
+                   storage.getRemovedStorageDirs().size() == 1);
+
+      currentDir.setExecutable(true);
+      currentDir.setReadable(true);
 
       // The next call to savenamespace should try inserting the
       // erroneous directory back to fs.name.dir. This command should
@@ -249,8 +244,17 @@ public class TestSaveNamespace {
       checkEditExists(fsn, 1);
       LOG.info("Reloaded image is good.");
     } finally {
+      if (currentDir.exists()) {
+        currentDir.setExecutable(true);
+        currentDir.setReadable(true);
+      }
+
       if (fsn != null) {
-        fsn.close();
+        try {
+          fsn.close();
+        } catch (Throwable t) {
+          LOG.fatal("Failed to shut down", t);
+        }
       }
     }
   }
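
One notable change above: the fault is no longer injected by stubbing saveFSImage. Instead the test revokes read and execute permission on the first storage directory's current/ folder, so the save genuinely fails there and exactly one directory gets marked bad. The pattern in isolation, as a sketch; note that File.setExecutable/setReadable return false on filesystems without POSIX permissions, and a root user would bypass the restriction entirely:

    import java.io.File;

    class DirFault {
      /** Make a directory untraversable so writes beneath it fail. */
      static void breakDir(File dir) {
        dir.setExecutable(false); // no traverse: children become unreachable
        dir.setReadable(false);   // no listing
      }

      /** Undo breakDir; call from a finally block, as the test above does. */
      static void healDir(File dir) {
        dir.setExecutable(true);
        dir.setReadable(true);
      }
    }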
@@ -319,7 +323,8 @@ public class TestSaveNamespace {
         FSNamesystem.getNamespaceEditsDirs(conf));
 
     doThrow(new IOException("Injected fault: saveFSImage")).
-      when(spyImage).saveFSImage((File)anyObject());
+      when(spyImage).saveFSImage((StorageDirectory)anyObject(),
+                                 Mockito.anyLong());
 
     try {
       doAnEdit(fsn, 1);
@@ -403,20 +408,28 @@ public class TestSaveNamespace {
     FSNamesystem fsn = new FSNamesystem(conf);
 
     try {
-      assertEquals(0, fsn.getEditLog().getLastWrittenTxId());
-      doAnEdit(fsn, 1);
+      // We have a BEGIN_LOG_SEGMENT txn to start
       assertEquals(1, fsn.getEditLog().getLastWrittenTxId());
+      doAnEdit(fsn, 1);
+      assertEquals(2, fsn.getEditLog().getLastWrittenTxId());
       
       fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
       fsn.saveNamespace();
+
+      // 2 more txns: END the first segment, BEGIN a new one
+      assertEquals(4, fsn.getEditLog().getLastWrittenTxId());
       
       // Shut down and restart
       fsn.getFSImage().close();
       fsn.close();
+      
+      // 1 more txn to END that segment
+      assertEquals(5, fsn.getEditLog().getLastWrittenTxId());
       fsn = null;
       
       fsn = new FSNamesystem(conf);
-      assertEquals(1, fsn.getEditLog().getLastWrittenTxId());
+      // 1 more txn to start new segment on restart
+      assertEquals(6, fsn.getEditLog().getLastWrittenTxId());
       
     } finally {
       if (fsn != null) {
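The renumbered assertions above follow from the segment rules this branch introduces, stated in the diff's own comments: opening an edit-log segment writes a BEGIN_LOG_SEGMENT txn, and finalizing it writes an END txn. Traced out:

    // txid 1  BEGIN segment 1   -- written at FSNamesystem startup
    // txid 2  the edit made by doAnEdit(fsn, 1)
    // txid 3  END segment 1     \  saveNamespace() rolls the log:
    // txid 4  BEGIN segment 2   /  finalize old segment, open a new one
    // txid 5  END segment 2     -- clean shutdown finalizes the open segment
    // txid 6  BEGIN segment 3   -- restart opens a fresh segment
    //
    // matching the asserted values 1, 2, 4, 5 and 6.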

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java?rev=1126286&r1=1126285&r2=1126286&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java Mon May 23 01:19:49 2011
@@ -107,8 +107,6 @@ public class TestSecurityTokenEditLog ex
   
       // set small size of flush buffer
       editLog.setOutputBufferCapacity(2048);
-      editLog.close();
-      editLog.open();
       namesystem.getDelegationTokenSecretManager().startThreads();
     
       // Create threads and make them run transactions concurrently.
@@ -129,25 +127,24 @@ public class TestSecurityTokenEditLog ex
       } 
       
       editLog.close();
-  
+        
       // Verify that we can read in all the transactions that we have written.
       // If there were any corruptions, it is likely that the reading in
       // of these transactions will throw an exception.
       //
       namesystem.getDelegationTokenSecretManager().stopThreads();
       int numKeys = namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
-      for (Iterator<StorageDirectory> it = 
-             fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
-        FSEditLogLoader loader = new FSEditLogLoader(namesystem);
-        File editFile = fsimage.getStorage().getStorageFile(it.next(), NameNodeFile.EDITS);
+      int expectedTransactions = NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys
+          + 2; // + 2 for BEGIN and END txns
+
+      for (StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
+        File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1);
         System.out.println("Verifying file: " + editFile);
+        
+        FSEditLogLoader loader = new FSEditLogLoader(namesystem);        
         int numEdits = loader.loadFSEdits(
             new EditLogFileInputStream(editFile), 1);
-        assertTrue("Verification for " + editFile + " failed. " +
-                   "Expected " + (NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys) + " transactions. "+
-                   "Found " + numEdits + " transactions.",
-                   numEdits == NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS +numKeys);
-  
+        assertEquals("Verification for " + editFile, expectedTransactions, numEdits);
       }
     } finally {
       if(fileSys != null) fileSys.close();
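To make the expectedTransactions arithmetic above concrete -- the constants are defined elsewhere in this test, so the values below are assumptions purely for illustration:

    // Hypothetical values; only the formula is taken from the patch.
    int NUM_THREADS = 2;
    int NUM_TRANSACTIONS = 100;
    int opsPerTrans = 3;
    int numKeys = 2;
    int expectedTransactions =
        NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys + 2;
    // = 2 * 3 * 100 + 2 + 2 = 604. The "+ 2" covers the BEGIN and END txns
    // bracketing the segment, and the finalized log would then be named
    // edits_1-604, i.e. edits_<1>-<1 + expectedTransactions - 1>.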

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1126286&r1=1126285&r2=1126286&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Mon May 23 01:19:49 2011
@@ -234,13 +234,11 @@ public class TestStartup extends TestCas
       sd = it.next();
 
       if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
-        img.getStorage();
-        File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE);
+        File imf = img.getStorage().getStorageFile(sd, NameNodeFile.IMAGE, 0);
         LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
         assertEquals(expectedImgSize, imf.length());	
       } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
-        img.getStorage();
-        File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS);
+        File edf = img.getStorage().getStorageFile(sd, NameNodeFile.EDITS, 0);
         LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length()  + "; expected = " + expectedEditsSize);
         assertEquals(expectedEditsSize, edf.length());	
       } else {
@@ -345,8 +343,8 @@ public class TestStartup extends TestCas
       FSImage image = nn.getFSImage();
       StorageDirectory sd = image.getStorage().getStorageDir(0); //only one
       assertEquals(sd.getStorageDirType(), NameNodeDirType.IMAGE_AND_EDITS);
-      File imf = image.getStorage().getStorageFile(sd, NameNodeFile.IMAGE);
-      File edf = image.getStorage().getStorageFile(sd, NameNodeFile.EDITS);
+      File imf = image.getStorage().getStorageFile(sd, NameNodeFile.IMAGE, 0);
+      File edf = image.getStorage().getStorageFile(sd, NameNodeFile.EDITS, 0);
       LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length());
       LOG.info("--edits file " + edf.getAbsolutePath() + "; len = " + edf.length());
 
@@ -456,7 +454,7 @@ public class TestStartup extends TestCas
         cluster = null;
 
         // Corrupt the md5 file to all 0s
-        File imageFile = new File(nameDir, "current/fsimage");
+        File imageFile = new File(nameDir, "current/fsimage_0");
         MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
         
         // Try to start a new cluster
@@ -469,7 +467,7 @@ public class TestStartup extends TestCas
             .build();
           fail("Should not have successfully started with corrupt image");
         } catch (IOException ioe) {
-          if (!ioe.getMessage().contains("is corrupt with MD5")) {
+          if (!ioe.getCause().getMessage().contains("is corrupt with MD5")) {
             throw ioe;
           }
         }
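A thread running through the TestStartup changes (and the rest of this patch): storage files are now addressed by transaction id, hence getStorageFile(sd, type, txid) and the fsimage_0 path above. From the literal paths in this commit -- current/fsimage_0, current/fsimage_4, current/edits_1-4, current/edits_inprogress_5 -- the naming scheme can be sketched as follows; the real construction lives in NNStorage, and these builders are illustrative only:

    import java.io.File;

    class TxidNames {
      static File imageFile(File currentDir, long txid) {
        return new File(currentDir, "fsimage_" + txid);
      }
      static File inProgressEditsFile(File currentDir, long firstTxId) {
        return new File(currentDir, "edits_inprogress_" + firstTxId);
      }
      static File finalizedEditsFile(File currentDir,
                                     long firstTxId, long lastTxId) {
        return new File(currentDir, "edits_" + firstTxId + "-" + lastTxId);
      }
    }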

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=1126286&r1=1126285&r2=1126286&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Mon May 23 01:19:49 2011
@@ -68,20 +68,7 @@ public class TestStorageRestore extends 
   static final int blockSize = 4096;
   static final int fileSize = 8192;
   private File path1, path2, path3;
-  private MiniDFSCluster cluster;
-
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-  throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true,
-        fileSys.getConf().getInt("io.file.buffer.size", 4096),
-        (short)repl, (long)blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
-  
+  private MiniDFSCluster cluster;  
  
   protected void setUp() throws Exception {
     config = new HdfsConfiguration();
@@ -118,7 +105,7 @@ public class TestStorageRestore extends 
   }
   
   /**
-   * invalidate storage by removing storage directories
+   * invalidate storage by removing the second and third storage directories
    */
   public void invalidateStorage(FSImage fi) throws IOException {
     ArrayList<StorageDirectory> al = new ArrayList<StorageDirectory>(2);
@@ -153,119 +140,14 @@ public class TestStorageRestore extends 
     LOG.info("current storages and corresoponding sizes:");
     for(Iterator<StorageDirectory> it = fs.getStorage().dirIterator(); it.hasNext(); ) {
       StorageDirectory sd = it.next();
-      
-      if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
-        File imf = fs.getStorage().getStorageFile(sd, NameNodeFile.IMAGE);
-        LOG.info("  image file " + imf.getAbsolutePath() + "; len = " + imf.length());  
-      }
-      if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
-        File edf = fs.getStorage().getStorageFile(sd, NameNodeFile.EDITS);
-        LOG.info("  edits file " + edf.getAbsolutePath() + "; len = " + edf.length()); 
-      }
-    }
-  }
-  
-  
-  /**
-   * This function returns a md5 hash of a file.
-   * 
-   * @param file input file
-   * @return The md5 string
-   */
-  public String getFileMD5(File file) throws Exception {
-    String res = new String();
-    MessageDigest mD = MessageDigest.getInstance("MD5");
-    DataInputStream dis = new DataInputStream(new FileInputStream(file));
 
-    try {
-      while(true) {
-        mD.update(dis.readByte());
-      }
-    } catch (EOFException eof) {}
-
-    BigInteger bigInt = new BigInteger(1, mD.digest());
-    res = bigInt.toString(16);
-    dis.close();
-
-    return res;
-  }
-
-  
-  /**
-   * read currentCheckpointTime directly from the file  TODO this is dup code
-   * @param currDir
-   * @return the checkpoint time
-   * @throws IOException
-   */
-  long readCheckpointTime(File currDir) throws IOException {
-    File timeFile = new File(currDir, NameNodeFile.TIME.getName()); 
-    long timeStamp = 0L;
-    if (timeFile.exists() && timeFile.canRead()) {
-      DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
-      try {
-        timeStamp = in.readLong();
-      } finally {
-        in.close();
+      File curDir = sd.getCurrentDir();
+      for (File f : curDir.listFiles()) {
+        LOG.info("  file " + f.getAbsolutePath() + "; len = " + f.length());  
       }
     }
-    return timeStamp;
   }
-  
-  /**
-   *  check if files exist/not exist
-   * @throws IOException 
-   */
-  public void checkFiles(boolean valid) throws IOException {
-    //look at the valid storage
-    File fsImg1 = new File(path1, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.IMAGE.getName());
-    File fsImg2 = new File(path2, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.IMAGE.getName());
-    File fsImg3 = new File(path3, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.IMAGE.getName());
-
-    File fsEdits1 = new File(path1, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.EDITS.getName());
-    File fsEdits2 = new File(path2, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.EDITS.getName());
-    File fsEdits3 = new File(path3, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.EDITS.getName());
 
-    String md5_1 = null,md5_2 = null,md5_3 = null;
-    try {
-      md5_1 = getFileMD5(fsEdits1);
-      md5_2 = getFileMD5(fsEdits2);
-      md5_3 = getFileMD5(fsEdits3);
-    } catch (Exception e) {
-      System.err.println("md 5 calculation failed:" + e.getLocalizedMessage());
-    }
-    this.printStorages(cluster.getNameNode().getFSImage());
-    
-    LOG.info("++++ image files = "+fsImg1.getAbsolutePath() + "," + fsImg2.getAbsolutePath() + ","+ fsImg3.getAbsolutePath());
-    LOG.info("++++ edits files = "+fsEdits1.getAbsolutePath() + "," + fsEdits2.getAbsolutePath() + ","+ fsEdits3.getAbsolutePath());
-    LOG.info("checkFiles compares lengths: img1=" + fsImg1.length()  + ",img2=" + fsImg2.length()  + ",img3=" + fsImg3.length());
-    LOG.info("checkFiles compares lengths: edits1=" + fsEdits1.length()  + ",edits2=" + fsEdits2.length()  + ",edits3=" + fsEdits3.length());
-    LOG.info("checkFiles compares md5s: " + fsEdits1.getAbsolutePath() + 
-        "="+ md5_1  + "," + fsEdits2.getAbsolutePath() + "=" + md5_2  + "," +
-        fsEdits3.getAbsolutePath() + "=" + md5_3);  
-    
-    if(valid) {
-      // should be the same
-      assertTrue(fsImg1.length() == fsImg2.length());
-      assertTrue(0 == fsImg3.length()); //shouldn't be created
-      assertTrue(fsEdits1.length() == fsEdits2.length());
-      assertTrue(fsEdits1.length() == fsEdits3.length());
-      assertTrue(md5_1.equals(md5_2));
-      assertTrue(md5_1.equals(md5_3));
-    } else {
-      // should be different
-      //assertTrue(fsImg1.length() != fsImg2.length());
-      //assertTrue(fsImg1.length() != fsImg3.length());
-      long len1 = EditLogFileInputStream.getValidLength(fsEdits1);
-      long len2 = EditLogFileInputStream.getValidLength(fsEdits2);
-      long len3 = EditLogFileInputStream.getValidLength(fsEdits3);
-      assertTrue("edits1 = edits2", len1 != len2);
-      assertTrue("edits1 = edits3", len1 != len3);
-      
-      assertTrue(!md5_1.equals(md5_2));
-      assertTrue(!md5_1.equals(md5_3));
-    }
-  }
-  
   /**
    * test 
    * 1. create DFS cluster with 3 storage directories - 2 EDITS_IMAGE, 1 EDITS
@@ -280,7 +162,7 @@ public class TestStorageRestore extends 
    */
   @SuppressWarnings("deprecation")
   public void testStorageRestore() throws Exception {
-    int numDatanodes = 2;
+    int numDatanodes = 0;
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes)
                                                 .manageNameDfsDirs(false)
                                                 .build();
@@ -292,36 +174,81 @@ public class TestStorageRestore extends 
     
     FileSystem fs = cluster.getFileSystem();
     Path path = new Path("/", "test");
-    writeFile(fs, path, 2);
+    assertTrue(fs.mkdirs(path));
     
-    System.out.println("****testStorageRestore: file test written, invalidating storage...");
+    System.out.println("****testStorageRestore: dir 'test' created, invalidating storage...");
   
     invalidateStorage(cluster.getNameNode().getFSImage());
-    //secondary.doCheckpoint(); // this will cause storages to be removed.
     printStorages(cluster.getNameNode().getFSImage());
-    System.out.println("****testStorageRestore: storage invalidated + doCheckpoint");
+    System.out.println("****testStorageRestore: storage invalidated");
 
     path = new Path("/", "test1");
-    writeFile(fs, path, 2);
-    System.out.println("****testStorageRestore: file test1 written");
-    
-    checkFiles(false); // SHOULD BE FALSE
-    
+    assertTrue(fs.mkdirs(path));
+
+    System.out.println("****testStorageRestore: dir 'test1' created");
+
+    // We did another edit, so the still-active directory at 'path1'
+    // should now differ from the others
+    FSImageTestUtil.assertFileContentsDifferent(2,
+        new File(path1, "current/edits_inprogress_1"),
+        new File(path2, "current/edits_inprogress_1"),
+        new File(path3, "current/edits_inprogress_1"));
+    FSImageTestUtil.assertFileContentsSame(
+        new File(path2, "current/edits_inprogress_1"),
+        new File(path3, "current/edits_inprogress_1"));
+        
     System.out.println("****testStorageRestore: checkfiles(false) run");
     
     secondary.doCheckpoint();  ///should enable storage..
     
-    checkFiles(true);
-    System.out.println("****testStorageRestore: second Checkpoint done and checkFiles(true) run");
+    // We should have a checkpoint through txid 4 in the two image dirs
+    // (txid=4 for BEGIN, mkdir, mkdir, END)
+    FSImageTestUtil.assertFileContentsSame(
+        new File(path1, "current/fsimage_4"),
+        new File(path2, "current/fsimage_4"));
+    assertFalse("Should not have any image in an edits-only directory",
+        new File(path3, "current/fsimage_4").exists());
+
+    // Should have finalized logs in the directory that didn't fail
+    assertTrue("Should have finalized logs in the directory that didn't fail",
+        new File(path1, "current/edits_1-4").exists());
+    // Should not have finalized logs in the failed directories
+    assertFalse("Should not have finalized logs in the failed directories",
+        new File(path2, "current/edits_1-4").exists());
+    assertFalse("Should not have finalized logs in the failed directories",
+        new File(path3, "current/edits_1-4").exists());
+    
+    // The new log segment should be in all of the directories.
+    FSImageTestUtil.assertFileContentsSame(
+        new File(path1, "current/edits_inprogress_5"),
+        new File(path2, "current/edits_inprogress_5"),
+        new File(path3, "current/edits_inprogress_5"));
+    String md5BeforeEdit = FSImageTestUtil.getFileMD5(
+        new File(path1, "current/edits_inprogress_5"));
     
-    // verify that all the logs are active
+    // Do another edit to verify that all the logs are active.
     path = new Path("/", "test2");
-    writeFile(fs, path, 2);
-    System.out.println("****testStorageRestore: wrote a file and checkFiles(true) run");
-    checkFiles(true);
-    
+    assertTrue(fs.mkdirs(path));
+
+    // Logs should be changed by the edit.
+    String md5AfterEdit =  FSImageTestUtil.getFileMD5(
+        new File(path1, "current/edits_inprogress_5"));
+    assertFalse(md5BeforeEdit.equals(md5AfterEdit));
+
+    // And all logs should be changed.
+    FSImageTestUtil.assertFileContentsSame(
+        new File(path1, "current/edits_inprogress_5"),
+        new File(path2, "current/edits_inprogress_5"),
+        new File(path3, "current/edits_inprogress_5"));
+
     secondary.shutdown();
     cluster.shutdown();
+    
+    // All logs should be finalized by clean shutdown
+    FSImageTestUtil.assertFileContentsSame(
+        new File(path1, "current/edits_5-7"),
+        new File(path2, "current/edits_5-7"),        
+        new File(path3, "current/edits_5-7"));
   }
   
   /**

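Pulling the testStorageRestore assertions above together, the expected life cycle of files in the three storage directories is (txids per the diff's own comment: 1=BEGIN, 2=mkdir /test, 3=mkdir /test1, 4=END at the checkpoint; then 5=BEGIN, 6=mkdir /test2, 7=END at shutdown):

    // While segment 1 is open:
    //   path1, path2, path3:           current/edits_inprogress_1
    //   (the invalidated dirs path2/path3 stop receiving edits, so their
    //    copies match each other but differ from path1's)
    //
    // After secondary.doCheckpoint() restores the failed dirs:
    //   path1 (image+edits, healthy):  fsimage_4, edits_1-4, edits_inprogress_5
    //   path2 (image+edits, restored): fsimage_4,            edits_inprogress_5
    //   path3 (edits only,  restored):                       edits_inprogress_5
    //
    // After clean shutdown finalizes segment 2:
    //   all three directories:         edits_5-7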
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java?rev=1126286&r1=1126285&r2=1126286&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java Mon May 23 01:19:49 2011
@@ -58,7 +58,7 @@ public class TestOfflineEditsViewer {
     System.getProperty("test.build.data", "build/test/data");
 
   private static String cacheDir =
-    System.getProperty("test.cache.data", "build/test/data/cache");
+    System.getProperty("test.cache.data", "build/test/cache");
 
   // to create edits and get edits filename
   private static final OfflineEditsViewerHelper nnHelper 
@@ -85,6 +85,11 @@ public class TestOfflineEditsViewer {
     obsoleteOpCodes.put(FSEditLogOpCodes.OP_JSPOOL_START, true);
   }
 
+  @Before
+  public void setup() {
+    new File(cacheDir).mkdirs();
+  }
+  
   /**
    * Test the OfflineEditsViewer
    */

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored?rev=1126286&r1=1126285&r2=1126286&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml?rev=1126286&r1=1126285&r2=1126286&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml Mon May 23 01:19:49 2011
@@ -1,424 +1,500 @@
 <?xml version="1.0"?>
 <EDITS>
-  <EDITS_VERSION>-24</EDITS_VERSION>
+  <EDITS_VERSION>-37</EDITS_VERSION>
+  <RECORD>
+    <OPCODE>23</OPCODE>
+    <DATA>
+      <TRANSACTION_ID>1</TRANSACTION_ID>
+    </DATA>
+    <CHECKSUM>1504643968</CHECKSUM>
+  </RECORD>
   <RECORD>
     <OPCODE>21</OPCODE>
     <DATA>
+      <TRANSACTION_ID>2</TRANSACTION_ID>
       <KEY_ID>1</KEY_ID>
-      <KEY_EXPIRY_DATE>1287183164658</KEY_EXPIRY_DATE>
+      <KEY_EXPIRY_DATE>1304751257518</KEY_EXPIRY_DATE>
       <KEY_LENGTH>3</KEY_LENGTH>
-      <KEY_BLOB>drEs</KEY_BLOB>
+      <KEY_BLOB>2FhO</KEY_BLOB>
     </DATA>
+    <CHECKSUM>-174778556</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>21</OPCODE>
     <DATA>
+      <TRANSACTION_ID>3</TRANSACTION_ID>
       <KEY_ID>2</KEY_ID>
-      <KEY_EXPIRY_DATE>1287183164703</KEY_EXPIRY_DATE>
+      <KEY_EXPIRY_DATE>1304751257521</KEY_EXPIRY_DATE>
       <KEY_LENGTH>3</KEY_LENGTH>
-      <KEY_BLOB>1cGc</KEY_BLOB>
+      <KEY_BLOB>77-r</KEY_BLOB>
     </DATA>
+    <CHECKSUM>1565957291</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>10</OPCODE>
     <DATA>
+      <TRANSACTION_ID>4</TRANSACTION_ID>
       <GENERATION_STAMP>1001</GENERATION_STAMP>
     </DATA>
+    <CHECKSUM>1423210231</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>0</OPCODE>
     <DATA>
+      <TRANSACTION_ID>5</TRANSACTION_ID>
       <LENGTH>5</LENGTH>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1286491964741</MTIME>
-      <ATIME>1286491964741</ATIME>
+      <MTIME>1304060057562</MTIME>
+      <ATIME>1304060057562</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <NUMBLOCKS>0</NUMBLOCKS>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>420</FS_PERMISSIONS>
       </PERMISSION_STATUS>
-      <CLIENT_NAME>DFSClient_471171074</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-66857152_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
     </DATA>
+    <CHECKSUM>-1854451489</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>9</OPCODE>
     <DATA>
+      <TRANSACTION_ID>6</TRANSACTION_ID>
       <LENGTH>5</LENGTH>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1286491964758</MTIME>
-      <ATIME>1286491964741</ATIME>
+      <MTIME>1304060057572</MTIME>
+      <ATIME>1304060057562</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <NUMBLOCKS>0</NUMBLOCKS>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>420</FS_PERMISSIONS>
       </PERMISSION_STATUS>
     </DATA>
+    <CHECKSUM>617592855</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>1</OPCODE>
     <DATA>
+      <TRANSACTION_ID>7</TRANSACTION_ID>
       <LENGTH>3</LENGTH>
       <SOURCE>/file_create</SOURCE>
       <DESTINATION>/file_moved</DESTINATION>
-      <TIMESTAMP>1286491964766</TIMESTAMP>
+      <TIMESTAMP>1304060057575</TIMESTAMP>
     </DATA>
+    <CHECKSUM>367100554</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>2</OPCODE>
     <DATA>
+      <TRANSACTION_ID>8</TRANSACTION_ID>
       <LENGTH>2</LENGTH>
       <PATH>/file_moved</PATH>
-      <TIMESTAMP>1286491964775</TIMESTAMP>
+      <TIMESTAMP>1304060057577</TIMESTAMP>
     </DATA>
+    <CHECKSUM>1048346698</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>3</OPCODE>
     <DATA>
+      <TRANSACTION_ID>9</TRANSACTION_ID>
       <LENGTH>3</LENGTH>
       <PATH>/directory_mkdir</PATH>
-      <TIMESTAMP>1286491964783</TIMESTAMP>
+      <TIMESTAMP>1304060057581</TIMESTAMP>
       <ATIME>0</ATIME>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>493</FS_PERMISSIONS>
       </PERMISSION_STATUS>
     </DATA>
+    <CHECKSUM>1207240248</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>10</OPCODE>
     <DATA>
+      <TRANSACTION_ID>10</TRANSACTION_ID>
       <GENERATION_STAMP>1002</GENERATION_STAMP>
     </DATA>
+    <CHECKSUM>85982431</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>0</OPCODE>
     <DATA>
+      <TRANSACTION_ID>11</TRANSACTION_ID>
       <LENGTH>5</LENGTH>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1286491964796</MTIME>
-      <ATIME>1286491964796</ATIME>
+      <MTIME>1304060057584</MTIME>
+      <ATIME>1304060057584</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <NUMBLOCKS>0</NUMBLOCKS>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>420</FS_PERMISSIONS>
       </PERMISSION_STATUS>
-      <CLIENT_NAME>DFSClient_471171074</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-66857152_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
     </DATA>
+    <CHECKSUM>1796314473</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>9</OPCODE>
     <DATA>
+      <TRANSACTION_ID>12</TRANSACTION_ID>
       <LENGTH>5</LENGTH>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1286491964814</MTIME>
-      <ATIME>1286491964796</ATIME>
+      <MTIME>1304060057588</MTIME>
+      <ATIME>1304060057584</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <NUMBLOCKS>0</NUMBLOCKS>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>420</FS_PERMISSIONS>
       </PERMISSION_STATUS>
     </DATA>
+    <CHECKSUM>1017626905</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>4</OPCODE>
     <DATA>
+      <TRANSACTION_ID>13</TRANSACTION_ID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
     </DATA>
+    <CHECKSUM>1842610087</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>7</OPCODE>
     <DATA>
+      <TRANSACTION_ID>14</TRANSACTION_ID>
       <PATH>/file_create</PATH>
       <FS_PERMISSIONS>511</FS_PERMISSIONS>
     </DATA>
+    <CHECKSUM>605568911</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>8</OPCODE>
     <DATA>
+      <TRANSACTION_ID>15</TRANSACTION_ID>
       <PATH>/file_create</PATH>
       <USERNAME>newOwner</USERNAME>
       <GROUPNAME/>
     </DATA>
+    <CHECKSUM>-1411790340</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>13</OPCODE>
     <DATA>
+      <TRANSACTION_ID>16</TRANSACTION_ID>
       <LENGTH>3</LENGTH>
       <PATH>/file_create</PATH>
       <MTIME>1285195527000</MTIME>
       <ATIME>1285195527000</ATIME>
     </DATA>
+    <CHECKSUM>1428793678</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>14</OPCODE>
     <DATA>
+      <TRANSACTION_ID>17</TRANSACTION_ID>
       <PATH>/directory_mkdir</PATH>
       <NS_QUOTA>1000</NS_QUOTA>
       <DS_QUOTA>-1</DS_QUOTA>
     </DATA>
+    <CHECKSUM>-1476130374</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>15</OPCODE>
     <DATA>
+      <TRANSACTION_ID>18</TRANSACTION_ID>
       <LENGTH>3</LENGTH>
       <SOURCE>/file_create</SOURCE>
       <DESTINATION>/file_moved</DESTINATION>
-      <TIMESTAMP>1286491964858</TIMESTAMP>
+      <TIMESTAMP>1304060057605</TIMESTAMP>
       <RENAME_OPTIONS>AA</RENAME_OPTIONS>
     </DATA>
+    <CHECKSUM>-1155144192</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>10</OPCODE>
     <DATA>
+      <TRANSACTION_ID>19</TRANSACTION_ID>
       <GENERATION_STAMP>1003</GENERATION_STAMP>
     </DATA>
+    <CHECKSUM>1920677987</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>0</OPCODE>
     <DATA>
+      <TRANSACTION_ID>20</TRANSACTION_ID>
       <LENGTH>5</LENGTH>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1286491964873</MTIME>
-      <ATIME>1286491964873</ATIME>
+      <MTIME>1304060057613</MTIME>
+      <ATIME>1304060057613</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <NUMBLOCKS>0</NUMBLOCKS>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>420</FS_PERMISSIONS>
       </PERMISSION_STATUS>
-      <CLIENT_NAME>DFSClient_471171074</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-66857152_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
     </DATA>
+    <CHECKSUM>-428545606</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>9</OPCODE>
     <DATA>
+      <TRANSACTION_ID>21</TRANSACTION_ID>
       <LENGTH>5</LENGTH>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1286491965024</MTIME>
-      <ATIME>1286491964873</ATIME>
+      <MTIME>1304060057694</MTIME>
+      <ATIME>1304060057613</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <NUMBLOCKS>3</NUMBLOCKS>
       <BLOCK>
-        <BLOCK_ID>1096087107607101866</BLOCK_ID>
+        <BLOCK_ID>3459038074990663911</BLOCK_ID>
         <BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
         <BLOCK_GENERATION_STAMP>1003</BLOCK_GENERATION_STAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>8798023959648425597</BLOCK_ID>
+        <BLOCK_ID>-5555244278278879146</BLOCK_ID>
         <BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
         <BLOCK_GENERATION_STAMP>1003</BLOCK_GENERATION_STAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>4060815343079109399</BLOCK_ID>
+        <BLOCK_ID>-6344128791846831740</BLOCK_ID>
         <BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
         <BLOCK_GENERATION_STAMP>1003</BLOCK_GENERATION_STAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>420</FS_PERMISSIONS>
       </PERMISSION_STATUS>
     </DATA>
+    <CHECKSUM>707995174</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>10</OPCODE>
     <DATA>
+      <TRANSACTION_ID>22</TRANSACTION_ID>
       <GENERATION_STAMP>1004</GENERATION_STAMP>
     </DATA>
+    <CHECKSUM>-1500977009</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>0</OPCODE>
     <DATA>
+      <TRANSACTION_ID>23</TRANSACTION_ID>
       <LENGTH>5</LENGTH>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1286491965035</MTIME>
-      <ATIME>1286491965035</ATIME>
+      <MTIME>1304060057701</MTIME>
+      <ATIME>1304060057701</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <NUMBLOCKS>0</NUMBLOCKS>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>420</FS_PERMISSIONS>
       </PERMISSION_STATUS>
-      <CLIENT_NAME>DFSClient_471171074</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-66857152_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
     </DATA>
+    <CHECKSUM>-119850856</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>9</OPCODE>
     <DATA>
+      <TRANSACTION_ID>24</TRANSACTION_ID>
       <LENGTH>5</LENGTH>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1286491965093</MTIME>
-      <ATIME>1286491965035</ATIME>
+      <MTIME>1304060057737</MTIME>
+      <ATIME>1304060057701</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <NUMBLOCKS>3</NUMBLOCKS>
       <BLOCK>
-        <BLOCK_ID>85340326229460895</BLOCK_ID>
+        <BLOCK_ID>4671949296381030428</BLOCK_ID>
         <BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
         <BLOCK_GENERATION_STAMP>1004</BLOCK_GENERATION_STAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>4456960998526419279</BLOCK_ID>
+        <BLOCK_ID>-844362243522407159</BLOCK_ID>
         <BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
         <BLOCK_GENERATION_STAMP>1004</BLOCK_GENERATION_STAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-6161739531018161735</BLOCK_ID>
+        <BLOCK_ID>3476886462779656950</BLOCK_ID>
         <BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
         <BLOCK_GENERATION_STAMP>1004</BLOCK_GENERATION_STAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>420</FS_PERMISSIONS>
       </PERMISSION_STATUS>
     </DATA>
+    <CHECKSUM>-766805874</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>10</OPCODE>
     <DATA>
+      <TRANSACTION_ID>25</TRANSACTION_ID>
       <GENERATION_STAMP>1005</GENERATION_STAMP>
     </DATA>
+    <CHECKSUM>238426056</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>0</OPCODE>
     <DATA>
+      <TRANSACTION_ID>26</TRANSACTION_ID>
       <LENGTH>5</LENGTH>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1286491965105</MTIME>
-      <ATIME>1286491965105</ATIME>
+      <MTIME>1304060057742</MTIME>
+      <ATIME>1304060057742</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <NUMBLOCKS>0</NUMBLOCKS>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>420</FS_PERMISSIONS>
       </PERMISSION_STATUS>
-      <CLIENT_NAME>DFSClient_471171074</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-66857152_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
     </DATA>
+    <CHECKSUM>1156254705</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>9</OPCODE>
     <DATA>
+      <TRANSACTION_ID>27</TRANSACTION_ID>
       <LENGTH>5</LENGTH>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1286491965148</MTIME>
-      <ATIME>1286491965105</ATIME>
+      <MTIME>1304060057764</MTIME>
+      <ATIME>1304060057742</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <NUMBLOCKS>3</NUMBLOCKS>
       <BLOCK>
-        <BLOCK_ID>-3894328423940677915</BLOCK_ID>
+        <BLOCK_ID>-754893470864399741</BLOCK_ID>
         <BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
         <BLOCK_GENERATION_STAMP>1005</BLOCK_GENERATION_STAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-2833847567910728858</BLOCK_ID>
+        <BLOCK_ID>1820875380010181049</BLOCK_ID>
         <BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
         <BLOCK_GENERATION_STAMP>1005</BLOCK_GENERATION_STAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-3654781106237722465</BLOCK_ID>
+        <BLOCK_ID>8266387560744259971</BLOCK_ID>
         <BLOCK_NUM_BYTES>512</BLOCK_NUM_BYTES>
         <BLOCK_GENERATION_STAMP>1005</BLOCK_GENERATION_STAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>420</FS_PERMISSIONS>
       </PERMISSION_STATUS>
     </DATA>
+    <CHECKSUM>-654780301</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>16</OPCODE>
     <DATA>
+      <TRANSACTION_ID>28</TRANSACTION_ID>
       <LENGTH>4</LENGTH>
       <CONCAT_TARGET>/file_concat_target</CONCAT_TARGET>
       <CONCAT_SOURCE>/file_concat_0</CONCAT_SOURCE>
       <CONCAT_SOURCE>/file_concat_1</CONCAT_SOURCE>
-      <TIMESTAMP>1286491965157</TIMESTAMP>
+      <TIMESTAMP>1304060057767</TIMESTAMP>
     </DATA>
+    <CHECKSUM>1273279541</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>17</OPCODE>
     <DATA>
+      <TRANSACTION_ID>29</TRANSACTION_ID>
       <LENGTH>4</LENGTH>
       <SOURCE>/file_symlink</SOURCE>
       <DESTINATION>/file_concat_target</DESTINATION>
-      <MTIME>1286491965168</MTIME>
-      <ATIME>1286491965168</ATIME>
+      <MTIME>1304060057770</MTIME>
+      <ATIME>1304060057770</ATIME>
       <PERMISSION_STATUS>
-        <USERNAME>steffl</USERNAME>
+        <USERNAME>todd</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <FS_PERMISSIONS>511</FS_PERMISSIONS>
       </PERMISSION_STATUS>
     </DATA>
+    <CHECKSUM>1385678569</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>18</OPCODE>
     <DATA>
+      <TRANSACTION_ID>30</TRANSACTION_ID>
       <T_VERSION>0</T_VERSION>
-      <T_OWNER>steffl</T_OWNER>
+      <T_OWNER>todd</T_OWNER>
       <T_RENEWER>JobTracker</T_RENEWER>
       <T_REAL_USER/>
-      <T_ISSUE_DATE>1286491965176</T_ISSUE_DATE>
-      <T_MAX_DATE>1287096765176</T_MAX_DATE>
+      <T_ISSUE_DATE>1304060057773</T_ISSUE_DATE>
+      <T_MAX_DATE>1304664857773</T_MAX_DATE>
       <T_SEQUENCE_NUMBER>1</T_SEQUENCE_NUMBER>
       <T_MASTER_KEY_ID>2</T_MASTER_KEY_ID>
-      <T_EXPIRY_TIME>1286578365176</T_EXPIRY_TIME>
+      <T_EXPIRY_TIME>1304146457773</T_EXPIRY_TIME>
     </DATA>
+    <CHECKSUM>913145699</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>19</OPCODE>
     <DATA>
+      <TRANSACTION_ID>31</TRANSACTION_ID>
       <T_VERSION>0</T_VERSION>
-      <T_OWNER>steffl</T_OWNER>
+      <T_OWNER>todd</T_OWNER>
       <T_RENEWER>JobTracker</T_RENEWER>
       <T_REAL_USER/>
-      <T_ISSUE_DATE>1286491965176</T_ISSUE_DATE>
-      <T_MAX_DATE>1287096765176</T_MAX_DATE>
+      <T_ISSUE_DATE>1304060057773</T_ISSUE_DATE>
+      <T_MAX_DATE>1304664857773</T_MAX_DATE>
       <T_SEQUENCE_NUMBER>1</T_SEQUENCE_NUMBER>
       <T_MASTER_KEY_ID>2</T_MASTER_KEY_ID>
-      <T_EXPIRY_TIME>1286578365198</T_EXPIRY_TIME>
+      <T_EXPIRY_TIME>1304146457785</T_EXPIRY_TIME>
     </DATA>
+    <CHECKSUM>-1772039941</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>20</OPCODE>
     <DATA>
+      <TRANSACTION_ID>32</TRANSACTION_ID>
       <T_VERSION>0</T_VERSION>
-      <T_OWNER>steffl</T_OWNER>
+      <T_OWNER>todd</T_OWNER>
       <T_RENEWER>JobTracker</T_RENEWER>
       <T_REAL_USER/>
-      <T_ISSUE_DATE>1286491965176</T_ISSUE_DATE>
-      <T_MAX_DATE>1287096765176</T_MAX_DATE>
+      <T_ISSUE_DATE>1304060057773</T_ISSUE_DATE>
+      <T_MAX_DATE>1304664857773</T_MAX_DATE>
       <T_SEQUENCE_NUMBER>1</T_SEQUENCE_NUMBER>
       <T_MASTER_KEY_ID>2</T_MASTER_KEY_ID>
     </DATA>
+    <CHECKSUM>1382094146</CHECKSUM>
+  </RECORD>
+  <RECORD>
+    <OPCODE>22</OPCODE>
+    <DATA>
+      <TRANSACTION_ID>33</TRANSACTION_ID>
+    </DATA>
+    <CHECKSUM>1975140107</CHECKSUM>
   </RECORD>
   <RECORD>
     <OPCODE>-1</OPCODE>


