hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1333291 [3/3] - in /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project: dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs...
Date: Thu, 03 May 2012 02:14:26 GMT
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Thu May  3 02:14:01 2012
@@ -42,7 +42,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -63,6 +62,7 @@ import org.apache.hadoop.test.GenericTes
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
+import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -93,11 +93,15 @@ public class TestCheckpoint extends Test
   static final int fileSize = 8192;
   static final int numDatanodes = 3;
   short replication = 3;
+
+  private CheckpointFaultInjector faultInjector;
     
   @Override
   public void setUp() throws IOException {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
-    ErrorSimulator.initializeErrorSimulationEvent(5);
+    
+    faultInjector = Mockito.mock(CheckpointFaultInjector.class);
+    CheckpointFaultInjector.instance = faultInjector;
   }
 
   static void writeFile(FileSystem fileSys, Path name, int repl)
@@ -222,14 +226,18 @@ public class TestCheckpoint extends Test
       // Make the checkpoint fail after rolling the edits log.
       //
       SecondaryNameNode secondary = startSecondaryNameNode(conf);
-      ErrorSimulator.setErrorSimulation(0);
+      
+      Mockito.doThrow(new IOException(
+          "Injecting failure after rolling edit logs"))
+          .when(faultInjector).afterSecondaryCallsRollEditLog();
 
       try {
         secondary.doCheckpoint();  // this should fail
         assertTrue(false);
       } catch (IOException e) {
       }
-      ErrorSimulator.clearErrorSimulation(0);
+      
+      Mockito.reset(faultInjector);
       secondary.shutdown();
 
       //
@@ -282,14 +290,17 @@ public class TestCheckpoint extends Test
       // Make the checkpoint fail after uploading the new fsimage.
       //
       SecondaryNameNode secondary = startSecondaryNameNode(conf);
-      ErrorSimulator.setErrorSimulation(1);
+      
+      Mockito.doThrow(new IOException(
+          "Injecting failure after uploading new image"))
+          .when(faultInjector).afterSecondaryUploadsNewImage();
 
       try {
         secondary.doCheckpoint();  // this should fail
         assertTrue(false);
       } catch (IOException e) {
       }
-      ErrorSimulator.clearErrorSimulation(1);
+      Mockito.reset(faultInjector);
       secondary.shutdown();
 
       //
@@ -341,14 +352,17 @@ public class TestCheckpoint extends Test
       // Make the checkpoint fail after rolling the edit log.
       //
       SecondaryNameNode secondary = startSecondaryNameNode(conf);
-      ErrorSimulator.setErrorSimulation(0);
+
+      Mockito.doThrow(new IOException(
+          "Injecting failure after rolling edit logs"))
+          .when(faultInjector).afterSecondaryCallsRollEditLog();
 
       try {
         secondary.doCheckpoint();  // this should fail
         assertTrue(false);
       } catch (IOException e) {
       }
-      ErrorSimulator.clearErrorSimulation(0);
+      Mockito.reset(faultInjector);
       secondary.shutdown(); // secondary namenode crash!
 
       // start new instance of secondary and verify that 
@@ -395,6 +409,28 @@ public class TestCheckpoint extends Test
    * Used to truncate primary fsimage file.
    */
   public void testSecondaryFailsToReturnImage() throws IOException {
+    Mockito.doThrow(new IOException("If this exception is not caught by the " +
+        "name-node, fs image will be truncated."))
+        .when(faultInjector).aboutToSendFile(filePathContaining("secondary"));
+
+    doSecondaryFailsToReturnImage();
+  }
+  
+  /**
+   * Similar to above test, but uses an unchecked Error, and causes it
+   * before even setting the length header. This used to cause image
+   * truncation. Regression test for HDFS-3330.
+   */
+  public void testSecondaryFailsWithErrorBeforeSettingHeaders()
+      throws IOException {
+    Mockito.doThrow(new Error("If this exception is not caught by the " +
+        "name-node, fs image will be truncated."))
+        .when(faultInjector).beforeGetImageSetsHeaders();
+
+    doSecondaryFailsToReturnImage();
+  }
+
+  private void doSecondaryFailsToReturnImage() throws IOException {
     LOG.info("Starting testSecondaryFailsToReturnImage");
     Configuration conf = new HdfsConfiguration();
     Path file1 = new Path("checkpointRI.dat");
@@ -414,7 +450,6 @@ public class TestCheckpoint extends Test
       // Make the checkpoint
       //
       SecondaryNameNode secondary = startSecondaryNameNode(conf);
-      ErrorSimulator.setErrorSimulation(2);
 
       try {
         secondary.doCheckpoint();  // this should fail
@@ -424,7 +459,7 @@ public class TestCheckpoint extends Test
         GenericTestUtils.assertExceptionContains(
             "If this exception is not caught", e);
       }
-      ErrorSimulator.clearErrorSimulation(2);
+      Mockito.reset(faultInjector);
 
       // Verify that image file sizes did not change.
       for (StorageDirectory sd2 :
@@ -442,6 +477,17 @@ public class TestCheckpoint extends Test
     }
   }
 
+  private File filePathContaining(final String substring) {
+    return Mockito.<File>argThat(
+        new ArgumentMatcher<File>() {
+          @Override
+          public boolean matches(Object argument) {
+            String path = ((File)argument).getAbsolutePath();
+            return path.contains(substring);
+          }
+        });
+  }
+
   /**
    * Simulate 2NN failing to send the whole file (error type 3)
    * The length header in the HTTP transfer should prevent
@@ -450,7 +496,10 @@ public class TestCheckpoint extends Test
   public void testNameNodeImageSendFailWrongSize()
       throws IOException {
     LOG.info("Starting testNameNodeImageSendFailWrongSize");
-    doSendFailTest(3, "is not of the advertised size");
+    
+    Mockito.doReturn(true).when(faultInjector)
+      .shouldSendShortFile(filePathContaining("fsimage"));
+    doSendFailTest("is not of the advertised size");
   }
 
   /**
@@ -461,19 +510,21 @@ public class TestCheckpoint extends Test
   public void testNameNodeImageSendFailWrongDigest()
       throws IOException {
     LOG.info("Starting testNameNodeImageSendFailWrongDigest");
-    doSendFailTest(4, "does not match advertised digest");
+
+    Mockito.doReturn(true).when(faultInjector)
+        .shouldCorruptAByte(Mockito.any(File.class));
+    doSendFailTest("does not match advertised digest");
   }
 
   /**
    * Run a test where the 2NN runs into some kind of error when
    * sending the checkpoint back to the NN.
-   * @param errorType the ErrorSimulator type to trigger
    * @param exceptionSubstring an expected substring of the triggered exception
    */
-  private void doSendFailTest(int errorType, String exceptionSubstring)
+  private void doSendFailTest(String exceptionSubstring)
       throws IOException {
     Configuration conf = new HdfsConfiguration();
-    Path file1 = new Path("checkpoint-doSendFailTest-" + errorType + ".dat");
+    Path file1 = new Path("checkpoint-doSendFailTest-" + getName() + ".dat");
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(numDatanodes)
                                                .build();
@@ -485,7 +536,6 @@ public class TestCheckpoint extends Test
       // Make the checkpoint fail after rolling the edit log.
       //
       SecondaryNameNode secondary = startSecondaryNameNode(conf);
-      ErrorSimulator.setErrorSimulation(errorType);
 
       try {
         secondary.doCheckpoint();  // this should fail
@@ -494,7 +544,7 @@ public class TestCheckpoint extends Test
         // We only sent part of the image. Have to trigger this exception
         GenericTestUtils.assertExceptionContains(exceptionSubstring, e);
       }
-      ErrorSimulator.clearErrorSimulation(errorType);
+      Mockito.reset(faultInjector);
       secondary.shutdown(); // secondary namenode crash!
 
       // start new instance of secondary and verify that 
@@ -1017,7 +1067,9 @@ public class TestCheckpoint extends Test
   
       secondary = startSecondaryNameNode(conf);
 
-      ErrorSimulator.setErrorSimulation(1);
+      Mockito.doThrow(new IOException(
+          "Injecting failure after rolling edit logs"))
+          .when(faultInjector).afterSecondaryCallsRollEditLog();
       
       // Fail to checkpoint once
       try {
@@ -1025,7 +1077,7 @@ public class TestCheckpoint extends Test
         fail("Should have failed upload");
       } catch (IOException ioe) {
         LOG.info("Got expected failure", ioe);
-        assertTrue(ioe.toString().contains("Simulating error1"));
+        assertTrue(ioe.toString().contains("Injecting failure"));
       }
 
       // Fail to checkpoint again
@@ -1034,9 +1086,9 @@ public class TestCheckpoint extends Test
         fail("Should have failed upload");
       } catch (IOException ioe) {
         LOG.info("Got expected failure", ioe);
-        assertTrue(ioe.toString().contains("Simulating error1"));
+        assertTrue(ioe.toString().contains("Injecting failure"));
       } finally {
-        ErrorSimulator.clearErrorSimulation(1);
+        Mockito.reset(faultInjector);
       }
 
       // Now with the cleared error simulation, it should succeed
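
Taken together, the TestCheckpoint hunks above swap the old index-based
ErrorSimulator for a CheckpointFaultInjector singleton that tests replace
with a Mockito mock. The injector class itself is added in another part of
this commit; a minimal sketch of what the call sites above imply it looks
like (the method names come straight from the diff, while the no-op bodies
and default instance are assumptions):

    package org.apache.hadoop.hdfs.server.namenode;

    import java.io.File;
    import java.io.IOException;

    // Hedged sketch inferred from the TestCheckpoint call sites; the real
    // class ships elsewhere in this commit and may differ. Checkpointing
    // code invokes these hooks, which are no-ops unless a test installs a
    // mock via the static instance field.
    class CheckpointFaultInjector {
      static CheckpointFaultInjector instance = new CheckpointFaultInjector();

      void afterSecondaryCallsRollEditLog() throws IOException {}
      void afterSecondaryUploadsNewImage() throws IOException {}
      void aboutToSendFile(File localfile) throws IOException {}
      void beforeGetImageSetsHeaders() throws IOException {}
      boolean shouldSendShortFile(File localfile) { return false; }
      boolean shouldCorruptAByte(File localfile) { return false; }
    }

Tests arm a hook with Mockito.doThrow(...).when(faultInjector).<hook>() and
disarm all of them with Mockito.reset(faultInjector), replacing the old
setErrorSimulation(n)/clearErrorSimulation(n) index pairs.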

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Thu May  3 02:14:01 2012
@@ -228,6 +228,7 @@ public class TestFsck extends TestCase {
   }
 
   public void testFsckMoveAndDelete() throws Exception {
+    final int MAX_MOVE_TRIES = 5;
     DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3, 8*1024);
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
@@ -269,17 +270,19 @@ public class TestFsck extends TestCase {
       } 
       
       // After a fsck -move, the corrupted file should still exist.
-      outStr = runFsck(conf, 1, true, "/", "-move" );
-      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-      String[] newFileNames = util.getFileNames(topDir);
-      boolean found = false;
-      for (String f : newFileNames) {
-        if (f.equals(corruptFileName)) {
-          found = true;
-          break;
+      for (int i = 0; i < MAX_MOVE_TRIES; i++) {
+        outStr = runFsck(conf, 1, true, "/", "-move" );
+        assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+        String[] newFileNames = util.getFileNames(topDir);
+        boolean found = false;
+        for (String f : newFileNames) {
+          if (f.equals(corruptFileName)) {
+            found = true;
+            break;
+          }
         }
+        assertTrue(found);
       }
-      assertTrue(found);
 
       // Fix the filesystem by moving corrupted files to lost+found
       outStr = runFsck(conf, 1, true, "/", "-move", "-delete");

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java Thu May  3 02:14:01 2012
@@ -167,6 +167,15 @@ public abstract class HATestUtil {
       Configuration conf, String logicalName, int nsIndex) {
     InetSocketAddress nnAddr1 = cluster.getNameNode(2 * nsIndex).getNameNodeAddress();
     InetSocketAddress nnAddr2 = cluster.getNameNode(2 * nsIndex + 1).getNameNodeAddress();
+    setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
+  }
+
+  /**
+   * Sets the required configurations for performing failover
+   */
+  public static void setFailoverConfigurations(Configuration conf,
+      String logicalName, InetSocketAddress nnAddr1,
+      InetSocketAddress nnAddr2) {
     String nameNodeId1 = "nn1";
     String nameNodeId2 = "nn2";
     String address1 = "hdfs://" + nnAddr1.getHostName() + ":" + nnAddr1.getPort();
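
The HATestUtil change above is a pure extraction: the MiniDFSCluster-based
variant now delegates to a new overload that takes explicit NameNode
addresses, so tests without a running cluster can configure failover
directly. A hedged usage sketch (the host names and logical name below are
illustrative, not taken from the commit):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;

    // Hypothetical caller: wire a client Configuration to two NameNodes
    // without a MiniDFSCluster. Hosts and ports are made up for the example.
    Configuration conf = new Configuration();
    InetSocketAddress nn1 = new InetSocketAddress("nn1.example.com", 8020);
    InetSocketAddress nn2 = new InetSocketAddress("nn2.example.com", 8020);
    HATestUtil.setFailoverConfigurations(conf, "mycluster", nn1, nn2);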

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java Thu May  3 02:14:01 2012
@@ -98,38 +98,63 @@ public class TestWebHdfsUrl {
   }
   
   @Test
-  public void testSelectDelegationToken() throws Exception {
+  public void testSelectHdfsDelegationToken() throws Exception {
     SecurityUtilTestHelper.setTokenServiceUseIp(true);
 
     Configuration conf = new Configuration();
-    URI webHdfsUri = URI.create("webhdfs://localhost:0");
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    Token<?> token = null;
+    conf.setClass("fs.webhdfs.impl", MyWebHdfsFileSystem.class, FileSystem.class);
     
+    // test with implicit default port 
+    URI fsUri = URI.create("webhdfs://localhost");
+    MyWebHdfsFileSystem fs = (MyWebHdfsFileSystem) FileSystem.get(fsUri, conf);
+    checkTokenSelection(fs, conf);
+
+    // test with explicit default port
+    fsUri = URI.create("webhdfs://localhost:"+fs.getDefaultPort());
+    fs = (MyWebHdfsFileSystem) FileSystem.get(fsUri, conf);
+    checkTokenSelection(fs, conf);
+    
+    // test with non-default port
+    fsUri = URI.create("webhdfs://localhost:"+(fs.getDefaultPort()-1));
+    fs = (MyWebHdfsFileSystem) FileSystem.get(fsUri, conf);
+    checkTokenSelection(fs, conf);
+
+  }
+  
+  private void checkTokenSelection(MyWebHdfsFileSystem fs,
+                                   Configuration conf) throws IOException {
+    int port = fs.getCanonicalUri().getPort();
+    // can't clear tokens from ugi, so create a new user every time
+    UserGroupInformation ugi =
+        UserGroupInformation.createUserForTesting(fs.getUri().getAuthority(), new String[]{});
+
+    // use ip-based tokens
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
     // test fallback to hdfs token
     Token<?> hdfsToken = new Token<TokenIdentifier>(
         new byte[0], new byte[0],
         DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
         new Text("127.0.0.1:8020"));
     ugi.addToken(hdfsToken);
-    
-    WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(webHdfsUri, conf);
-    token = fs.selectDelegationToken();
+
+    // test fallback to hdfs token
+    Token<?> token = fs.selectDelegationToken(ugi);
     assertNotNull(token);
     assertEquals(hdfsToken, token);
-    
+
     // test webhdfs is favored over hdfs
     Token<?> webHdfsToken = new Token<TokenIdentifier>(
         new byte[0], new byte[0],
-        WebHdfsFileSystem.TOKEN_KIND, new Text("127.0.0.1:0"));
+        WebHdfsFileSystem.TOKEN_KIND, new Text("127.0.0.1:"+port));
     ugi.addToken(webHdfsToken);
-    token = fs.selectDelegationToken();
+    token = fs.selectDelegationToken(ugi);
     assertNotNull(token);
     assertEquals(webHdfsToken, token);
     
     // switch to using host-based tokens, no token should match
     SecurityUtilTestHelper.setTokenServiceUseIp(false);
-    token = fs.selectDelegationToken();
+    token = fs.selectDelegationToken(ugi);
     assertNull(token);
     
     // test fallback to hdfs token
@@ -138,18 +163,31 @@ public class TestWebHdfsUrl {
         DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
         new Text("localhost:8020"));
     ugi.addToken(hdfsToken);
-    token = fs.selectDelegationToken();
+    token = fs.selectDelegationToken(ugi);
     assertNotNull(token);
     assertEquals(hdfsToken, token);
 
     // test webhdfs is favored over hdfs
     webHdfsToken = new Token<TokenIdentifier>(
         new byte[0], new byte[0],
-        WebHdfsFileSystem.TOKEN_KIND, new Text("localhost:0"));
+        WebHdfsFileSystem.TOKEN_KIND, new Text("localhost:"+port));
     ugi.addToken(webHdfsToken);
-    token = fs.selectDelegationToken();
+    token = fs.selectDelegationToken(ugi);
     assertNotNull(token);
     assertEquals(webHdfsToken, token);
   }
-
-}
+  
+  static class MyWebHdfsFileSystem extends WebHdfsFileSystem {
+    @Override
+    public URI getCanonicalUri() {
+      return super.getCanonicalUri();
+    }
+    @Override
+    public int getDefaultPort() {
+      return super.getDefaultPort();
+    }
+    // don't automatically get a token
+    @Override
+    protected void initDelegationToken() throws IOException {}
+  }
+}
\ No newline at end of file
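
The rewritten token-selection test pivots on each token's service text:
with SecurityUtilTestHelper.setTokenServiceUseIp(true) services resolve as
ip:port, with false as host:port, which is why flipping the flag makes the
previously matching tokens come back null. A brief hedged sketch of how
such a service string is built (SecurityUtil.buildTokenService is the
standard helper; using it to mirror the selector's comparison key is an
inference from the assertions above):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.SecurityUtil;

    // In ip mode this yields "127.0.0.1:8020"; in host mode "localhost:8020".
    // A token is selected only when its service matches the string built for
    // the filesystem's address, so switching modes strands existing tokens.
    InetSocketAddress addr = new InetSocketAddress("localhost", 8020);
    Text service = SecurityUtil.buildTokenService(addr);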

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java Thu May  3 02:14:01 2012
@@ -123,6 +123,17 @@ public class TestNetworkTopology extends
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[2] == dataNodes[5]);
+    
+    // array contains local rack node which happens to be in position 0
+    testNodes[0] = dataNodes[1];
+    testNodes[1] = dataNodes[5];
+    testNodes[2] = dataNodes[3];
+    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    // pseudoSortByDistance does not take the "data center" layer into consideration
+    // and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
+    assertTrue(testNodes[0] == dataNodes[1]);
+    assertTrue(testNodes[1] == dataNodes[5]);
+    assertTrue(testNodes[2] == dataNodes[3]);
   }
   
   public void testRemove() throws Exception {

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1333291&r1=1333290&r2=1333291&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Thu May  3 02:14:01 2012
@@ -247,7 +247,7 @@
       <BLOCK>
         <BLOCK_ID>-7144805496741076283</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1004</GENERATION_STAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -266,12 +266,12 @@
       <BLOCK>
         <BLOCK_ID>-7144805496741076283</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1004</GENERATION_STAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4125931756867080767</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1005</GENERATION_STAMP>
+        <GENSTAMP>1005</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -290,17 +290,17 @@
       <BLOCK>
         <BLOCK_ID>-7144805496741076283</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1004</GENERATION_STAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4125931756867080767</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1005</GENERATION_STAMP>
+        <GENSTAMP>1005</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>1562413691487277050</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1006</GENERATION_STAMP>
+        <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -319,17 +319,17 @@
       <BLOCK>
         <BLOCK_ID>-7144805496741076283</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1004</GENERATION_STAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4125931756867080767</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1005</GENERATION_STAMP>
+        <GENSTAMP>1005</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>1562413691487277050</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1006</GENERATION_STAMP>
+        <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
         <USERNAME>todd</USERNAME>
@@ -379,7 +379,7 @@
       <BLOCK>
         <BLOCK_ID>6084289468290363112</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1008</GENERATION_STAMP>
+        <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -398,12 +398,12 @@
       <BLOCK>
         <BLOCK_ID>6084289468290363112</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1008</GENERATION_STAMP>
+        <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4219431127125026105</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1009</GENERATION_STAMP>
+        <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -422,17 +422,17 @@
       <BLOCK>
         <BLOCK_ID>6084289468290363112</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1008</GENERATION_STAMP>
+        <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4219431127125026105</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1009</GENERATION_STAMP>
+        <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-1765119074945211374</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1010</GENERATION_STAMP>
+        <GENSTAMP>1010</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -451,17 +451,17 @@
       <BLOCK>
         <BLOCK_ID>6084289468290363112</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1008</GENERATION_STAMP>
+        <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-4219431127125026105</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1009</GENERATION_STAMP>
+        <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-1765119074945211374</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1010</GENERATION_STAMP>
+        <GENSTAMP>1010</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
         <USERNAME>todd</USERNAME>
@@ -511,7 +511,7 @@
       <BLOCK>
         <BLOCK_ID>-7448471719302683860</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1012</GENERATION_STAMP>
+        <GENSTAMP>1012</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -530,12 +530,12 @@
       <BLOCK>
         <BLOCK_ID>-7448471719302683860</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1012</GENERATION_STAMP>
+        <GENSTAMP>1012</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-8051065559769974521</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1013</GENERATION_STAMP>
+        <GENSTAMP>1013</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -554,17 +554,17 @@
       <BLOCK>
         <BLOCK_ID>-7448471719302683860</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1012</GENERATION_STAMP>
+        <GENSTAMP>1012</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-8051065559769974521</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1013</GENERATION_STAMP>
+        <GENSTAMP>1013</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>3808670437711973616</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1014</GENERATION_STAMP>
+        <GENSTAMP>1014</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -583,17 +583,17 @@
       <BLOCK>
         <BLOCK_ID>-7448471719302683860</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1012</GENERATION_STAMP>
+        <GENSTAMP>1012</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>-8051065559769974521</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1013</GENERATION_STAMP>
+        <GENSTAMP>1013</GENSTAMP>
       </BLOCK>
       <BLOCK>
         <BLOCK_ID>3808670437711973616</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENERATION_STAMP>1014</GENERATION_STAMP>
+        <GENSTAMP>1014</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
         <USERNAME>todd</USERNAME>
@@ -722,7 +722,7 @@
       <BLOCK>
         <BLOCK_ID>-357061736603024522</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1016</GENERATION_STAMP>
+        <GENSTAMP>1016</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -734,7 +734,7 @@
       <BLOCK>
         <BLOCK_ID>-357061736603024522</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENERATION_STAMP>1016</GENERATION_STAMP>
+        <GENSTAMP>1016</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -769,7 +769,7 @@
       <BLOCK>
         <BLOCK_ID>-357061736603024522</BLOCK_ID>
         <NUM_BYTES>11</NUM_BYTES>
-        <GENERATION_STAMP>1017</GENERATION_STAMP>
+        <GENSTAMP>1017</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
         <USERNAME>todd</USERNAME>
@@ -779,9 +779,17 @@
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
+    <OPCODE>OP_SET_OWNER</OPCODE>
     <DATA>
       <TXID>59</TXID>
+      <SRC>/file_create</SRC>
+      <GROUPNAME>newGroup</GROUPNAME>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
+    <DATA>
+      <TXID>60</TXID>
     </DATA>
   </RECORD>
 </EDITS>


