hadoop-hdfs-commits mailing list archives

From: ji...@apache.org
Subject: svn commit: r1579814 [2/2] - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/ src/m...
Date: Thu, 20 Mar 2014 23:08:34 GMT
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java Thu Mar 20 23:08:32 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.qjournal.c
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -112,30 +113,39 @@ public class TestQuorumJournalManagerUni
 
   @Test
   public void testAllLoggersStartOk() throws Exception {
-    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong());
-    futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong());
-    futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong());
-    qjm.startLogSegment(1);
+    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
+    futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
+    futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
+    qjm.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
   }
 
   @Test
   public void testQuorumOfLoggersStartOk() throws Exception {
-    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong());
-    futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong());
+    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
+    futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
     futureThrows(new IOException("logger failed"))
-      .when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong());
-    qjm.startLogSegment(1);
+      .when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
+    qjm.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
   }
 
   @Test
   public void testQuorumOfLoggersFail() throws Exception {
-    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong());
+    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
     futureThrows(new IOException("logger failed"))
-    .when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong());
+    .when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
     futureThrows(new IOException("logger failed"))
-      .when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong());
+      .when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
     try {
-      qjm.startLogSegment(1);
+      qjm.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       fail("Did not throw when quorum failed");
     } catch (QuorumException qe) {
       GenericTestUtils.assertExceptionContains("logger failed", qe);
@@ -144,10 +154,14 @@ public class TestQuorumJournalManagerUni
   
   @Test
   public void testQuorumOutputStreamReport() throws Exception {
-    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong());
-    futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong());
-    futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong());
-    QuorumOutputStream os = (QuorumOutputStream) qjm.startLogSegment(1);
+    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
+    futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
+    futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
+    QuorumOutputStream os = (QuorumOutputStream) qjm.startLogSegment(1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     String report = os.generateReport();
     Assert.assertFalse("Report should be plain text", report.contains("<"));
   }
@@ -203,10 +217,14 @@ public class TestQuorumJournalManagerUni
   }
 
   private EditLogOutputStream createLogSegment() throws IOException {
-    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong());
-    futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong());
-    futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong());
-    EditLogOutputStream stm = qjm.startLogSegment(1);
+    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
+    futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
+    futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),
+        Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
+    EditLogOutputStream stm = qjm.startLogSegment(1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     return stm;
   }
 }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java Thu Mar 20 23:08:32 2014
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.qjournal.server;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -26,18 +29,23 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.QJMTestUtil;
-import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
 import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProtoOrBuilder;
-import org.apache.hadoop.hdfs.qjournal.server.Journal;
-import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.*;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
 import org.mockito.Mockito;
 
 public class TestJournal {
@@ -77,7 +85,36 @@ public class TestJournal {
   public void cleanup() {
     IOUtils.closeStream(journal);
   }
-  
+
+  /**
+   * Test whether JNs can correctly handle editlog that cannot be decoded.
+   */
+  @Test
+  public void testScanEditLog() throws Exception {
+    // use a future layout version
+    journal.startLogSegment(makeRI(1), 1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1);
+
+    // in the segment we write garbage editlog, which can be scanned but
+    // cannot be decoded
+    final int numTxns = 5;
+    byte[] ops = QJMTestUtil.createGabageTxns(1, 5);
+    journal.journal(makeRI(2), 1, 1, numTxns, ops);
+
+    // verify the in-progress editlog segment
+    SegmentStateProto segmentState = journal.getSegmentInfo(1);
+    assertTrue(segmentState.getIsInProgress());
+    Assert.assertEquals(numTxns, segmentState.getEndTxId());
+    Assert.assertEquals(1, segmentState.getStartTxId());
+    
+    // finalize the segment and verify it again
+    journal.finalizeLogSegment(makeRI(3), 1, numTxns);
+    segmentState = journal.getSegmentInfo(1);
+    assertFalse(segmentState.getIsInProgress());
+    Assert.assertEquals(numTxns, segmentState.getEndTxId());
+    Assert.assertEquals(1, segmentState.getStartTxId());
+  }
+
   @Test (timeout = 10000)
   public void testEpochHandling() throws Exception {
     assertEquals(0, journal.getLastPromisedEpoch());
@@ -96,7 +133,8 @@ public class TestJournal {
           "Proposed epoch 3 <= last promise 3", ioe);
     }
     try {
-      journal.startLogSegment(makeRI(1), 12345L);
+      journal.startLogSegment(makeRI(1), 12345L,
+          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       fail("Should have rejected call from prior epoch");
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains(
@@ -114,7 +152,8 @@ public class TestJournal {
   @Test (timeout = 10000)
   public void testMaintainCommittedTxId() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
-    journal.startLogSegment(makeRI(1), 1);
+    journal.startLogSegment(makeRI(1), 1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     // Send txids 1-3, with a request indicating only 0 committed
     journal.journal(new RequestInfo(JID, 1, 2, 0), 1, 1, 3,
         QJMTestUtil.createTxnData(1, 3));
@@ -129,7 +168,8 @@ public class TestJournal {
   @Test (timeout = 10000)
   public void testRestartJournal() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
-    journal.startLogSegment(makeRI(1), 1);
+    journal.startLogSegment(makeRI(1), 1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     journal.journal(makeRI(2), 1, 1, 2, 
         QJMTestUtil.createTxnData(1, 2));
     // Don't finalize.
@@ -153,7 +193,8 @@ public class TestJournal {
   @Test (timeout = 10000)
   public void testFormatResetsCachedValues() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 12345L);
-    journal.startLogSegment(new RequestInfo(JID, 12345L, 1L, 0L), 1L);
+    journal.startLogSegment(new RequestInfo(JID, 12345L, 1L, 0L), 1L,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
 
     assertEquals(12345L, journal.getLastPromisedEpoch());
     assertEquals(12345L, journal.getLastWriterEpoch());
@@ -176,11 +217,13 @@ public class TestJournal {
   @Test (timeout = 10000)
   public void testNewEpochAtBeginningOfSegment() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
-    journal.startLogSegment(makeRI(1), 1);
+    journal.startLogSegment(makeRI(1), 1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     journal.journal(makeRI(2), 1, 1, 2, 
         QJMTestUtil.createTxnData(1, 2));
     journal.finalizeLogSegment(makeRI(3), 1, 2);
-    journal.startLogSegment(makeRI(4), 3);
+    journal.startLogSegment(makeRI(4), 3,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     NewEpochResponseProto resp = journal.newEpoch(FAKE_NSINFO, 2);
     assertEquals(1, resp.getLastSegmentTxId());
   }
@@ -219,7 +262,8 @@ public class TestJournal {
   @Test (timeout = 10000)
   public void testFinalizeWhenEditsAreMissed() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
-    journal.startLogSegment(makeRI(1), 1);
+    journal.startLogSegment(makeRI(1), 1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     journal.journal(makeRI(2), 1, 1, 3,
         QJMTestUtil.createTxnData(1, 3));
     
@@ -276,7 +320,8 @@ public class TestJournal {
     journal.newEpoch(FAKE_NSINFO, 1);
     
     // Start a segment at txid 1, and write a batch of 3 txns.
-    journal.startLogSegment(makeRI(1), 1);
+    journal.startLogSegment(makeRI(1), 1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     journal.journal(makeRI(2), 1, 1, 3,
         QJMTestUtil.createTxnData(1, 3));
 
@@ -285,7 +330,8 @@ public class TestJournal {
     
     // Try to start new segment at txid 6, this should abort old segment and
     // then succeed, allowing us to write txid 6-9.
-    journal.startLogSegment(makeRI(3), 6);
+    journal.startLogSegment(makeRI(3), 6,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     journal.journal(makeRI(4), 6, 6, 3,
         QJMTestUtil.createTxnData(6, 3));
 
@@ -306,14 +352,16 @@ public class TestJournal {
     
     // Start a segment at txid 1, and write just 1 transaction. This
     // would normally be the START_LOG_SEGMENT transaction.
-    journal.startLogSegment(makeRI(1), 1);
+    journal.startLogSegment(makeRI(1), 1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     journal.journal(makeRI(2), 1, 1, 1,
         QJMTestUtil.createTxnData(1, 1));
     
     // Try to start new segment at txid 1, this should succeed, because
     // we are allowed to re-start a segment if we only ever had the
     // START_LOG_SEGMENT transaction logged.
-    journal.startLogSegment(makeRI(3), 1);
+    journal.startLogSegment(makeRI(3), 1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     journal.journal(makeRI(4), 1, 1, 1,
         QJMTestUtil.createTxnData(1, 1));
 
@@ -323,7 +371,8 @@ public class TestJournal {
         QJMTestUtil.createTxnData(2, 3));
 
     try {
-      journal.startLogSegment(makeRI(6), 1);
+      journal.startLogSegment(makeRI(6), 1,
+          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       fail("Did not fail to start log segment which would overwrite " +
           "an existing one");
     } catch (IllegalStateException ise) {
@@ -335,7 +384,8 @@ public class TestJournal {
     
     // Ensure that we cannot overwrite a finalized segment
     try {
-      journal.startLogSegment(makeRI(8), 1);
+      journal.startLogSegment(makeRI(8), 1,
+          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       fail("Did not fail to start log segment which would overwrite " +
           "an existing one");
     } catch (IllegalStateException ise) {

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java Thu Mar 20 23:08:32 2014
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.qjournal.p
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.server.Journal;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -111,7 +112,7 @@ public class TestJournalNode {
         conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
     ch.newEpoch(1).get();
     ch.setEpoch(1);
-    ch.startLogSegment(1).get();
+    ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
     ch.sendEdits(1L, 1, 1, "hello".getBytes(Charsets.UTF_8)).get();
     
     metrics = MetricsAsserts.getMetrics(
@@ -136,7 +137,7 @@ public class TestJournalNode {
   public void testReturnsSegmentInfoAtEpochTransition() throws Exception {
     ch.newEpoch(1).get();
     ch.setEpoch(1);
-    ch.startLogSegment(1).get();
+    ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
     ch.sendEdits(1L, 1, 2, QJMTestUtil.createTxnData(1, 2)).get();
     
     // Switch to a new epoch without closing earlier segment
@@ -152,7 +153,7 @@ public class TestJournalNode {
     assertEquals(1, response.getLastSegmentTxId());
     
     // Start a segment but don't write anything, check newEpoch segment info
-    ch.startLogSegment(3).get();
+    ch.startLogSegment(3, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
     response = ch.newEpoch(4).get();
     ch.setEpoch(4);
     // Because the new segment is empty, it is equivalent to not having
@@ -181,7 +182,7 @@ public class TestJournalNode {
         conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
     ch.newEpoch(1).get();
     ch.setEpoch(1);
-    ch.startLogSegment(1).get();
+    ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
     ch.sendEdits(1L, 1, 3, EDITS_DATA).get();
     ch.finalizeLogSegment(1, 3).get();
 
@@ -233,7 +234,7 @@ public class TestJournalNode {
     
     // Make a log segment, and prepare again -- this time should see the
     // segment existing.
-    ch.startLogSegment(1L).get();
+    ch.startLogSegment(1L, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
     ch.sendEdits(1L, 1L, 1, QJMTestUtil.createTxnData(1, 1)).get();
 
     prep = ch.prepareRecovery(1L).get();
@@ -322,7 +323,7 @@ public class TestJournalNode {
     byte[] data = new byte[editsSize];
     ch.newEpoch(1).get();
     ch.setEpoch(1);
-    ch.startLogSegment(1).get();
+    ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
     
     Stopwatch sw = new Stopwatch().start();
     for (int i = 1; i < numEdits; i++) {

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java Thu Mar 20 23:08:32 2014
@@ -67,7 +67,7 @@ public class TestDatanodeRegister { 
     // Return a a good software version.
     doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
     // Return a good layout version for now.
-    doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(fakeNsInfo)
+    doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
         .getLayoutVersion();
     
     DatanodeProtocolClientSideTranslatorPB fakeDnProt = 

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Thu Mar 20 23:08:32 2014
@@ -27,6 +27,7 @@ import static org.junit.Assert.fail;
 import java.io.BufferedInputStream;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
+import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
@@ -68,6 +69,8 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
+import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
@@ -76,6 +79,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.xml.sax.ContentHandler;
+import org.xml.sax.SAXException;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
@@ -88,7 +93,42 @@ public class TestEditLog {
   static {
     ((Log4JLogger)FSEditLog.LOG).getLogger().setLevel(Level.ALL);
   }
-  
+
+  /**
+   * A garbage mkdir op which is used for testing
+   * {@link EditLogFileInputStream#scanEditLog(File)}
+   */
+  public static class GarbageMkdirOp extends FSEditLogOp {
+    public GarbageMkdirOp() {
+      super(FSEditLogOpCodes.OP_MKDIR);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion) throws IOException {
+      throw new IOException("cannot decode GarbageMkdirOp");
+    }
+
+    @Override
+    public void writeFields(DataOutputStream out) throws IOException {
+      // write in some garbage content
+      Random random = new Random();
+      byte[] content = new byte[random.nextInt(16) + 1];
+      random.nextBytes(content);
+      out.write(content);
+    }
+
+    @Override
+    protected void toXml(ContentHandler contentHandler) throws SAXException {
+      throw new UnsupportedOperationException(
+          "Not supported for GarbageMkdirOp");
+    }
+    @Override
+    void fromXml(Stanza st) throws InvalidXmlException {
+      throw new UnsupportedOperationException(
+          "Not supported for GarbageMkdirOp");
+    }
+  }
+
   static final Log LOG = LogFactory.getLog(TestEditLog.class);
   
   static final int NUM_DATA_NODES = 0;
@@ -767,7 +807,7 @@ public class TestEditLog {
 
       EditLogFileOutputStream stream = new EditLogFileOutputStream(conf, log, 1024);
       try {
-        stream.create();
+        stream.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
         if (!inBothDirs) {
           break;
         }
@@ -820,7 +860,7 @@ public class TestEditLog {
 
       BufferedInputStream bin = new BufferedInputStream(input);
       DataInputStream in = new DataInputStream(bin);
-      version = EditLogFileInputStream.readLogVersion(in);
+      version = EditLogFileInputStream.readLogVersion(in, true);
       tracker = new FSEditLogLoader.PositionTrackingInputStream(in);
       in = new DataInputStream(tracker);
             
@@ -853,7 +893,7 @@ public class TestEditLog {
     }
 
     @Override
-    public int getVersion() throws IOException {
+    public int getVersion(boolean verifyVersion) throws IOException {
       return version;
     }
 
@@ -1237,7 +1277,7 @@ public class TestEditLog {
     EditLogFileInputStream elfis = null;
     try {
       elfos = new EditLogFileOutputStream(new Configuration(), TEST_LOG_NAME, 0);
-      elfos.create();
+      elfos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       elfos.writeRaw(garbage, 0, garbage.length);
       elfos.setReadyToFlush();
       elfos.flushAndSync(true);

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java Thu Mar 20 23:08:32 2014
@@ -82,7 +82,7 @@ public class TestEditLogFileOutputStream
         TEST_EDITS, 0);
     try {
       byte[] small = new byte[] { 1, 2, 3, 4, 5, 8, 7 };
-      elos.create();
+      elos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       // The first (small) write we make extends the file by 1 MB due to
       // preallocation.
       elos.writeRaw(small, 0, small.length);

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java Thu Mar 20 23:08:32 2014
@@ -22,7 +22,6 @@ import static org.apache.hadoop.hdfs.ser
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.FilenameFilter;
@@ -43,7 +42,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -224,7 +222,7 @@ public class TestFileJournalManager {
    */
   private void corruptAfterStartSegment(File f) throws IOException {
     RandomAccessFile raf = new RandomAccessFile(f, "rw");
-    raf.seek(0x16); // skip version and first tranaction and a bit of next transaction
+    raf.seek(0x20); // skip version and first tranaction and a bit of next transaction
     for (int i = 0; i < 1000; i++) {
       raf.writeInt(0xdeadbeef);
     }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java Thu Mar 20 23:08:32 2014
@@ -162,7 +162,8 @@ public class TestGenericJournalConf {
     }
     
     @Override
-    public EditLogOutputStream startLogSegment(long txId) throws IOException {
+    public EditLogOutputStream startLogSegment(long txId, int layoutVersion)
+        throws IOException {
       return mock(EditLogOutputStream.class);
     }
     

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java Thu Mar 20 23:08:32 2014
@@ -73,7 +73,7 @@ public class TestNameNodeRecovery {
     EditLogFileInputStream elfis = null;
     try {
       elfos = new EditLogFileOutputStream(new Configuration(), TEST_LOG_NAME, 0);
-      elfos.create();
+      elfos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
 
       elts.addTransactionsToLog(elfos, cache);
       elfos.setReadyToFlush();
@@ -271,7 +271,7 @@ public class TestNameNodeRecovery {
     } 
     
     public int getMaxOpSize() {
-      return 36;
+      return 40;
     }
   }
 

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java Thu Mar 20 23:08:32 2014
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -444,7 +445,8 @@ public class TestHAStateTransitions {
     if (writeHeader) {
       DataOutputStream out = new DataOutputStream(new FileOutputStream(
           inProgressFile));
-      EditLogFileOutputStream.writeHeader(out);
+      EditLogFileOutputStream.writeHeader(
+          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
       out.close();
     }
   }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1579814&r1=1579813&r2=1579814&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Thu Mar 20 23:08:32 2014
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <EDITS>
-  <EDITS_VERSION>-55</EDITS_VERSION>
+  <EDITS_VERSION>-56</EDITS_VERSION>
   <RECORD>
     <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
     <DATA>
@@ -13,8 +13,8 @@
       <TXID>2</TXID>
       <DELEGATION_KEY>
         <KEY_ID>1</KEY_ID>
-        <EXPIRY_DATE>1393648283650</EXPIRY_DATE>
-        <KEY>76e6d2854a753680</KEY>
+        <EXPIRY_DATE>1394849922137</EXPIRY_DATE>
+        <KEY>37e1a64049bbef35</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -24,8 +24,8 @@
       <TXID>3</TXID>
       <DELEGATION_KEY>
         <KEY_ID>2</KEY_ID>
-        <EXPIRY_DATE>1393648283653</EXPIRY_DATE>
-        <KEY>939fb7b875c956cd</KEY>
+        <EXPIRY_DATE>1394849922140</EXPIRY_DATE>
+        <KEY>7c0bf5039242fc54</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -37,18 +37,18 @@
       <INODEID>16386</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957084379</MTIME>
-      <ATIME>1392957084379</ATIME>
+      <MTIME>1394158722811</MTIME>
+      <ATIME>1394158722811</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1178237747_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>7</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>6</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -59,13 +59,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957084397</MTIME>
-      <ATIME>1392957084379</ATIME>
+      <MTIME>1394158722832</MTIME>
+      <ATIME>1394158722811</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -78,9 +78,9 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1392957084400</TIMESTAMP>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>9</RPC_CALLID>
+      <TIMESTAMP>1394158722836</TIMESTAMP>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>8</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -89,9 +89,9 @@
       <TXID>7</TXID>
       <LENGTH>0</LENGTH>
       <PATH>/file_moved</PATH>
-      <TIMESTAMP>1392957084413</TIMESTAMP>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>10</RPC_CALLID>
+      <TIMESTAMP>1394158722842</TIMESTAMP>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>9</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -101,9 +101,9 @@
       <LENGTH>0</LENGTH>
       <INODEID>16387</INODEID>
       <PATH>/directory_mkdir</PATH>
-      <TIMESTAMP>1392957084419</TIMESTAMP>
+      <TIMESTAMP>1394158722848</TIMESTAMP>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>493</MODE>
       </PERMISSION_STATUS>
@@ -136,8 +136,8 @@
       <TXID>12</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>15</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>14</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -147,8 +147,8 @@
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
       <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>16</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>15</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -157,8 +157,8 @@
       <TXID>14</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>17</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>16</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -169,18 +169,18 @@
       <INODEID>16388</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957084440</MTIME>
-      <ATIME>1392957084440</ATIME>
+      <MTIME>1394158722872</MTIME>
+      <ATIME>1394158722872</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1178237747_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>18</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>17</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -191,13 +191,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957084441</MTIME>
-      <ATIME>1392957084440</ATIME>
+      <MTIME>1394158722874</MTIME>
+      <ATIME>1394158722872</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -253,10 +253,10 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1392957084455</TIMESTAMP>
+      <TIMESTAMP>1394158722890</TIMESTAMP>
       <OPTIONS>NONE</OPTIONS>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>25</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>24</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -267,18 +267,18 @@
       <INODEID>16389</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957084459</MTIME>
-      <ATIME>1392957084459</ATIME>
+      <MTIME>1394158722895</MTIME>
+      <ATIME>1394158722895</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1178237747_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>27</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>26</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -383,8 +383,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957084525</MTIME>
-      <ATIME>1392957084459</ATIME>
+      <MTIME>1394158722986</MTIME>
+      <ATIME>1394158722895</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -404,7 +404,7 @@
         <GENSTAMP>1003</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -418,18 +418,18 @@
       <INODEID>16390</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957084527</MTIME>
-      <ATIME>1392957084527</ATIME>
+      <MTIME>1394158722989</MTIME>
+      <ATIME>1394158722989</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1178237747_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>40</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>39</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -534,8 +534,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957084542</MTIME>
-      <ATIME>1392957084527</ATIME>
+      <MTIME>1394158723010</MTIME>
+      <ATIME>1394158722989</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -555,7 +555,7 @@
         <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -569,18 +569,18 @@
       <INODEID>16391</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957084544</MTIME>
-      <ATIME>1392957084544</ATIME>
+      <MTIME>1394158723012</MTIME>
+      <ATIME>1394158723012</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1178237747_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>52</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>51</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -685,8 +685,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957084559</MTIME>
-      <ATIME>1392957084544</ATIME>
+      <MTIME>1394158723035</MTIME>
+      <ATIME>1394158723012</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -706,7 +706,7 @@
         <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -718,13 +718,13 @@
       <TXID>56</TXID>
       <LENGTH>0</LENGTH>
       <TRG>/file_concat_target</TRG>
-      <TIMESTAMP>1392957084561</TIMESTAMP>
+      <TIMESTAMP>1394158723039</TIMESTAMP>
       <SOURCES>
         <SOURCE1>/file_concat_0</SOURCE1>
         <SOURCE2>/file_concat_1</SOURCE2>
       </SOURCES>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>63</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>62</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -735,15 +735,15 @@
       <INODEID>16392</INODEID>
       <PATH>/file_symlink</PATH>
       <VALUE>/file_concat_target</VALUE>
-      <MTIME>1392957084564</MTIME>
-      <ATIME>1392957084564</ATIME>
+      <MTIME>1394158723044</MTIME>
+      <ATIME>1394158723044</ATIME>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>511</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>64</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>63</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -754,18 +754,18 @@
       <INODEID>16393</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957084567</MTIME>
-      <ATIME>1392957084567</ATIME>
+      <MTIME>1394158723047</MTIME>
+      <ATIME>1394158723047</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1178237747_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>65</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>64</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -821,7 +821,7 @@
     <OPCODE>OP_REASSIGN_LEASE</OPCODE>
     <DATA>
       <TXID>64</TXID>
-      <LEASEHOLDER>DFSClient_NONMAPREDUCE_-1178237747_1</LEASEHOLDER>
+      <LEASEHOLDER>DFSClient_NONMAPREDUCE_221786725_1</LEASEHOLDER>
       <PATH>/hard-lease-recovery-test</PATH>
       <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
     </DATA>
@@ -834,8 +834,8 @@
       <INODEID>0</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1392957087263</MTIME>
-      <ATIME>1392957084567</ATIME>
+      <MTIME>1394158725708</MTIME>
+      <ATIME>1394158723047</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -845,7 +845,7 @@
         <GENSTAMP>1011</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>szetszwo</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -856,13 +856,13 @@
     <DATA>
       <TXID>66</TXID>
       <POOLNAME>pool1</POOLNAME>
-      <OWNERNAME>szetszwo</OWNERNAME>
+      <OWNERNAME>jing</OWNERNAME>
       <GROUPNAME>staff</GROUPNAME>
       <MODE>493</MODE>
       <LIMIT>9223372036854775807</LIMIT>
       <MAXRELATIVEEXPIRY>2305843009213693951</MAXRELATIVEEXPIRY>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>72</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>71</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -871,8 +871,8 @@
       <TXID>67</TXID>
       <POOLNAME>pool1</POOLNAME>
       <LIMIT>99</LIMIT>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>73</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>72</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -883,9 +883,9 @@
       <PATH>/path</PATH>
       <REPLICATION>1</REPLICATION>
       <POOL>pool1</POOL>
-      <EXPIRATION>2305844402170781554</EXPIRATION>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>74</RPC_CALLID>
+      <EXPIRATION>2305844403372420029</EXPIRATION>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>73</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -894,8 +894,8 @@
       <TXID>69</TXID>
       <ID>1</ID>
       <REPLICATION>2</REPLICATION>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>75</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>74</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -903,8 +903,8 @@
     <DATA>
       <TXID>70</TXID>
       <ID>1</ID>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>76</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>75</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -912,8 +912,8 @@
     <DATA>
       <TXID>71</TXID>
       <POOLNAME>pool1</POOLNAME>
-      <RPC_CLIENTID>ad7d1b9e-e5d3-4d8d-ae1a-060f579be11e</RPC_CLIENTID>
-      <RPC_CALLID>77</RPC_CALLID>
+      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
+      <RPC_CALLID>76</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -927,14 +927,14 @@
     <OPCODE>OP_ROLLING_UPGRADE_START</OPCODE>
     <DATA>
       <TXID>73</TXID>
-      <STARTTIME>1392957087621</STARTTIME>
+      <STARTTIME>1394158726098</STARTTIME>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ROLLING_UPGRADE_FINALIZE</OPCODE>
     <DATA>
       <TXID>74</TXID>
-      <FINALIZETIME>1392957087621</FINALIZETIME>
+      <FINALIZETIME>1394158726098</FINALIZETIME>
     </DATA>
   </RECORD>
   <RECORD>


