hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ji...@apache.org
Subject svn commit: r1579813 [1/2] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/ src/main/java/org...
Date Thu, 20 Mar 2014 23:06:07 GMT
Author: jing9
Date: Thu Mar 20 23:06:06 2014
New Revision: 1579813

URL: http://svn.apache.org/r1579813
Log:
HDFS-6038. Allow JournalNode to handle editlog produced by new release with future layoutversion. Contributed by Jing Zhao.

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Mar 20 23:06:06 2014
@@ -972,6 +972,9 @@ BREAKDOWN OF HDFS-5535 ROLLING UPGRADE S
     DatanodeRegistration with namenode layout version and namenode node type.
     (szetszwo)
 
+    HDFS-6038. Allow JournalNode to handle editlog produced by new release with
+    future layoutversion. (jing9)
+
 Release 2.3.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java Thu Mar 20 23:06:06 2014
@@ -97,7 +97,7 @@ class BookKeeperEditLogInputStream exten
   }
   
   @Override
-  public int getVersion() throws IOException {
+  public int getVersion(boolean verifyVersion) throws IOException {
     return logVersion;
   }
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java Thu Mar 20 23:06:06 2014
@@ -77,7 +77,7 @@ class BookKeeperEditLogOutputStream
   }
 
   @Override
-  public void create() throws IOException {
+  public void create(int layoutVersion) throws IOException {
     // noop
   }
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java Thu Mar 20 23:06:06 2014
@@ -364,7 +364,8 @@ public class BookKeeperJournalManager im
    * @param txId First transaction id to be written to the stream
    */
   @Override
-  public EditLogOutputStream startLogSegment(long txId) throws IOException {
+  public EditLogOutputStream startLogSegment(long txId, int layoutVersion)
+      throws IOException {
     checkEnv();
 
     if (txId <= maxTxId.get()) {
@@ -397,7 +398,7 @@ public class BookKeeperJournalManager im
     try {
       String znodePath = inprogressZNode(txId);
       EditLogLedgerMetadata l = new EditLogLedgerMetadata(znodePath,
-          HdfsConstants.NAMENODE_LAYOUT_VERSION, currentLedger.getId(), txId);
+          layoutVersion, currentLedger.getId(), txId);
       /* Write the ledger metadata out to the inprogress ledger znode
        * This can fail if for some reason our write lock has
        * expired (@see WriteLock) and another process has managed to

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java Thu Mar 20 23:06:06 2014
@@ -30,7 +30,6 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.ArrayList;
 import java.util.Random;
 
 import java.util.concurrent.Executors;
@@ -47,6 +46,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 
 import org.apache.bookkeeper.proto.BookieServer;
@@ -101,7 +101,8 @@ public class TestBookKeeperJournalManage
         BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"), nsi);
     bkjm.format(nsi);
 
-    EditLogOutputStream out = bkjm.startLogSegment(1);
+    EditLogOutputStream out = bkjm.startLogSegment(1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long i = 1 ; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(i);
@@ -124,7 +125,8 @@ public class TestBookKeeperJournalManage
         BKJMUtil.createJournalURI("/hdfsjournal-txncount"), nsi);
     bkjm.format(nsi);
 
-    EditLogOutputStream out = bkjm.startLogSegment(1);
+    EditLogOutputStream out = bkjm.startLogSegment(1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long i = 1 ; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(i);
@@ -147,7 +149,8 @@ public class TestBookKeeperJournalManage
     long txid = 1;
     for (long i = 0; i < 3; i++) {
       long start = txid;
-      EditLogOutputStream out = bkjm.startLogSegment(start);
+      EditLogOutputStream out = bkjm.startLogSegment(start,
+          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
         FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
         op.setTransactionId(txid++);
@@ -185,7 +188,8 @@ public class TestBookKeeperJournalManage
     long txid = 1;
     for (long i = 0; i < 3; i++) {
       long start = txid;
-      EditLogOutputStream out = bkjm.startLogSegment(start);
+      EditLogOutputStream out = bkjm.startLogSegment(start,
+          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
         FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
         op.setTransactionId(txid++);
@@ -198,7 +202,8 @@ public class TestBookKeeperJournalManage
           zkc.exists(bkjm.finalizedLedgerZNode(start, (txid-1)), false));
     }
     long start = txid;
-    EditLogOutputStream out = bkjm.startLogSegment(start);
+    EditLogOutputStream out = bkjm.startLogSegment(start,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE/2; j++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(txid++);
@@ -226,7 +231,8 @@ public class TestBookKeeperJournalManage
 
     long txid = 1;
     long start = txid;
-    EditLogOutputStream out = bkjm.startLogSegment(txid);
+    EditLogOutputStream out = bkjm.startLogSegment(txid,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(txid++);
@@ -237,7 +243,8 @@ public class TestBookKeeperJournalManage
     
     txid = 1;
     try {
-      out = bkjm.startLogSegment(txid);
+      out = bkjm.startLogSegment(txid,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       fail("Shouldn't be able to start another journal from " + txid
           + " when one already exists");
     } catch (Exception ioe) {
@@ -247,7 +254,8 @@ public class TestBookKeeperJournalManage
     // test border case
     txid = DEFAULT_SEGMENT_SIZE;
     try {
-      out = bkjm.startLogSegment(txid);
+      out = bkjm.startLogSegment(txid,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       fail("Shouldn't be able to start another journal from " + txid
           + " when one already exists");
     } catch (IOException ioe) {
@@ -257,7 +265,8 @@ public class TestBookKeeperJournalManage
     // open journal continuing from before
     txid = DEFAULT_SEGMENT_SIZE + 1;
     start = txid;
-    out = bkjm.startLogSegment(start);
+    out = bkjm.startLogSegment(start,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     assertNotNull(out);
 
     for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
@@ -270,7 +279,8 @@ public class TestBookKeeperJournalManage
 
    // open journal arbitrarily far in the future
     txid = DEFAULT_SEGMENT_SIZE * 4;
-    out = bkjm.startLogSegment(txid);
+    out = bkjm.startLogSegment(txid,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     assertNotNull(out);
   }
 
@@ -287,9 +297,11 @@ public class TestBookKeeperJournalManage
         BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);
 
 
-    EditLogOutputStream out1 = bkjm1.startLogSegment(start);
+    EditLogOutputStream out1 = bkjm1.startLogSegment(start,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     try {
-      bkjm2.startLogSegment(start);
+      bkjm2.startLogSegment(start,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       fail("Shouldn't have been able to open the second writer");
     } catch (IOException ioe) {
       LOG.info("Caught exception as expected", ioe);
@@ -307,7 +319,8 @@ public class TestBookKeeperJournalManage
     bkjm.format(nsi);
 
     final long numTransactions = 10000;
-    EditLogOutputStream out = bkjm.startLogSegment(1);
+    EditLogOutputStream out = bkjm.startLogSegment(1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long i = 1 ; i <= numTransactions; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(i);
@@ -334,7 +347,8 @@ public class TestBookKeeperJournalManage
         nsi);
     bkjm.format(nsi);
 
-    EditLogOutputStream out = bkjm.startLogSegment(1);
+    EditLogOutputStream out = bkjm.startLogSegment(1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long i = 1 ; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(i);
@@ -384,7 +398,8 @@ public class TestBookKeeperJournalManage
           BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"),
           nsi);
       bkjm.format(nsi);
-      EditLogOutputStream out = bkjm.startLogSegment(txid);
+      EditLogOutputStream out = bkjm.startLogSegment(txid,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
 
       for (long i = 1 ; i <= 3; i++) {
         FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
@@ -416,7 +431,8 @@ public class TestBookKeeperJournalManage
       assertEquals("New bookie didn't start",
                    numBookies+1, bkutil.checkBookiesUp(numBookies+1, 10));
       bkjm.recoverUnfinalizedSegments();
-      out = bkjm.startLogSegment(txid);
+      out = bkjm.startLogSegment(txid,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       for (long i = 1 ; i <= 3; i++) {
         FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
         op.setTransactionId(txid++);
@@ -471,7 +487,8 @@ public class TestBookKeeperJournalManage
           nsi);
       bkjm.format(nsi);
 
-      EditLogOutputStream out = bkjm.startLogSegment(txid);
+      EditLogOutputStream out = bkjm.startLogSegment(txid,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       for (long i = 1 ; i <= 3; i++) {
         FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
         op.setTransactionId(txid++);
@@ -522,7 +539,8 @@ public class TestBookKeeperJournalManage
                                                                  nsi);
     bkjm.format(nsi);
 
-    EditLogOutputStream out = bkjm.startLogSegment(1);
+    EditLogOutputStream out = bkjm.startLogSegment(1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long i = 1; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(i);
@@ -531,7 +549,8 @@ public class TestBookKeeperJournalManage
     out.close();
     bkjm.finalizeLogSegment(1, 100);
 
-    out = bkjm.startLogSegment(101);
+    out = bkjm.startLogSegment(101,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     out.close();
     bkjm.close();
     String inprogressZNode = bkjm.inprogressZNode(101);
@@ -564,7 +583,8 @@ public class TestBookKeeperJournalManage
                                                                  nsi);
     bkjm.format(nsi);
 
-    EditLogOutputStream out = bkjm.startLogSegment(1);
+    EditLogOutputStream out = bkjm.startLogSegment(1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long i = 1; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(i);
@@ -573,7 +593,8 @@ public class TestBookKeeperJournalManage
     out.close();
     bkjm.finalizeLogSegment(1, 100);
 
-    out = bkjm.startLogSegment(101);
+    out = bkjm.startLogSegment(101,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     out.close();
     bkjm.close();
 
@@ -607,7 +628,8 @@ public class TestBookKeeperJournalManage
                                                                  nsi);
     bkjm.format(nsi);
 
-    EditLogOutputStream out = bkjm.startLogSegment(1);
+    EditLogOutputStream out = bkjm.startLogSegment(1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long i = 1; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(i);
@@ -616,13 +638,15 @@ public class TestBookKeeperJournalManage
     out.close();
     bkjm.finalizeLogSegment(1, 100);
 
-    out = bkjm.startLogSegment(101);
+    out = bkjm.startLogSegment(101,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     out.close();
     bkjm.close();
 
     bkjm = new BookKeeperJournalManager(conf, uri, nsi);
     bkjm.recoverUnfinalizedSegments();
-    out = bkjm.startLogSegment(101);
+    out = bkjm.startLogSegment(101,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long i = 1; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(i);
@@ -647,7 +671,8 @@ public class TestBookKeeperJournalManage
                                                                  nsi);
     bkjm.format(nsi);
 
-    EditLogOutputStream out = bkjm.startLogSegment(1);
+    EditLogOutputStream out = bkjm.startLogSegment(1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long i = 1; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(i);
@@ -739,7 +764,7 @@ public class TestBookKeeperJournalManage
       = new BookKeeperJournalManager(conf, uri, nsi);
     bkjm.format(nsi);
     for (int i = 1; i < 100*2; i += 2) {
-      bkjm.startLogSegment(i);
+      bkjm.startLogSegment(i, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       bkjm.finalizeLogSegment(i, i+1);
     }
     bkjm.close();
@@ -800,7 +825,8 @@ public class TestBookKeeperJournalManage
   private String startAndFinalizeLogSegment(BookKeeperJournalManager bkjm,
       int startTxid, int endTxid) throws IOException, KeeperException,
       InterruptedException {
-    EditLogOutputStream out = bkjm.startLogSegment(startTxid);
+    EditLogOutputStream out = bkjm.startLogSegment(startTxid,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (long i = startTxid; i <= endTxid; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
       op.setTransactionId(i);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java Thu Mar 20 23:06:06 2014
@@ -67,8 +67,9 @@ interface AsyncLogger {
    * Begin writing a new log segment.
    * 
    * @param txid the first txid to be written to the new log
+   * @param layoutVersion the LayoutVersion of the log
    */
-  public ListenableFuture<Void> startLogSegment(long txid);
+  public ListenableFuture<Void> startLogSegment(long txid, int layoutVersion);
 
   /**
    * Finalize a log segment.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java Thu Mar 20 23:06:06 2014
@@ -233,10 +233,10 @@ class AsyncLoggerSet {
   }
 
   public QuorumCall<AsyncLogger, Void> startLogSegment(
-      long txid) {
+      long txid, int layoutVersion) {
     Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap();
     for (AsyncLogger logger : loggers) {
-      calls.put(logger, logger.startLogSegment(txid));
+      calls.put(logger, logger.startLogSegment(txid, layoutVersion));
     }
     return QuorumCall.create(calls);
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java Thu Mar 20 23:06:06 2014
@@ -258,8 +258,7 @@ public class IPCLoggerChannel implements
 
   private synchronized RequestInfo createReqInfo() {
     Preconditions.checkState(epoch > 0, "bad epoch: " + epoch);
-    return new RequestInfo(journalId, epoch, ipcSerial++,
-        committedTxId);
+    return new RequestInfo(journalId, epoch, ipcSerial++, committedTxId);
   }
 
   @VisibleForTesting
@@ -475,11 +474,12 @@ public class IPCLoggerChannel implements
   }
   
   @Override
-  public ListenableFuture<Void> startLogSegment(final long txid) {
+  public ListenableFuture<Void> startLogSegment(final long txid,
+      final int layoutVersion) {
     return executor.submit(new Callable<Void>() {
       @Override
       public Void call() throws IOException {
-        getProxy().startLogSegment(createReqInfo(), txid);
+        getProxy().startLogSegment(createReqInfo(), txid, layoutVersion);
         synchronized (IPCLoggerChannel.this) {
           if (outOfSync) {
             outOfSync = false;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java Thu Mar 20 23:06:06 2014
@@ -394,10 +394,12 @@ public class QuorumJournalManager implem
   }
   
   @Override
-  public EditLogOutputStream startLogSegment(long txId) throws IOException {
+  public EditLogOutputStream startLogSegment(long txId, int layoutVersion)
+      throws IOException {
     Preconditions.checkState(isActiveWriter,
         "must recover segments before starting a new one");
-    QuorumCall<AsyncLogger,Void> q = loggers.startLogSegment(txId);
+    QuorumCall<AsyncLogger, Void> q = loggers.startLogSegment(txId,
+        layoutVersion);
     loggers.waitForWriteQuorum(q, startSegmentTimeoutMs,
         "startLogSegment(" + txId + ")");
     return new QuorumOutputStream(loggers, txId,

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java Thu Mar 20 23:06:06 2014
@@ -55,7 +55,7 @@ class QuorumOutputStream extends EditLog
   }
 
   @Override
-  public void create() throws IOException {
+  public void create(int layoutVersion) throws IOException {
     throw new UnsupportedOperationException();
   }
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java Thu Mar 20 23:06:06 2014
@@ -100,9 +100,10 @@ public interface QJournalProtocol {
    * using {@link #finalizeLogSegment(RequestInfo, long, long)}.
    * 
    * @param txid the first txid in the new log
+   * @param layoutVersion the LayoutVersion of the new log
    */
   public void startLogSegment(RequestInfo reqInfo,
-      long txid) throws IOException;
+      long txid, int layoutVersion) throws IOException;
 
   /**
    * Finalize the given log segment on the JournalNode. The segment

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java Thu Mar 20 23:06:06 2014
@@ -68,6 +68,7 @@ import org.apache.hadoop.hdfs.qjournal.p
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 
 import com.google.protobuf.RpcController;
@@ -180,8 +181,10 @@ public class QJournalProtocolServerSideT
   public StartLogSegmentResponseProto startLogSegment(RpcController controller,
       StartLogSegmentRequestProto req) throws ServiceException {
     try {
-      impl.startLogSegment(convert(req.getReqInfo()),
-          req.getTxid());
+      int layoutVersion = req.hasLayoutVersion() ? req.getLayoutVersion()
+          : NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
+      impl.startLogSegment(convert(req.getReqInfo()), req.getTxid(),
+          layoutVersion);
     } catch (IOException e) {
       throw new ServiceException(e);
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java Thu Mar 20 23:06:06 2014
@@ -194,11 +194,11 @@ public class QJournalProtocolTranslatorP
   }
 
   @Override
-  public void startLogSegment(RequestInfo reqInfo, long txid)
+  public void startLogSegment(RequestInfo reqInfo, long txid, int layoutVersion)
       throws IOException {
     StartLogSegmentRequestProto req = StartLogSegmentRequestProto.newBuilder()
         .setReqInfo(convert(reqInfo))
-        .setTxid(txid)
+        .setTxid(txid).setLayoutVersion(layoutVersion)
         .build();
     try {
       rpcProxy.startLogSegment(NULL_CONTROLLER, req);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java Thu Mar 20 23:06:06 2014
@@ -188,7 +188,7 @@ public class Journal implements Closeabl
     
     while (!files.isEmpty()) {
       EditLogFile latestLog = files.remove(files.size() - 1);
-      latestLog.validateLog();
+      latestLog.scanLog();
       LOG.info("Latest log is " + latestLog);
       if (latestLog.getLastTxId() == HdfsConstants.INVALID_TXID) {
         // the log contains no transactions
@@ -489,8 +489,8 @@ public class Journal implements Closeabl
    * Start a new segment at the given txid. The previous segment
    * must have already been finalized.
    */
-  public synchronized void startLogSegment(RequestInfo reqInfo, long txid)
-      throws IOException {
+  public synchronized void startLogSegment(RequestInfo reqInfo, long txid,
+      int layoutVersion) throws IOException {
     assert fjm != null;
     checkFormatted();
     checkRequest(reqInfo);
@@ -518,7 +518,7 @@ public class Journal implements Closeabl
       // If it's in-progress, it should only contain one transaction,
       // because the "startLogSegment" transaction is written alone at the
       // start of each segment. 
-      existing.validateLog();
+      existing.scanLog();
       if (existing.getLastTxId() != existing.getFirstTxId()) {
         throw new IllegalStateException("The log file " +
             existing + " seems to contain valid transactions");
@@ -539,7 +539,7 @@ public class Journal implements Closeabl
     // remove the record of the older segment here.
     purgePaxosDecision(txid);
     
-    curSegment = fjm.startLogSegment(txid);
+    curSegment = fjm.startLogSegment(txid, layoutVersion);
     curSegmentTxId = txid;
     nextTxId = txid;
   }
@@ -581,7 +581,7 @@ public class Journal implements Closeabl
       if (needsValidation) {
         LOG.info("Validating log segment " + elf.getFile() + " about to be " +
             "finalized");
-        elf.validateLog();
+        elf.scanLog();
   
         checkSync(elf.getLastTxId() == endTxId,
             "Trying to finalize in-progress log segment %s to end at " +
@@ -660,14 +660,15 @@ public class Journal implements Closeabl
    * @return the current state of the given segment, or null if the
    * segment does not exist.
    */
-  private SegmentStateProto getSegmentInfo(long segmentTxId)
+  @VisibleForTesting
+  SegmentStateProto getSegmentInfo(long segmentTxId)
       throws IOException {
     EditLogFile elf = fjm.getLogFile(segmentTxId);
     if (elf == null) {
       return null;
     }
     if (elf.isInProgress()) {
-      elf.validateLog();
+      elf.scanLog();
     }
     if (elf.getLastTxId() == HdfsConstants.INVALID_TXID) {
       LOG.info("Edit log file " + elf + " appears to be empty. " +

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java Thu Mar 20 23:06:06 2014
@@ -156,10 +156,10 @@ class JournalNodeRpcServer implements QJ
   }
 
   @Override
-  public void startLogSegment(RequestInfo reqInfo, long txid)
+  public void startLogSegment(RequestInfo reqInfo, long txid, int layoutVersion)
       throws IOException {
     jn.getOrCreateJournal(reqInfo.getJournalId())
-      .startLogSegment(reqInfo, txid);
+      .startLogSegment(reqInfo, txid, layoutVersion);
   }
 
   @Override

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java Thu Mar 20 23:06:06 2014
@@ -56,7 +56,8 @@ class BackupJournalManager implements Jo
 
   
   @Override
-  public EditLogOutputStream startLogSegment(long txId) throws IOException {
+  public EditLogOutputStream startLogSegment(long txId, int layoutVersion)
+      throws IOException {
     EditLogBackupOutputStream stm = new EditLogBackupOutputStream(bnReg,
         journalInfo);
     stm.startLogSegment(txId);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java Thu Mar 20 23:06:06 2014
@@ -92,7 +92,7 @@ class EditLogBackupInputStream extends E
   }
 
   @Override
-  public int getVersion() throws IOException {
+  public int getVersion(boolean verifyVersion) throws IOException {
     return this.version;
   }
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java Thu Mar 20 23:06:06 2014
@@ -86,7 +86,7 @@ class EditLogBackupOutputStream extends 
    * There is no persistent storage. Just clear the buffers.
    */
   @Override // EditLogOutputStream
-  public void create() throws IOException {
+  public void create(int layoutVersion) throws IOException {
     assert doubleBuf.isFlushed() : "previous data is not flushed yet";
     this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java Thu Mar 20 23:06:06 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.io.IOUtils;
@@ -135,7 +136,8 @@ public class EditLogFileInputStream exte
     this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
   }
 
-  private void init() throws LogHeaderCorruptException, IOException {
+  private void init(boolean verifyLayoutVersion)
+      throws LogHeaderCorruptException, IOException {
     Preconditions.checkState(state == State.UNINIT);
     BufferedInputStream bin = null;
     try {
@@ -144,12 +146,14 @@ public class EditLogFileInputStream exte
       tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);
       dataIn = new DataInputStream(tracker);
       try {
-        logVersion = readLogVersion(dataIn);
+        logVersion = readLogVersion(dataIn, verifyLayoutVersion);
       } catch (EOFException eofe) {
         throw new LogHeaderCorruptException("No header found in log");
       }
+      // We assume future layout will also support ADD_LAYOUT_FLAGS
       if (NameNodeLayoutVersion.supports(
-          LayoutVersion.Feature.ADD_LAYOUT_FLAGS, logVersion)) {
+          LayoutVersion.Feature.ADD_LAYOUT_FLAGS, logVersion) ||
+          logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION) {
         try {
           LayoutFlags.read(dataIn);
         } catch (EOFException eofe) {
@@ -188,7 +192,7 @@ public class EditLogFileInputStream exte
     switch (state) {
     case UNINIT:
       try {
-        init();
+        init(true);
       } catch (Throwable e) {
         LOG.error("caught exception initializing " + this, e);
         if (skipBrokenEdits) {
@@ -238,6 +242,13 @@ public class EditLogFileInputStream exte
   }
 
   @Override
+  protected long scanNextOp() throws IOException {
+    Preconditions.checkState(state == State.OPEN);
+    FSEditLogOp cachedNext = getCachedOp();
+    return cachedNext == null ? reader.scanOp() : cachedNext.txid;
+  }
+
+  @Override
   protected FSEditLogOp nextOp() throws IOException {
     return nextOpImpl(false);
   }
@@ -253,9 +264,9 @@ public class EditLogFileInputStream exte
   }
 
   @Override
-  public int getVersion() throws IOException {
+  public int getVersion(boolean verifyVersion) throws IOException {
     if (state == State.UNINIT) {
-      init();
+      init(verifyVersion);
     }
     return logVersion;
   }
@@ -293,11 +304,12 @@ public class EditLogFileInputStream exte
     return getName();
   }
 
-  static FSEditLogLoader.EditLogValidation validateEditLog(File file) throws IOException {
+  static FSEditLogLoader.EditLogValidation validateEditLog(File file)
+      throws IOException {
     EditLogFileInputStream in;
     try {
       in = new EditLogFileInputStream(file);
-      in.getVersion(); // causes us to read the header
+      in.getVersion(true); // causes us to read the header
     } catch (LogHeaderCorruptException e) {
       // If the header is malformed or the wrong value, this indicates a corruption
       LOG.warn("Log file " + file + " has no valid header", e);
@@ -312,6 +324,51 @@ public class EditLogFileInputStream exte
     }
   }
 
+  static FSEditLogLoader.EditLogValidation scanEditLog(File file)
+      throws IOException {
+    EditLogFileInputStream in;
+    try {
+      in = new EditLogFileInputStream(file);
+      // read the header, initialize the inputstream, but do not check the
+      // layoutversion
+      in.getVersion(false);
+    } catch (LogHeaderCorruptException e) {
+      LOG.warn("Log file " + file + " has no valid header", e);
+      return new FSEditLogLoader.EditLogValidation(0,
+          HdfsConstants.INVALID_TXID, true);
+    }
+
+    long lastPos = 0;
+    long lastTxId = HdfsConstants.INVALID_TXID;
+    long numValid = 0;
+    try {
+      while (true) {
+        long txid = HdfsConstants.INVALID_TXID;
+        lastPos = in.getPosition();
+        try {
+          if ((txid = in.scanNextOp()) == HdfsConstants.INVALID_TXID) {
+            break;
+          }
+        } catch (Throwable t) {
+          FSImage.LOG.warn("Caught exception after scanning through "
+              + numValid + " ops from " + in
+              + " while determining its valid length. Position was "
+              + lastPos, t);
+          in.resync();
+          FSImage.LOG.warn("After resync, position is " + in.getPosition());
+          continue;
+        }
+        if (lastTxId == HdfsConstants.INVALID_TXID || txid > lastTxId) {
+          lastTxId = txid;
+        }
+        numValid++;
+      }
+      return new EditLogValidation(lastPos, lastTxId, false);
+    } finally {
+      IOUtils.closeStream(in);
+    }
+  }
+
   /**
    * Read the header of fsedit log
    * @param in fsedit stream
@@ -319,7 +376,7 @@ public class EditLogFileInputStream exte
    * @throws IOException if error occurs
    */
   @VisibleForTesting
-  static int readLogVersion(DataInputStream in)
+  static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion)
       throws IOException, LogHeaderCorruptException {
     int logVersion;
     try {
@@ -328,8 +385,9 @@ public class EditLogFileInputStream exte
       throw new LogHeaderCorruptException(
           "Reached EOF when reading log header");
     }
-    if (logVersion < HdfsConstants.NAMENODE_LAYOUT_VERSION || // future version
-        logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION) { // unsupported
+    if (verifyLayoutVersion &&
+        (logVersion < HdfsConstants.NAMENODE_LAYOUT_VERSION || // future version
+         logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION)) { // unsupported
       throw new LogHeaderCorruptException(
           "Unexpected version of the file system log file: "
           + logVersion + ". Current version = "

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java Thu Mar 20 23:06:06 2014
@@ -31,7 +31,6 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.io.IOUtils;
 
@@ -115,10 +114,10 @@ public class EditLogFileOutputStream ext
    * Create empty edits logs file.
    */
   @Override
-  public void create() throws IOException {
+  public void create(int layoutVersion) throws IOException {
     fc.truncate(0);
     fc.position(0);
-    writeHeader(doubleBuf.getCurrentBuf());
+    writeHeader(layoutVersion, doubleBuf.getCurrentBuf());
     setReadyToFlush();
     flush();
   }
@@ -127,12 +126,14 @@ public class EditLogFileOutputStream ext
    * Write header information for this EditLogFileOutputStream to the provided
    * DataOutputSream.
    * 
+   * @param layoutVersion the LayoutVersion of the EditLog
    * @param out the output stream to write the header to.
    * @throws IOException in the event of error writing to the stream.
    */
   @VisibleForTesting
-  public static void writeHeader(DataOutputStream out) throws IOException {
-    out.writeInt(HdfsConstants.NAMENODE_LAYOUT_VERSION);
+  public static void writeHeader(int layoutVersion, DataOutputStream out)
+      throws IOException {
+    out.writeInt(layoutVersion);
     LayoutFlags.write(out);
   }
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java Thu Mar 20 23:06:06 2014
@@ -19,6 +19,8 @@ package org.apache.hadoop.hdfs.server.na
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+
 import java.io.Closeable;
 import java.io.IOException;
 
@@ -103,6 +105,15 @@ public abstract class EditLogInputStream
    * @throws IOException if there is an error reading from the stream
    */
   protected abstract FSEditLogOp nextOp() throws IOException;
+
+  /**
+   * Go through the next operation from the stream storage.
+   * @return the txid of the next operation.
+   */
+  protected long scanNextOp() throws IOException {
+    FSEditLogOp next = readOp();
+    return next != null ? next.txid : HdfsConstants.INVALID_TXID;
+  }
   
   /** 
    * Get the next valid operation from the stream storage.
@@ -147,13 +158,22 @@ public abstract class EditLogInputStream
       }
     }
   }
+
+  /**
+   * return the cachedOp, and reset it to null. 
+   */
+  FSEditLogOp getCachedOp() {
+    FSEditLogOp op = this.cachedOp;
+    cachedOp = null;
+    return op;
+  }
   
   /** 
    * Get the layout version of the data in the stream.
    * @return the layout version of the ops in the stream.
    * @throws IOException if there is an error reading the version
    */
-  public abstract int getVersion() throws IOException;
+  public abstract int getVersion(boolean verifyVersion) throws IOException;
 
   /**
    * Get the "position" of in the stream. This is useful for 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java Thu Mar 20 23:06:06 2014
@@ -65,9 +65,10 @@ public abstract class EditLogOutputStrea
   /**
    * Create and initialize underlying persistent edits log storage.
    * 
+   * @param layoutVersion The LayoutVersion of the journal
    * @throws IOException
    */
-  abstract public void create() throws IOException;
+  abstract public void create(int layoutVersion) throws IOException;
 
   /**
    * Close the journal.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Thu Mar 20 23:06:06 2014
@@ -1158,7 +1158,8 @@ public class FSEditLog implements LogsPu
     storage.attemptRestoreRemovedStorage();
     
     try {
-      editLogStream = journalSet.startLogSegment(segmentTxId);
+      editLogStream = journalSet.startLogSegment(segmentTxId,
+          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     } catch (IOException ex) {
       throw new IOException("Unable to start log segment " +
           segmentTxId + ": too few journals successfully started.", ex);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Thu Mar 20 23:06:06 2014
@@ -182,7 +182,7 @@ public class FSEditLogLoader {
             }
           } catch (Throwable e) {
             // Handle a problem with our input
-            check203UpgradeFailure(in.getVersion(), e);
+            check203UpgradeFailure(in.getVersion(true), e);
             String errorMessage =
               formatEditLogReplayError(in, recentOpcodeOffsets, expectedTxId);
             FSImage.LOG.error(errorMessage, e);
@@ -221,7 +221,7 @@ public class FSEditLogLoader {
                   + ", numEdits=" + numEdits + ", totalEdits=" + totalEdits);
             }
             long inodeId = applyEditLogOp(op, fsDir, startOpt,
-                in.getVersion(), lastInodeId);
+                in.getVersion(true), lastInodeId);
             if (lastInodeId < inodeId) {
               lastInodeId = inodeId;
             }
@@ -1024,6 +1024,34 @@ public class FSEditLogLoader {
     return new EditLogValidation(lastPos, lastTxId, false);
   }
 
+  static EditLogValidation scanEditLog(EditLogInputStream in) {
+    long lastPos = 0;
+    long lastTxId = HdfsConstants.INVALID_TXID;
+    long numValid = 0;
+    FSEditLogOp op = null;
+    while (true) {
+      lastPos = in.getPosition();
+      try {
+        if ((op = in.readOp()) == null) { // TODO
+          break;
+        }
+      } catch (Throwable t) {
+        FSImage.LOG.warn("Caught exception after reading " + numValid +
+            " ops from " + in + " while determining its valid length." +
+            "Position was " + lastPos, t);
+        in.resync();
+        FSImage.LOG.warn("After resync, position is " + in.getPosition());
+        continue;
+      }
+      if (lastTxId == HdfsConstants.INVALID_TXID
+          || op.getTransactionId() > lastTxId) {
+        lastTxId = op.getTransactionId();
+      }
+      numValid++;
+    }
+    return new EditLogValidation(lastPos, lastTxId, false);
+  }
+
   static class EditLogValidation {
     private final long validLength;
     private final long endTxId;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Thu Mar 20 23:06:06 2014
@@ -116,6 +116,7 @@ import org.xml.sax.ContentHandler;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.AttributesImpl;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
@@ -206,7 +207,8 @@ public abstract class FSEditLogOp {
    * Constructor for an EditLog Op. EditLog ops cannot be constructed
    * directly, but only through Reader#readOp.
    */
-  private FSEditLogOp(FSEditLogOpCodes opCode) {
+  @VisibleForTesting
+  protected FSEditLogOp(FSEditLogOpCodes opCode) {
     this.opCode = opCode;
   }
 
@@ -3504,6 +3506,9 @@ public abstract class FSEditLogOp {
     @Override
     void readFields(DataInputStream in, int logVersion) throws IOException {
       AclEditLogProto p = AclEditLogProto.parseDelimitedFrom((DataInputStream)in);
+      if (p == null) {
+        throw new IOException("Failed to read fields from SetAclOp");
+      }
       src = p.getSrc();
       aclEntries = PBHelper.convertAclEntry(p.getEntriesList());
     }
@@ -3658,10 +3663,18 @@ public abstract class FSEditLogOp {
      */
     public void writeOp(FSEditLogOp op) throws IOException {
       int start = buf.getLength();
+      // write the op code first to make padding and terminator verification
+      // work
       buf.writeByte(op.opCode.getOpCode());
+      buf.writeInt(0); // write 0 for the length first
       buf.writeLong(op.txid);
       op.writeFields(buf);
       int end = buf.getLength();
+      
+      // write the length back: content of the op + 4 bytes checksum - op_code
+      int length = end - start - 1;
+      buf.writeInt(length, start + 1);
+
       checksum.reset();
       checksum.update(buf.getData(), start, end-start);
       int sum = (int)checksum.getValue();
@@ -3679,6 +3692,7 @@ public abstract class FSEditLogOp {
     private final Checksum checksum;
     private final OpInstanceCache cache;
     private int maxOpSize;
+    private final boolean supportEditLogLength;
 
     /**
      * Construct the reader
@@ -3693,6 +3707,12 @@ public abstract class FSEditLogOp {
       } else {
         this.checksum = null;
       }
+      // It is possible that the logVersion is actually a future layoutversion
+      // during the rolling upgrade (e.g., the NN gets upgraded first). We
+      // assume future layout will also support length of editlog op.
+      this.supportEditLogLength = NameNodeLayoutVersion.supports(
+          NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
+          || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
 
       if (this.checksum != null) {
         this.in = new DataInputStream(
@@ -3827,6 +3847,10 @@ public abstract class FSEditLogOp {
         throw new IOException("Read invalid opcode " + opCode);
       }
 
+      if (supportEditLogLength) {
+        in.readInt();
+      }
+
       if (NameNodeLayoutVersion.supports(
           LayoutVersion.Feature.STORED_TXIDS, logVersion)) {
         // Read the txid
@@ -3842,6 +3866,42 @@ public abstract class FSEditLogOp {
     }
 
     /**
+     * Similar to decodeOp(), but instead of doing the real decoding, we skip
+     * the content of the op if the length of the editlog op is supported.
+     * @return the txid of the scanned op, or INVALID_TXID if the end of the
+     * segment (or the terminator) is reached
+     */
+    public long scanOp() throws IOException {
+      if (supportEditLogLength) {
+        limiter.setLimit(maxOpSize);
+        in.mark(maxOpSize);
+
+        final byte opCodeByte;
+        try {
+          opCodeByte = in.readByte(); // op code
+        } catch (EOFException e) {
+          return HdfsConstants.INVALID_TXID;
+        }
+
+        FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
+        if (opCode == OP_INVALID) {
+          verifyTerminator();
+          return HdfsConstants.INVALID_TXID;
+        }
+
+        int length = in.readInt(); // read the length of the op
+        long txid = in.readLong(); // read the txid
+
+        // skip the remaining content
+        IOUtils.skipFully(in, length - 8); 
+        // TODO: do we want to verify checksum for JN? For now we don't.
+        return txid;
+      } else {
+        FSEditLogOp op = decodeOp();
+        return op == null ? HdfsConstants.INVALID_TXID : op.getTransactionId();
+      }
+    }
+
+    /**
      * Validate a transaction's checksum
      */
     private void validateChecksum(DataInputStream in,

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java Thu Mar 20 23:06:06 2014
@@ -103,13 +103,13 @@ public class FileJournalManager implemen
   }
 
   @Override
-  synchronized public EditLogOutputStream startLogSegment(long txid) 
-      throws IOException {
+  synchronized public EditLogOutputStream startLogSegment(long txid,
+      int layoutVersion) throws IOException {
     try {
       currentInProgress = NNStorage.getInProgressEditsFile(sd, txid);
       EditLogOutputStream stm = new EditLogFileOutputStream(conf,
           currentInProgress, outputBufferCapacity);
-      stm.create();
+      stm.create(layoutVersion);
       return stm;
     } catch (IOException e) {
       LOG.warn("Unable to start log segment " + txid +
@@ -476,6 +476,12 @@ public class FileJournalManager implemen
       this.hasCorruptHeader = val.hasCorruptHeader();
     }
 
+    public void scanLog() throws IOException {
+      EditLogValidation val = EditLogFileInputStream.scanEditLog(file);
+      this.lastTxId = val.getEndTxId();
+      this.hasCorruptHeader = val.hasCorruptHeader();
+    }
+
     public boolean isInProgress() {
       return isInProgress;
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java Thu Mar 20 23:06:06 2014
@@ -49,7 +49,8 @@ public interface JournalManager extends 
    * Begin writing to a new segment of the log stream, which starts at
    * the given transaction ID.
    */
-  EditLogOutputStream startLogSegment(long txId) throws IOException;
+  EditLogOutputStream startLogSegment(long txId, int layoutVersion)
+      throws IOException;
 
   /**
    * Mark the log segment that spans from firstTxId to lastTxId

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java Thu Mar 20 23:06:06 2014
@@ -89,10 +89,10 @@ public class JournalSet implements Journ
       this.shared = shared;
     }
 
-    public void startLogSegment(long txId) throws IOException {
+    public void startLogSegment(long txId, int layoutVersion) throws IOException {
       Preconditions.checkState(stream == null);
       disabled = false;
-      stream = journal.startLogSegment(txId);
+      stream = journal.startLogSegment(txId, layoutVersion);
     }
 
     /**
@@ -200,11 +200,12 @@ public class JournalSet implements Journ
 
   
   @Override
-  public EditLogOutputStream startLogSegment(final long txId) throws IOException {
+  public EditLogOutputStream startLogSegment(final long txId,
+      final int layoutVersion) throws IOException {
     mapJournalsAndReportErrors(new JournalClosure() {
       @Override
       public void apply(JournalAndStream jas) throws IOException {
-        jas.startLogSegment(txId);
+        jas.startLogSegment(txId, layoutVersion);
       }
     }, "starting log segment " + txId);
     return new JournalSetOutputStream();
@@ -433,12 +434,12 @@ public class JournalSet implements Journ
     }
 
     @Override
-    public void create() throws IOException {
+    public void create(final int layoutVersion) throws IOException {
       mapJournalsAndReportErrors(new JournalClosure() {
         @Override
         public void apply(JournalAndStream jas) throws IOException {
           if (jas.isActive()) {
-            jas.getCurrentStream().create();
+            jas.getCurrentStream().create(layoutVersion);
           }
         }
       }, "create");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java Thu Mar 20 23:06:06 2014
@@ -63,7 +63,8 @@ public class NameNodeLayoutVersion { 
    * </ul>
    */
   public static enum Feature implements LayoutFeature {
-    ROLLING_UPGRADE(-55, -53, "Support rolling upgrade", false);
+    ROLLING_UPGRADE(-55, -53, "Support rolling upgrade", false),
+    EDITLOG_LENGTH(-56, "Add length field to every edit log op");
     
     private final FeatureInfo info;
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java Thu Mar 20 23:06:06 2014
@@ -247,8 +247,8 @@ class RedundantEditLogInputStream extend
   }
 
   @Override
-  public int getVersion() throws IOException {
-    return streams[curIdx].getVersion();
+  public int getVersion(boolean verifyVersion) throws IOException {
+    return streams[curIdx].getVersion(verifyVersion);
   }
 
   @Override

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java Thu Mar 20 23:06:06 2014
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 
 /**
  * BinaryEditsVisitor implements a binary EditsVisitor
@@ -42,7 +43,7 @@ public class BinaryEditsVisitor implemen
   public BinaryEditsVisitor(String outputName) throws IOException {
     this.elfos = new EditLogFileOutputStream(new Configuration(),
       new File(outputName), 0);
-    elfos.create();
+    elfos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
   }
 
   /**

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java Thu Mar 20 23:06:06 2014
@@ -61,7 +61,7 @@ class OfflineEditsBinaryLoader implement
   @Override
   public void loadEdits() throws IOException {
     try {
-      visitor.start(inputStream.getVersion());
+      visitor.start(inputStream.getVersion(true));
       while (true) {
         try {
           FSEditLogOp op = inputStream.readOp();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto Thu Mar 20 23:06:06 2014
@@ -94,6 +94,7 @@ message HeartbeatResponseProto { // void
 message StartLogSegmentRequestProto {
   required RequestInfoProto reqInfo = 1;
   required uint64 txid = 2; // Transaction ID
+  optional sint32 layoutVersion = 3; // the LayoutVersion in the client
 }
 
 message StartLogSegmentResponseProto { 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java Thu Mar 20 23:06:06 2014
@@ -36,6 +36,8 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
+import org.apache.hadoop.hdfs.server.namenode.TestEditLog;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
@@ -59,11 +61,28 @@ public abstract class QJMTestUtil {
     
     return Arrays.copyOf(buf.getData(), buf.getLength());
   }
-  
+
+  /**
+   * Generate a byte array containing a sequence of GarbageMkdirOps.
+   */
+  public static byte[] createGabageTxns(long startTxId, int numTxns)
+      throws IOException {
+    DataOutputBuffer buf = new DataOutputBuffer();
+    FSEditLogOp.Writer writer = new FSEditLogOp.Writer(buf);
+
+    for (long txid = startTxId; txid < startTxId + numTxns; txid++) {
+      FSEditLogOp op = new TestEditLog.GarbageMkdirOp();
+      op.setTransactionId(txid);
+      writer.writeOp(op);
+    }
+    return Arrays.copyOf(buf.getData(), buf.getLength());
+  }
+
   public static EditLogOutputStream writeSegment(MiniJournalCluster cluster,
       QuorumJournalManager qjm, long startTxId, int numTxns,
       boolean finalize) throws IOException {
-    EditLogOutputStream stm = qjm.startLogSegment(startTxId);
+    EditLogOutputStream stm = qjm.startLogSegment(startTxId,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     // Should create in-progress
     assertExistsInQuorum(cluster,
         NNStorage.getInProgressEditsFileName(startTxId));

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java Thu Mar 20 23:06:06 2014
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.qjournal.c
 import org.apache.hadoop.hdfs.qjournal.client.LoggerTooFarBehindException;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
@@ -172,7 +173,7 @@ public class TestIPCLoggerChannel {
         Mockito.<RequestInfo>any());
     
     // After a roll, sending new edits should not fail.
-    ch.startLogSegment(3L).get();
+    ch.startLogSegment(3L, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
     assertFalse(ch.isOutOfSync());
 
     ch.sendEdits(3L, 3L, 1, FAKE_DATA).get();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java Thu Mar 20 23:06:06 2014
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.qjournal.p
 import org.apache.hadoop.hdfs.qjournal.server.JournalFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.io.IOUtils;
@@ -287,7 +288,8 @@ public class TestQJMWithFaults {
     long firstTxId = txid;
     long lastAcked = txid - 1;
     try {
-      EditLogOutputStream stm = qjm.startLogSegment(txid);
+      EditLogOutputStream stm = qjm.startLogSegment(txid,
+          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       
       for (int i = 0; i < numTxns; i++) {
         QJMTestUtil.writeTxns(stm, txid++, 1);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java?rev=1579813&r1=1579812&r2=1579813&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java Thu Mar 20 23:06:06 2014
@@ -17,13 +17,17 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
-import static org.junit.Assert.*;
-import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.JID;
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.FAKE_NSINFO;
+import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.JID;
+import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.verifyEdits;
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeSegment;
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeTxns;
-import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.verifyEdits;
 import static org.apache.hadoop.hdfs.qjournal.client.TestQuorumJournalManagerUnit.futureThrows;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.Closeable;
 import java.io.File;
@@ -49,6 +53,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -259,7 +264,8 @@ public class TestQuorumJournalManager {
     writeSegment(cluster, qjm, 1, 3, true);
     waitForAllPendingCalls(qjm.getLoggerSetForTests());
     
-    EditLogOutputStream stm = qjm.startLogSegment(4);
+    EditLogOutputStream stm = qjm.startLogSegment(4,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     try {
       waitForAllPendingCalls(qjm.getLoggerSetForTests());
     } finally {
@@ -306,7 +312,8 @@ public class TestQuorumJournalManager {
     cluster.getJournalNode(nodeMissingSegment).stopAndJoin(0);
     
     // Open segment on 2/3 nodes
-    EditLogOutputStream stm = qjm.startLogSegment(4);
+    EditLogOutputStream stm = qjm.startLogSegment(4,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     try {
       waitForAllPendingCalls(qjm.getLoggerSetForTests());
       
@@ -456,13 +463,15 @@ public class TestQuorumJournalManager {
     futureThrows(new IOException("injected")).when(spies.get(0))
       .finalizeLogSegment(Mockito.eq(1L), Mockito.eq(3L));
     futureThrows(new IOException("injected")).when(spies.get(0))
-      .startLogSegment(Mockito.eq(4L));
+        .startLogSegment(Mockito.eq(4L),
+            Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
     
     // Logger 1: fail at txn id 4
     failLoggerAtTxn(spies.get(1), 4L);
     
     writeSegment(cluster, qjm, 1, 3, true);
-    EditLogOutputStream stm = qjm.startLogSegment(4);
+    EditLogOutputStream stm = qjm.startLogSegment(4,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     try {
       writeTxns(stm, 4, 1);
       fail("Did not fail to write");
@@ -544,7 +553,8 @@ public class TestQuorumJournalManager {
    * None of the loggers have any associated paxos info.
    */
   private void setupLoggers345() throws Exception {
-    EditLogOutputStream stm = qjm.startLogSegment(1);
+    EditLogOutputStream stm = qjm.startLogSegment(1,
+        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     
     failLoggerAtTxn(spies.get(0), 4);
     failLoggerAtTxn(spies.get(1), 5);



Mime
View raw message