hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From t...@apache.org
Subject svn commit: r1100847 - in /hadoop/hdfs/branches/HDFS-1073: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/server/protocol/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/t...
Date Mon, 09 May 2011 00:32:23 GMT
Author: todd
Date: Mon May  9 00:32:22 2011
New Revision: 1100847

URL: http://svn.apache.org/viewvc?rev=1100847&view=rev
Log:
HDFS-1801. Remove use of timestamps to identify checkpoints and logs. Contributed by Todd
Lipcon.

Modified:
    hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageOldStorageInspector.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java

Modified: hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt (original)
+++ hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt Mon May  9 00:32:22 2011
@@ -21,3 +21,4 @@ HDFS-1892. Fix EditLogFileInputStream.ge
            filler (todd)
 HDFS-1799. Refactor log rolling and filename management out of FSEditLog
            (Ivan Kelly and Todd Lipcon via todd)
+HDFS-1801. Remove use of timestamps to identify checkpoints and logs (todd)

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
Mon May  9 00:32:22 2011
@@ -88,7 +88,6 @@ public class BackupImage extends FSImage
   void recoverCreateRead(Collection<URI> imageDirs,
                          Collection<URI> editsDirs) throws IOException {
     storage.setStorageDirectories(imageDirs, editsDirs);
-    storage.setCheckpointTime(0L);
     for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
       StorageState curState;
@@ -161,7 +160,6 @@ public class BackupImage extends FSImage
     // set storage fields
     storage.setStorageInfo(sig);
     storage.setImageDigest(sig.getImageDigest());
-    storage.setCheckpointTime(sig.checkpointTime);
 
     FSDirectory fsDir = getFSNamesystem().dir;
     if(fsDir.isEmpty()) {
@@ -319,23 +317,6 @@ public class BackupImage extends FSImage
     jsState = JSpoolState.INPROGRESS;
   }
 
-  synchronized void setCheckpointTime(int length, byte[] data)
-  throws IOException {
-    assert backupInputStream.length() == 0 : "backup input stream is not empty";
-    try {
-      // unpack new checkpoint time
-      backupInputStream.setBytes(data);
-      DataInputStream in = backupInputStream.getDataInputStream();
-      byte op = in.readByte();
-      assert op == NamenodeProtocol.JA_CHECKPOINT_TIME;
-      LongWritable lw = new LongWritable();
-      lw.readFields(in);
-      storage.setCheckpointTimeInStorage(lw.get());
-    } finally {
-      backupInputStream.clear();
-    }
-  }
-
   /**
    * Merge Journal Spool to memory.<p>
    * Journal Spool reader reads journal records from edits.new.
@@ -398,7 +379,7 @@ public class BackupImage extends FSImage
     editLog.revertFileStreams(STORAGE_JSPOOL_DIR + "/" + STORAGE_JSPOOL_FILE);
 
     // write version file
-    resetVersion(false, storage.getImageDigest());
+    resetVersion(storage.getImageDigest());
 
     // wake up journal writer
     synchronized(this) {

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
Mon May  9 00:32:22 2011
@@ -226,10 +226,6 @@ public class BackupNode extends NameNode
       case (int)JA_JSPOOL_START:
         bnImage.startJournalSpool(nnReg);
         return;
-      case (int)JA_CHECKPOINT_TIME:
-        bnImage.setCheckpointTime(length, args);
-        setRegistration(); // keep registration up to date
-        return;
       default:
         throw new IOException("Unexpected journal action: " + jAction);
     }

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
Mon May  9 00:32:22 2011
@@ -28,6 +28,9 @@ import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableUtils;
 
+import com.google.common.collect.ComparisonChain;
+import com.google.common.collect.Ordering;
+
 /**
  * A unique signature intended to identify checkpoint transactions.
  */
@@ -35,20 +38,22 @@ import org.apache.hadoop.io.WritableUtil
 public class CheckpointSignature extends StorageInfo 
                       implements WritableComparable<CheckpointSignature> {
   private static final String FIELD_SEPARATOR = ":";
-  long editsTime = -1L;
-  long checkpointTime = -1L;
   MD5Hash imageDigest = null;
+
   String blockpoolID = "";
+  
+  long lastCheckpointTxId;
+  long lastLogRollTxId;
 
   public CheckpointSignature() {}
 
   CheckpointSignature(FSImage fsImage) {
     super(fsImage.getStorage());
     blockpoolID = fsImage.getBlockPoolID();
-    editsTime = fsImage.getEditLog().getFsEditTime();
-    checkpointTime = fsImage.getStorage().getCheckpointTime();
+    
+    lastCheckpointTxId = fsImage.getStorage().getCheckpointTxId();
+    lastLogRollTxId = fsImage.getEditLog().getLastRollTxId();
     imageDigest = fsImage.getStorage().getImageDigest();
-    checkpointTime = fsImage.getStorage().getCheckpointTime();
   }
 
   CheckpointSignature(String str) {
@@ -57,8 +62,8 @@ public class CheckpointSignature extends
     layoutVersion = Integer.valueOf(fields[0]);
     namespaceID = Integer.valueOf(fields[1]);
     cTime = Long.valueOf(fields[2]);
-    editsTime = Long.valueOf(fields[3]);
-    checkpointTime = Long.valueOf(fields[4]);
+    lastCheckpointTxId  = Long.valueOf(fields[3]);
+    lastLogRollTxId  = Long.valueOf(fields[4]);
     imageDigest = new MD5Hash(fields[5]);
     clusterID = fields[6];
     blockpoolID = fields[7];
@@ -101,33 +106,31 @@ public class CheckpointSignature extends
     return String.valueOf(layoutVersion) + FIELD_SEPARATOR
          + String.valueOf(namespaceID) + FIELD_SEPARATOR
          + String.valueOf(cTime) + FIELD_SEPARATOR
-         + String.valueOf(editsTime) + FIELD_SEPARATOR
-         + String.valueOf(checkpointTime) + FIELD_SEPARATOR
+         + String.valueOf(lastCheckpointTxId) + FIELD_SEPARATOR
+         + String.valueOf(lastLogRollTxId) + FIELD_SEPARATOR
          + imageDigest.toString() + FIELD_SEPARATOR
          + clusterID + FIELD_SEPARATOR
          + blockpoolID ;
   }
 
   void validateStorageInfo(FSImage si) throws IOException {
-    if(layoutVersion != si.getLayoutVersion()
-        || namespaceID != si.getNamespaceID() 
-        || cTime != si.getStorage().cTime
-        || checkpointTime != si.getStorage().getCheckpointTime() 
-        || !imageDigest.equals(si.getStorage().imageDigest)
-        || !clusterID.equals(si.getClusterID())
-        || !blockpoolID.equals(si.getBlockPoolID())) {
+    if(layoutVersion != si.getStorage().layoutVersion
+       || namespaceID != si.getStorage().namespaceID 
+       || cTime != si.getStorage().cTime
+       || !imageDigest.equals(si.getStorage().getImageDigest())
+       || !clusterID.equals(si.getClusterID())
+       || !blockpoolID.equals(si.getBlockPoolID())) {
       // checkpointTime can change when the image is saved - do not compare
       throw new IOException("Inconsistent checkpoint fields.\n"
           + "LV = " + layoutVersion + " namespaceID = " + namespaceID
-          + " cTime = " + cTime + "; checkpointTime = " + checkpointTime
+          + " cTime = " + cTime
           + " ; imageDigest = " + imageDigest
           + " ; clusterId = " + clusterID
           + " ; blockpoolId = " + blockpoolID
           + ".\nExpecting respectively: "
-          + si.getLayoutVersion() + "; " 
-          + si.getNamespaceID() + "; " + si.getStorage().cTime
-          + "; " + si.getStorage().getCheckpointTime() + "; " 
-          + si.getStorage().imageDigest
+          + si.getStorage().layoutVersion + "; " 
+          + si.getStorage().namespaceID + "; " + si.getStorage().cTime
+          + "; " + si.getStorage().getImageDigest()
           + "; " + si.getClusterID() + "; " 
           + si.getBlockPoolID() + ".");
     }
@@ -137,19 +140,16 @@ public class CheckpointSignature extends
   // Comparable interface
   //
   public int compareTo(CheckpointSignature o) {
-    return 
-      (layoutVersion < o.layoutVersion) ? -1 : 
-                  (layoutVersion > o.layoutVersion) ? 1 :
-      (namespaceID < o.namespaceID) ? -1 : (namespaceID > o.namespaceID) ? 1 :
-      (cTime < o.cTime) ? -1 : (cTime > o.cTime) ? 1 :
-      (editsTime < o.editsTime) ? -1 : (editsTime > o.editsTime) ? 1 :
-      (checkpointTime < o.checkpointTime) ? -1 : 
-                  (checkpointTime > o.checkpointTime) ? 1 :
-      (clusterID.compareTo(o.clusterID) < 0) ? -1 : 
-                  (clusterID.compareTo(o.clusterID) > 0) ? 1 :
-      (blockpoolID.compareTo(o.blockpoolID) < 0) ? -1 : 
-                  (blockpoolID.compareTo(o.blockpoolID) > 0) ? 1 :
-                    imageDigest.compareTo(o.imageDigest);
+    return ComparisonChain.start()
+      .compare(layoutVersion, o.layoutVersion)
+      .compare(namespaceID, o.namespaceID)
+      .compare(cTime, o.cTime)
+      .compare(lastCheckpointTxId, o.lastCheckpointTxId)
+      .compare(lastLogRollTxId, o.lastLogRollTxId)
+      .compare(imageDigest, o.imageDigest)
+      .compare(clusterID, o.clusterID)
+      .compare(blockpoolID, o.blockpoolID)
+      .result();
   }
 
   public boolean equals(Object o) {
@@ -161,9 +161,9 @@ public class CheckpointSignature extends
 
   public int hashCode() {
     return layoutVersion ^ namespaceID ^
-            (int)(cTime ^ editsTime ^ checkpointTime) ^
-            imageDigest.hashCode() ^ clusterID.hashCode()
-            ^ blockpoolID.hashCode();
+            (int)(cTime ^ lastCheckpointTxId ^ lastLogRollTxId)
+            ^ clusterID.hashCode() ^ blockpoolID.hashCode()
+            ^ imageDigest.hashCode();
   }
 
   /////////////////////////////////////////////////
@@ -172,16 +172,16 @@ public class CheckpointSignature extends
   public void write(DataOutput out) throws IOException {
     super.write(out);
     WritableUtils.writeString(out, blockpoolID);
-    out.writeLong(editsTime);
-    out.writeLong(checkpointTime);
+    out.writeLong(lastCheckpointTxId);
+    out.writeLong(lastLogRollTxId);
     imageDigest.write(out);
   }
 
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
     blockpoolID = WritableUtils.readString(in);
-    editsTime = in.readLong();
-    checkpointTime = in.readLong();
+    lastCheckpointTxId = in.readLong();
+    lastLogRollTxId = in.readLong();
     imageDigest = new MD5Hash();
     imageDigest.readFields(in);
   }

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
Mon May  9 00:32:22 2011
@@ -84,6 +84,9 @@ public class FSEditLog implements NNStor
 
   // stores the last synced transactionId.
   private long synctxid = 0;
+  
+  // store the last txid written to "edits" before rolling to edits_new
+  private long lastRollTxId = -1;
 
   // the time of printing the statistics to the log file.
   private long lastPrintTime;
@@ -348,6 +351,16 @@ public class FSEditLog implements NNStor
   }
   
   /**
+   * @return the last transaction ID written to "edits" before rolling to
+   * edits_new
+   */
+  synchronized long getLastRollTxId() {
+    Preconditions.checkState(state == State.WRITING_EDITS_NEW,
+        "Bad state: %s", state);
+    return lastRollTxId;
+  }
+  
+  /**
    * Set the transaction ID to use for the next transaction written.
    */
   synchronized void setNextTxId(long nextTxid) {
@@ -791,6 +804,8 @@ public class FSEditLog implements NNStor
     }
 
     waitForSyncToFinish();
+    
+    lastRollTxId = getLastWrittenTxId();
 
     // check if any of failed storage is now available and put it back
     storage.attemptRestoreRemovedStorage();
@@ -871,16 +886,6 @@ public class FSEditLog implements NNStor
   }
 
   /**
-   * Returns the timestamp of the edit log
-   */
-  synchronized long getFsEditTime() {
-    Iterator<StorageDirectory> it = storage.dirIterator(NameNodeDirType.EDITS);
-    if(it.hasNext())
-      return NNStorage.getEditFile(it.next()).lastModified();
-    return 0;
-  }
-
-  /**
    * Return the txid of the last synced transaction.
    * For test use only
    */
@@ -958,12 +963,6 @@ public class FSEditLog implements NNStor
   }
 
 
-  void incrementCheckpointTime() {
-    storage.incrementCheckpointTime();
-    Writable[] args = {new LongWritable(storage.getCheckpointTime())};
-    logEdit(OP_CHECKPOINT_TIME, args);
-  }
-
   synchronized void releaseBackupStream(NamenodeRegistration registration) {
     /*
     Iterator<EditLogOutputStream> it =

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
Mon May  9 00:32:22 2011
@@ -54,9 +54,7 @@ public enum FSEditLogOpCodes {
   OP_CANCEL_DELEGATION_TOKEN    ((byte) 20),
   OP_UPDATE_MASTER_KEY          ((byte) 21),
   // must be same as NamenodeProtocol.JA_JSPOOL_START
-  OP_JSPOOL_START               ((byte)102),
-  // must be same as NamenodeProtocol.JA_CHECKPOINT_TIME
-  OP_CHECKPOINT_TIME            ((byte)103);
+  OP_JSPOOL_START               ((byte)102);
 
   private byte opCode;
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
Mon May  9 00:32:22 2011
@@ -25,7 +25,6 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -69,9 +68,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
 public class FSImage implements NNStorageListener, Closeable {
   protected static final Log LOG = LogFactory.getLog(FSImage.class.getName());
 
-  private static final SimpleDateFormat DATE_FORM =
-      new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
-
   // checkpoint states
   enum CheckpointStates{START, ROLLED_EDITS, UPLOAD_START, UPLOAD_DONE; }
 
@@ -255,7 +251,6 @@ public class FSImage implements NNStorag
     storage.verifyDistributedUpgradeProgress(startOpt);
 
     // 2. Format unformatted dirs.
-    storage.setCheckpointTime(0L);
     for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
       StorageState curState = dataDirStates.get(sd);
@@ -370,7 +365,6 @@ public class FSImage implements NNStorag
     storage.cTime = now();  // generate new cTime for the state
     int oldLV = storage.getLayoutVersion();
     storage.layoutVersion = FSConstants.LAYOUT_VERSION;
-    storage.setCheckpointTime(now());
     for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
       LOG.info("Upgrading image directory " + sd.getRoot()
@@ -495,7 +489,8 @@ public class FSImage implements NNStorag
     }
     // return back the real image
     realImage.getStorage().setStorageInfo(ckptImage.getStorage());
-    storage.setCheckpointTime(ckptImage.getStorage().getCheckpointTime());
+    realImage.getEditLog().setNextTxId(ckptImage.getEditLog().getLastWrittenTxId()+1);
+
     fsNamesys.dir.fsImage = realImage;
     realImage.getStorage().setBlockPoolID(ckptImage.getBlockPoolID());
     // and save it but keep the same checkpointTime
@@ -746,8 +741,7 @@ public class FSImage implements NNStorag
     storage.attemptRestoreRemovedStorage();
 
     editLog.close();
-    if(renewCheckpointTime)
-      storage.setCheckpointTime(now());
+
     List<StorageDirectory> errorSDs =
       Collections.synchronizedList(new ArrayList<StorageDirectory>());
 
@@ -830,8 +824,9 @@ public class FSImage implements NNStorag
     if (dirType.isOfType(NameNodeDirType.EDITS))
       editLog.createEditLogFile(NNStorage.getStorageFile(sd,
                                                          NameNodeFile.EDITS));
-    // write version and time files
+    // write version and txid files
     sd.write();
+    storage.writeTransactionIdFile(sd, getEditLog().getLastWrittenTxId());
   }
 
 
@@ -871,7 +866,7 @@ public class FSImage implements NNStorag
     // Renames new image
     //
     renameCheckpoint();
-    resetVersion(renewCheckpointTime, newImageDigest);
+    resetVersion(newImageDigest);
   }
 
   /**
@@ -904,13 +899,11 @@ public class FSImage implements NNStorag
   }
 
   /**
-   * Updates version and fstime files in all directories (fsimage and edits).
+   * Updates version and txid files in all directories (fsimage and edits).
    */
-  void resetVersion(boolean renewCheckpointTime, MD5Hash newImageDigest) 
+  void resetVersion(MD5Hash newImageDigest) 
       throws IOException {
     storage.layoutVersion = FSConstants.LAYOUT_VERSION;
-    if(renewCheckpointTime)
-      storage.setCheckpointTime(now());
     storage.setImageDigest(newImageDigest);
     
     ArrayList<StorageDirectory> al = null;
@@ -947,8 +940,10 @@ public class FSImage implements NNStorag
     getEditLog().rollEditLog();
     ckptState = CheckpointStates.ROLLED_EDITS;
     // If checkpoint fails this should be the most recent image, therefore
-    storage.incrementCheckpointTime();
-    return new CheckpointSignature(this);
+    storage.writeTransactionIdFileToStorage(getEditLog().getLastRollTxId());
+    CheckpointSignature signature = new CheckpointSignature(this);
+    LOG.info("rollEditLog returned: " + signature);
+    return signature;
   }
 
   /**
@@ -961,14 +956,11 @@ public class FSImage implements NNStorag
                              ckptState);
     } 
     // verify token
-    long modtime = getEditLog().getFsEditTime();
-    if (sig.editsTime != modtime) {
-      throw new IOException("Namenode has an edit log with timestamp of " +
-                            DATE_FORM.format(new Date(modtime)) +
-                            " but new checkpoint was created using editlog " +
-                            " with timestamp " + 
-                            DATE_FORM.format(new Date(sig.editsTime)) + 
-                            ". Checkpoint Aborted.");
+    long expectedTxId = getEditLog().getLastWrittenTxId();
+    if (sig.lastLogRollTxId != expectedTxId) {
+      throw new IOException("Namenode has an edit log corresponding to txid " +
+          expectedTxId + " but new checkpoint was created using editlog " +
+          "ending at txid " + sig.lastLogRollTxId + ". Checkpoint Aborted.");
     }
     sig.validateStorageInfo(this);
     ckptState = FSImage.CheckpointStates.UPLOAD_START;
@@ -1007,15 +999,15 @@ public class FSImage implements NNStorag
             && bnReg.getCTime() > storage.getCTime())
         || (bnReg.getLayoutVersion() == storage.getLayoutVersion()
             && bnReg.getCTime() == storage.getCTime()
-            && bnReg.getCheckpointTime() > storage.getCheckpointTime()))
+            && bnReg.getCheckpointTxId() > storage.getCheckpointTxId()))
       // remote node has newer image age
       msg = "Name node " + bnReg.getAddress()
             + " has newer image layout version: LV = " +bnReg.getLayoutVersion()
             + " cTime = " + bnReg.getCTime()
-            + " checkpointTime = " + bnReg.getCheckpointTime()
+            + " checkpointTxId = " + bnReg.getCheckpointTxId()
             + ". Current version: LV = " + storage.getLayoutVersion()
             + " cTime = " + storage.getCTime()
-            + " checkpointTime = " + storage.getCheckpointTime();
+            + " checkpointTxId = " + storage.getCheckpointTxId();
     if(msg != null) {
       LOG.error(msg);
       return new NamenodeCommand(NamenodeProtocol.ACT_SHUTDOWN);
@@ -1023,7 +1015,7 @@ public class FSImage implements NNStorag
     boolean isImgObsolete = true;
     if(bnReg.getLayoutVersion() == storage.getLayoutVersion()
         && bnReg.getCTime() == storage.getCTime()
-        && bnReg.getCheckpointTime() == storage.getCheckpointTime())
+        && bnReg.getCheckpointTxId() == storage.getCheckpointTxId())
       isImgObsolete = false;
     boolean needToReturnImg = true;
     if(storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0)

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageOldStorageInspector.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageOldStorageInspector.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageOldStorageInspector.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageOldStorageInspector.java
Mon May  9 00:32:22 2011
@@ -18,7 +18,9 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 
+import java.io.DataInputStream;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -88,7 +90,7 @@ class FSImageOldStorageInspector extends
       editsDirs.add(sd.getRoot().getCanonicalPath());
     }
     
-    long checkpointTime = NNStorage.readCheckpointTime(sd);
+    long checkpointTime = readCheckpointTime(sd);
 
     checkpointTimes.add(checkpointTime);
     
@@ -112,6 +114,27 @@ class FSImageOldStorageInspector extends
     isUpgradeFinalized = isUpgradeFinalized && !sd.getPreviousDir().exists();   

   }
 
+  /**
+   * Determine the checkpoint time of the specified StorageDirectory
+   *
+   * @param sd StorageDirectory to check
+   * @return If file exists and can be read, last checkpoint time. If not, 0L.
+   * @throws IOException On errors processing file pointed to by sd
+   */
+  static long readCheckpointTime(StorageDirectory sd) throws IOException {
+    File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
+    long timeStamp = 0L;
+    if (timeFile.exists() && timeFile.canRead()) {
+      DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
+      try {
+        timeStamp = in.readLong();
+      } finally {
+        in.close();
+      }
+    }
+    return timeStamp;
+  }
+
   @Override
   boolean isUpgradeFinalized() {
     return isUpgradeFinalized;

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
Mon May  9 00:32:22 2011
@@ -19,9 +19,10 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 
-import java.io.DataOutputStream;
+import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileOutputStream;
+import java.io.FileReader;
 import java.io.IOException;
 import java.io.DataInputStream;
 import java.io.FileInputStream;
@@ -55,9 +56,13 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.JournalStream.JournalType;
 import org.apache.hadoop.conf.Configuration;
 
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.net.DNS;
 
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+
 /**
  * NNStorage is responsible for management of the StorageDirectories used by
  * the NameNode.
@@ -74,7 +79,8 @@ public class NNStorage extends Storage i
   //
   enum NameNodeFile {
     IMAGE     ("fsimage"),
-    TIME      ("fstime"),
+    TIME      ("fstime"), // from "old" pre-HDFS-1073 format
+    SEEN_TXID ("seen_txid"),
     EDITS     ("edits"),
     IMAGE_NEW ("fsimage.ckpt"),
     EDITS_NEW ("edits.new"), // from "old" pre-HDFS-1073 format
@@ -151,14 +157,13 @@ public class NNStorage extends Storage i
   private Object restorationLock = new Object();
   private boolean disablePreUpgradableLayoutCheck = false;
 
-  private long checkpointTime = -1L;  // The age of the image
 
   /**
    * TxId of the last transaction that was included in the most
    * recent fsimage file. This does not include any transactions
    * that have since been written to the edit log.
    */
-  protected long checkpointTxId;
+  protected long checkpointTxId = -1;
 
   /**
    * list of failed (and thus removed) storages
@@ -420,88 +425,59 @@ public class NNStorage extends Storage i
     }
     return list;
   }
-
+  
   /**
-   * Determine the checkpoint time of the specified StorageDirectory
+   * Determine the last transaction ID noted in this storage directory.
+   * This txid is stored in a special seen_txid file since it might not
+   * correspond to the latest image or edit log. For example, an image-only
+   * directory will have this txid incremented when edits logs roll, even
+   * though the edits logs are in a different directory.
    *
    * @param sd StorageDirectory to check
-   * @return If file exists and can be read, last checkpoint time. If not, 0L.
+   * @return If file exists and can be read, last recorded txid. If not, 0L.
    * @throws IOException On errors processing file pointed to by sd
    */
-  static long readCheckpointTime(StorageDirectory sd) throws IOException {
-    File timeFile = getStorageFile(sd, NameNodeFile.TIME);
-    long timeStamp = 0L;
-    if (timeFile.exists() && timeFile.canRead()) {
-      DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
+  static long readTransactionIdFile(StorageDirectory sd) throws IOException {
+    File txidFile = getStorageFile(sd, NameNodeFile.SEEN_TXID);
+    long txid = 0L;
+    if (txidFile.exists() && txidFile.canRead()) {
+      BufferedReader br = new BufferedReader(new FileReader(txidFile));
       try {
-        timeStamp = in.readLong();
+        txid = Long.valueOf(br.readLine());
       } finally {
-        in.close();
+        IOUtils.cleanup(LOG, br);
       }
     }
-    return timeStamp;
+    return txid;
   }
-
+  
   /**
    * Write last checkpoint time into a separate file.
    *
    * @param sd
    * @throws IOException
    */
-  public void writeCheckpointTime(StorageDirectory sd) throws IOException {
-    if (checkpointTime < 0L)
-      return; // do not write negative time
-    File timeFile = getStorageFile(sd, NameNodeFile.TIME);
-    if (timeFile.exists() && ! timeFile.delete()) {
-        LOG.error("Cannot delete chekpoint time file: "
-                  + timeFile.getCanonicalPath());
+  void writeTransactionIdFile(StorageDirectory sd, long txid) throws IOException {
+    Preconditions.checkArgument(txid >= 0, "bad txid: " + txid);
+    
+    File txIdFile = getStorageFile(sd, NameNodeFile.SEEN_TXID);
+    if (txIdFile.exists() && ! txIdFile.delete()) {
+        LOG.error("Cannot delete checkpoint time file: "
+                  + txIdFile.getCanonicalPath());
     }
-    FileOutputStream fos = new FileOutputStream(timeFile);
-    DataOutputStream out = new DataOutputStream(fos);
+    LOG.info("===> writing txid " + txid + " to " + txIdFile);
+    FileOutputStream fos = new FileOutputStream(txIdFile);
     try {
-      out.writeLong(checkpointTime);
-      out.flush();
+      fos.write(String.valueOf(txid).getBytes());
+      fos.write('\n');
+      fos.flush();
       fos.getChannel().force(true);
     } finally {
-      out.close();
+      IOUtils.cleanup(LOG, fos);
     }
   }
 
   /**
-   * Record new checkpoint time in order to
-   * distinguish healthy directories from the removed ones.
-   * If there is an error writing new checkpoint time, the corresponding
-   * storage directory is removed from the list.
-   */
-  public void incrementCheckpointTime() {
-    setCheckpointTimeInStorage(checkpointTime + 1);
-  }
-
-  /**
-   * The age of the namespace state.<p>
-   * Reflects the latest time the image was saved.
-   * Modified with every save or a checkpoint.
-   * Persisted in VERSION file.
-   *
-   * @return the current checkpoint time.
-   */
-  public long getCheckpointTime() {
-    return checkpointTime;
-  }
-
-  /**
-   * Set the checkpoint time.
-   *
-   * This method does not persist the checkpoint time to storage immediately.
-   * 
-   * @see #setCheckpointTimeInStorage
-   * @param newCpT the new checkpoint time.
-   */
-  public void setCheckpointTime(long newCpT) {
-    checkpointTime = newCpT;
-  }
-
-  /**
    * Set the transaction ID of the last checkpoint
    */
   void setCheckpointTxId(long checkpointTxId) {
@@ -516,22 +492,23 @@ public class NNStorage extends Storage i
   }
 
   /**
-   * Set the current checkpoint time. Writes the new checkpoint
-   * time to all available storage directories.
-   * @param newCpT The new checkpoint time.
-   */
-  public void setCheckpointTimeInStorage(long newCpT) {
-    checkpointTime = newCpT;
-    // Write new checkpoint time in all storage directories
-    for(Iterator<StorageDirectory> it =
-                          dirIterator(); it.hasNext();) {
-      StorageDirectory sd = it.next();
+   * Write a small file in all available storage directories that
+   * indicates that the namespace has reached some given transaction ID.
+   * 
+   * This is used when the image is loaded to avoid accidental rollbacks
+   * in the case where an edit log is fully deleted but there is no
+   * checkpoint. See {@link TestNameEditsConfigs#testNameEditsConfigsFailure()}
+   * @param txid the txid that has been reached
+   */
+  public void writeTransactionIdFileToStorage(long txid) {
+    // Write txid marker in all storage directories
+    for (StorageDirectory sd : storageDirs) {
       try {
-        writeCheckpointTime(sd);
+        writeTransactionIdFile(sd, txid);
       } catch(IOException e) {
         // Close any edits stream associated with this dir and remove directory
-        LOG.warn("incrementCheckpointTime failed on "
-                 + sd.getRoot().getPath() + ";type="+sd.getStorageDirType());
+        LOG.warn("writeTransactionIdFileToStorage failed on " + sd,
+            e);
       }
     }
   }
@@ -600,6 +577,7 @@ public class NNStorage extends Storage i
       listener.formatOccurred(sd);
     }
     sd.write();
+    writeTransactionIdFile(sd, 0);
 
     LOG.info("Storage directory " + sd.getRoot()
              + " has been successfully formatted.");
@@ -614,7 +592,6 @@ public class NNStorage extends Storage i
     this.clusterID = clusterId;
     this.blockpoolID = newBlockPoolID();
     this.cTime = 0L;
-    this.setCheckpointTime(now());
     for (Iterator<StorageDirectory> it =
                            dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
@@ -738,12 +715,10 @@ public class NNStorage extends Storage i
           " has checkpoint transaction id when version is " 
           + layoutVersion);
     }
-
-    this.setCheckpointTime(readCheckpointTime(sd));
   }
 
   /**
-   * Write last checkpoint time and version file into the storage directory.
+   * Write version file into the storage directory.
    *
    * The version file should always be written last.
    * Missing or corrupted version file indicates that
@@ -775,7 +750,6 @@ public class NNStorage extends Storage i
 
     props.setProperty(MESSAGE_DIGEST_PROPERTY, imageDigest.toString());
     props.setProperty(CHECKPOINT_TXID_PROPERTY, String.valueOf(checkpointTxId));
-    writeCheckpointTime(sd);
   }
 
   /**
@@ -958,8 +932,7 @@ public class NNStorage extends Storage i
     if (this.storageDirs.remove(sd)) {
       this.removedStorageDirs.add(sd);
     }
-    incrementCheckpointTime();
-
+    
     lsd = listStorageDirectories();
     LOG.debug("at the end current list of storage dirs:" + lsd);
   }

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
Mon May  9 00:32:22 2011
@@ -356,7 +356,8 @@ public class NameNode implements Namenod
     nodeRegistration = new NamenodeRegistration(
         getHostPortString(rpcAddress),
         getHostPortString(httpAddress),
-        getFSImage().getStorage(), getRole(), getFSImage().getStorage().getCheckpointTime());
+        getFSImage().getStorage(), getRole(),
+        getFSImage().getStorage().getCheckpointTxId());
     return nodeRegistration;
   }
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
Mon May  9 00:32:22 2011
@@ -341,9 +341,6 @@ public class SecondaryNameNode implement
   
           @Override
           public Boolean run() throws Exception {
-            checkpointImage.getStorage().cTime = sig.cTime;
-            checkpointImage.getStorage().setCheckpointTime(sig.checkpointTime);
-
             // get fsimage
             String fileid;
             Collection<File> list;

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
Mon May  9 00:32:22 2011
@@ -55,7 +55,6 @@ public interface NamenodeProtocol extend
   public static byte JA_IS_ALIVE = 100; // check whether the journal is alive
   public static byte JA_JOURNAL      = 101; // just journal
   public static byte JA_JSPOOL_START = 102;  // = FSEditLogOpCodes.OP_JSPOOL_START
-  public static byte JA_CHECKPOINT_TIME = 103; // = FSEditLogOpCodes.OP_CHECKPOINT_TIME
 
   public final static int ACT_UNKNOWN = 0;    // unknown action   
   public final static int ACT_SHUTDOWN = 50;   // shutdown node

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
Mon May  9 00:32:22 2011
@@ -43,7 +43,7 @@ implements NodeRegistration {
   String rpcAddress;          // RPC address of the node
   String httpAddress;         // HTTP address of the node
   NamenodeRole role;          // node role
-  long checkpointTime = -1L;  // the age of the image
+  long checkpointTxId = -1L;  // txid of the most recent checkpoint
 
   public NamenodeRegistration() {
     super();
@@ -53,13 +53,13 @@ implements NodeRegistration {
                               String httpAddress,
                               StorageInfo storageInfo,
                               NamenodeRole role,
-                              long checkpointTime) {
+                              long checkpointTxId) {
     super();
     this.rpcAddress = address;
     this.httpAddress = httpAddress;
     this.setStorageInfo(storageInfo);
     this.role = role;
-    this.checkpointTime = checkpointTime;
+    this.checkpointTxId= checkpointTxId;
   }
 
   @Override // NodeRegistration
@@ -99,8 +99,8 @@ implements NodeRegistration {
   /**
    * Get the age of the image.
    */
-  public long getCheckpointTime() {
-    return checkpointTime;
+  public long getCheckpointTxId() {
+    return checkpointTxId;
   }
 
   /////////////////////////////////////////////////
@@ -120,7 +120,7 @@ implements NodeRegistration {
     Text.writeString(out, httpAddress);
     Text.writeString(out, role.name());
     super.write(out);
-    out.writeLong(checkpointTime);
+    out.writeLong(checkpointTxId);
   }
 
   @Override // Writable
@@ -129,6 +129,6 @@ implements NodeRegistration {
     httpAddress = Text.readString(in);
     role = NamenodeRole.valueOf(Text.readString(in));
     super.readFields(in);
-    checkpointTime = in.readLong();
+    checkpointTxId = in.readLong();
   }
 }

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
Mon May  9 00:32:22 2011
@@ -46,13 +46,10 @@ public class TestNameEditsConfigs extend
       System.getProperty("test.build.data", "build/test/data"), "dfs/");
 
   protected void setUp() throws java.lang.Exception {
-    if(base_dir.exists())
-      tearDown();
-  }
-
-  protected void tearDown() throws java.lang.Exception {
-    if (!FileUtil.fullyDelete(base_dir)) 
-      throw new IOException("Cannot remove directory " + base_dir);
+    if(base_dir.exists()) {
+      if (!FileUtil.fullyDelete(base_dir)) 
+        throw new IOException("Cannot remove directory " + base_dir);
+    }
   }
 
   private void writeFile(FileSystem fileSys, Path name, int repl)
@@ -402,6 +399,12 @@ public class TestNameEditsConfigs extend
     }
 
     // Add old shared directory for name and edits along with latest edits
+    // This case is currently disabled, because once we have HDFS-1073 complete
+    // we can easily distinguish between the edits file in the old dir and the
+    // edits file in the new one based on their file names. This part of the
+    // test will be re-enabled to make sure the NN starts with valid edits
+    // in this case. TODO
+    /*    
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath() +
@@ -419,5 +422,6 @@ public class TestNameEditsConfigs extend
     } finally {
       cluster = null;
     }
+    */
   }
 }

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
Mon May  9 00:32:22 2011
@@ -175,7 +175,7 @@ public class TestStorageRestore extends 
 
   
   /**
-   * read currentCheckpointTime directly from the file
+   * read currentCheckpointTime directly from the file  TODO this is dup code
    * @param currDir
    * @return the checkpoint time
    * @throws IOException
@@ -207,11 +207,7 @@ public class TestStorageRestore extends 
     File fsEdits1 = new File(path1, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.EDITS.getName());
     File fsEdits2 = new File(path2, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.EDITS.getName());
     File fsEdits3 = new File(path3, Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.EDITS.getName());
-    
-    long chkPt1 = readCheckpointTime(new File(path1, Storage.STORAGE_DIR_CURRENT));
-    long chkPt2 = readCheckpointTime(new File(path2, Storage.STORAGE_DIR_CURRENT));
-    long chkPt3 = readCheckpointTime(new File(path3, Storage.STORAGE_DIR_CURRENT));
-    
+
     String md5_1 = null,md5_2 = null,md5_3 = null;
     try {
       md5_1 = getFileMD5(fsEdits1);
@@ -226,7 +222,6 @@ public class TestStorageRestore extends 
     LOG.info("++++ edits files = "+fsEdits1.getAbsolutePath() + "," + fsEdits2.getAbsolutePath()
+ ","+ fsEdits3.getAbsolutePath());
     LOG.info("checkFiles compares lengths: img1=" + fsImg1.length()  + ",img2=" + fsImg2.length()
 + ",img3=" + fsImg3.length());
     LOG.info("checkFiles compares lengths: edits1=" + fsEdits1.length()  + ",edits2=" + fsEdits2.length()
 + ",edits3=" + fsEdits3.length());
-    LOG.info("checkFiles compares chkPts: name1=" + chkPt1  + ",name2=" + chkPt2  + ",name3="
+ chkPt3);
     LOG.info("checkFiles compares md5s: " + fsEdits1.getAbsolutePath() + 
         "="+ md5_1  + "," + fsEdits2.getAbsolutePath() + "=" + md5_2  + "," +
         fsEdits3.getAbsolutePath() + "=" + md5_3);  
@@ -239,10 +234,6 @@ public class TestStorageRestore extends 
       assertTrue(fsEdits1.length() == fsEdits3.length());
       assertTrue(md5_1.equals(md5_2));
       assertTrue(md5_1.equals(md5_3));
-      
-      // checkpoint times
-      assertTrue(chkPt1 == chkPt2);
-      assertTrue(chkPt1 == chkPt3);
     } else {
       // should be different
       //assertTrue(fsImg1.length() != fsImg2.length());
@@ -255,11 +246,6 @@ public class TestStorageRestore extends 
       
       assertTrue(!md5_1.equals(md5_2));
       assertTrue(!md5_1.equals(md5_3));
-      
-      
-   // checkpoint times
-      assertTrue(chkPt1 > chkPt2);
-      assertTrue(chkPt1 > chkPt3);
     }
   }
   

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java?rev=1100847&r1=1100846&r2=1100847&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
(original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
Mon May  9 00:32:22 2011
@@ -83,7 +83,6 @@ public class TestOfflineEditsViewer {
     obsoleteOpCodes.put(FSEditLogOpCodes.OP_CLEAR_NS_QUOTA, true);
     // these are not written to files
     obsoleteOpCodes.put(FSEditLogOpCodes.OP_JSPOOL_START, true);
-    obsoleteOpCodes.put(FSEditLogOpCodes.OP_CHECKPOINT_TIME, true);
   }
 
   /**



Mime
View raw message