hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sur...@apache.org
Subject svn commit: r1073857 - in /hadoop/hdfs/branches/HDFS-1052: ./ src/java/org/apache/hadoop/hdfs/server/common/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/server/protoc...
Date Wed, 23 Feb 2011 18:06:11 GMT
Author: suresh
Date: Wed Feb 23 18:06:11 2011
New Revision: 1073857

URL: http://svn.apache.org/viewvc?rev=1073857&view=rev
Log:
HDFS-1632. Federation: data node storage structure changes and introduce block pool storage. Contributed by Tanping Wang.


Added:
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolStorage.java
Modified:
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=1073857&r1=1073856&r2=1073857&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Wed Feb 23 18:06:11 2011
@@ -19,6 +19,9 @@ Trunk (unreleased changes)
     HDFS-1450. Federation: Introduce block pool ID into FSDatasetInterface.
     (suresh)
 
+    HDFS-1632. Federation: data node storage structure changes and
+    introduce block pool storage. (tanping via suresh)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1073857&r1=1073856&r2=1073857&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java Wed Feb 23 18:06:11 2011
@@ -78,10 +78,13 @@ public abstract class Storage extends St
   // last layout version that did not support persistent rbw replicas
   public static final int PRE_RBW_LAYOUT_VERSION = -19;
   
+  // last layout version that is before federation
+  public static final int LAST_PRE_FEDERATION_LAYOUT_VERSION = -24;
+  
   private   static final String STORAGE_FILE_LOCK     = "in_use.lock";
   protected static final String STORAGE_FILE_VERSION  = "VERSION";
-  public static final String STORAGE_DIR_CURRENT   = "current";
-  private   static final String STORAGE_DIR_PREVIOUS  = "previous";
+  public static final String STORAGE_DIR_CURRENT      = "current";
+  protected static final String STORAGE_DIR_PREVIOUS  = "previous";
   private   static final String STORAGE_TMP_REMOVED   = "removed.tmp";
   private   static final String STORAGE_TMP_PREVIOUS  = "previous.tmp";
   private   static final String STORAGE_TMP_FINALIZED = "finalized.tmp";
@@ -112,7 +115,7 @@ public abstract class Storage extends St
     public boolean isOfType(StorageDirType type);
   }
   
-  private NodeType storageType;    // Type of the node using this storage 
+  protected NodeType storageType;    // Type of the node using this storage 
   protected List<StorageDirectory> storageDirs = new ArrayList<StorageDirectory>();
   
   private class DirIterator implements Iterator<StorageDirectory> {
@@ -669,11 +672,6 @@ public abstract class Storage extends St
     this.storageType = type;
   }
   
-  protected Storage(NodeType type, int nsID, String cid, String bpid, long cT) {
-    super(FSConstants.LAYOUT_VERSION, nsID, cid, bpid, cT);
-    this.storageType = type;
-  }
-  
   protected Storage(NodeType type, StorageInfo storageInfo) {
     super(storageInfo);
     this.storageType = type;
@@ -738,39 +736,27 @@ public abstract class Storage extends St
   protected void getFields(Properties props, 
                            StorageDirectory sd 
                            ) throws IOException {
-    String sv, st, sid, scid, sbpid, sct;
+    String sv, st, sid, scid, sct;
     sv = props.getProperty("layoutVersion");
     st = props.getProperty("storageType");
     sid = props.getProperty("namespaceID");
     scid = props.getProperty("clusterID");
-    sbpid = props.getProperty("blockpoolID");
     sct = props.getProperty("cTime");
-    if (sv == null || st == null || sid == null || scid == null || sbpid == null
-        || sct == null) {
+    if (sv == null || st == null || sid == null || scid == null || 
+        sct == null) {
       throw new InconsistentFSStateException(sd.root,
         "file " + STORAGE_FILE_VERSION + " is invalid.");
     }
+    
     int rv = Integer.parseInt(sv);
     NodeType rt = NodeType.valueOf(st);
     int rid = Integer.parseInt(sid);
-    String rcid = scid;
-    String rbpid = sbpid;
     long rct = Long.parseLong(sct);
-    if (!storageType.equals(rt) ||
-        !((namespaceID == 0) || (rid == 0) || namespaceID == rid) ||
-        !( (clusterID.equals(rcid)) || (clusterID.equals("")) )   ||
-        !( (blockpoolID.equals(rbpid)) || (clusterID.equals("")) )
-        )
-      throw new InconsistentFSStateException(sd.root,
-                                             "is incompatible with others.");
-    if (rv < FSConstants.LAYOUT_VERSION) // future version
-      throw new IncorrectVersionException(rv, "storage directory " 
-                                          + sd.root.getCanonicalPath());
-    layoutVersion = rv;
-    storageType = rt;
-    namespaceID = rid;
-    clusterID = rcid;
-    blockpoolID = rbpid;
+    
+    setClusterID(sd.root, scid);
+    setNamespaceID(sd.root, rid);
+    setLayoutVersion(sd.root, rv);
+    setStorageType(sd.root, rt);
     cTime = rct;
   }
   
@@ -788,7 +774,6 @@ public abstract class Storage extends St
     props.setProperty("storageType", storageType.toString());
     props.setProperty("namespaceID", String.valueOf(namespaceID));
     props.setProperty("clusterID", clusterID);
-    props.setProperty("blockpoolID", blockpoolID);
     props.setProperty("cTime", String.valueOf(cTime));
   }
 
@@ -866,7 +851,6 @@ public abstract class Storage extends St
   public static String getRegistrationID(StorageInfo storage) {
     return "NS-" + Integer.toString(storage.getNamespaceID())
       + "-" + storage.getClusterID()
-      + "-" + storage.getBlockPoolID()
       + "-" + Integer.toString(storage.getLayoutVersion())
       + "-" + Long.toString(storage.getCTime());
   }
@@ -886,4 +870,14 @@ public abstract class Storage extends St
     file.writeBytes(messageForPreUpgradeVersion);
     file.getFD().sync();
   }
+  
+  /** Validate and set storage type */
+  protected void setStorageType(File storage, NodeType type)
+      throws InconsistentFSStateException {
+    if (!storageType.equals(type)) {
+      throw new InconsistentFSStateException(storage,
+          "node type is incompatible with others.");
+    }
+    storageType = type;
+  }
 }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java?rev=1073857&r1=1073856&r2=1073857&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java Wed Feb 23 18:06:11 2011
@@ -19,13 +19,14 @@ package org.apache.hadoop.hdfs.server.co
 
 import java.io.DataInput;
 import java.io.DataOutput;
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 
-
 /**
  * Common class for storage information.
  * 
@@ -36,17 +37,15 @@ public class StorageInfo implements Writ
   public int   layoutVersion;   // layout version of the storage data
   public int   namespaceID;     // id of the file system
   public String clusterID;      // id of the cluster
-  public String blockpoolID;    // id of the blockpool
   public long  cTime;           // creation time of the file system state
   
   public StorageInfo () {
-    this(0, 0, "", "", 0L);
+    this(0, 0, "", 0L);
   }
   
-  public StorageInfo(int layoutV, int nsID, String cid, String bpid, long cT) {
+  public StorageInfo(int layoutV, int nsID, String cid, long cT) {
     layoutVersion = layoutV;
     clusterID = cid;
-    blockpoolID = bpid;
     namespaceID = nsID;
     cTime = cT;
   }
@@ -73,11 +72,6 @@ public class StorageInfo implements Writ
   public String    getClusterID()  { return clusterID; }
   
   /**
-   * blockpool id of the file system.<p>
-   */
-  public String    getBlockPoolID()  { return blockpoolID; }
-  
-  /**
    * Creation time of the file system state.<p>
    * Modified during upgrades.
    */
@@ -86,7 +80,6 @@ public class StorageInfo implements Writ
   public void   setStorageInfo(StorageInfo from) {
     layoutVersion = from.layoutVersion;
     clusterID = from.clusterID;
-    blockpoolID = from.blockpoolID;
     namespaceID = from.namespaceID;
     cTime = from.cTime;
   }
@@ -98,7 +91,6 @@ public class StorageInfo implements Writ
     out.writeInt(getLayoutVersion());
     out.writeInt(getNamespaceID());
     WritableUtils.writeString(out, clusterID);
-    WritableUtils.writeString(out, blockpoolID); 
     out.writeLong(getCTime());
   }
 
@@ -106,7 +98,36 @@ public class StorageInfo implements Writ
     layoutVersion = in.readInt();
     namespaceID = in.readInt();
     clusterID = WritableUtils.readString(in);
-    blockpoolID = WritableUtils.readString(in);
     cTime = in.readLong();
   }
+  
+  /** validate and set namespaceID */
+  protected void setNamespaceID(File storage, int nsId)
+      throws InconsistentFSStateException {
+    if (namespaceID != 0 && nsId != 0 && namespaceID != nsId) {
+      throw new InconsistentFSStateException(storage,
+          "namespaceID is incompatible with others.");
+    }
+    namespaceID = nsId;
+  }
+  
+  /** validate and set layout version */ 
+  protected void setLayoutVersion(File storage, int lv)
+      throws IncorrectVersionException, IOException {
+    if (lv < FSConstants.LAYOUT_VERSION) { // future version
+      throw new IncorrectVersionException(lv, "storage directory "
+          + storage.getCanonicalPath());
+    }
+    layoutVersion = lv;
+  }
+  
+  /** validate and set ClusterID */
+  protected void setClusterID(File storage, String cid)
+      throws InconsistentFSStateException {
+    if (!clusterID.equals("") && !cid.equals("") && !clusterID.equals(cid)) {
+      throw new InconsistentFSStateException(storage,
+          "cluster id is incompatible with others.");
+    }
+    clusterID = cid;
+  }
 }

Added: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolStorage.java?rev=1073857&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolStorage.java (added)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolStorage.java Wed Feb 23 18:06:11 2011
@@ -0,0 +1,511 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.util.Daemon;
+
+/**
+ * Manages storage for a block pool.
+ * 
+ * Block pool is a collection of blocks and is stored under the directory:
+ * <StorageDirectory>/current/<Block pool Id>.
+ * 
+ * This class supports the following functionality:
+ * <ol>
+ * <li> Formatting a new block pool storage</li>
+ * <li> Recovering a storage state to a consistent state (if possible)</li>
+ * <li> Taking a snapshot of the block pool during upgrade</li>
+ * <li> Rolling back a block pool to a previous snapshot</li>
+ * <li> Finalizing block storage by deletion of a snapshot</li>
+ * </ol>
+ * 
+ * @see Storage
+ */
+@InterfaceAudience.Private
+public class BlockPoolStorage extends Storage {
+  private static final Pattern BLOCK_POOL_PATH_PATTERN = Pattern
+      .compile("^(.*)"
+          + "(\\/BP-[0-9]+\\-\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\-[0-9]+\\/.*)$");
+
+  private String blockpoolID = ""; // id of the blockpool
+
+  BlockPoolStorage() {
+    super(NodeType.DATA_NODE);
+  }
+
+  BlockPoolStorage(int namespaceID, String bpID, long cTime) {
+    super(NodeType.DATA_NODE);
+    this.namespaceID = namespaceID;
+    this.blockpoolID = bpID;
+    this.cTime = cTime;
+  }
+
+  /**
+   * Analyze storage directories. Recover from previous transitions if required.
+   * 
+   * @param nsInfo namespace information
+   * @param dataDirs storage directories of block pool
+   * @param startOpt startup option
+   * @throws IOException on error
+   */
+  void recoverTransitionRead(NamespaceInfo nsInfo, Collection<File> dataDirs,
+      StartupOption startOpt) throws IOException {
+    assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() 
+        : "Block-pool and name-node layout versions must be the same.";
+
+    // 1. For each BP data directory analyze the state and
+    // check whether all is consistent before transitioning.
+    this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
+    ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(
+        dataDirs.size());
+    for (Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
+      File dataDir = it.next();
+      StorageDirectory sd = new StorageDirectory(dataDir);
+      StorageState curState;
+      try {
+        curState = sd.analyzeStorage(startOpt);
+        // sd is locked but not opened
+        switch (curState) {
+        case NORMAL:
+          break;
+        case NON_EXISTENT:
+          // ignore this storage
+          LOG.info("Storage directory " + dataDir + " does not exist.");
+          it.remove();
+          continue;
+        case NOT_FORMATTED: // format
+          LOG.info("Storage directory " + dataDir + " is not formatted.");
+          LOG.info("Formatting ...");
+          format(sd, nsInfo);
+          break;
+        default: // recovery part is common
+          sd.doRecover(curState);
+        }
+      } catch (IOException ioe) {
+        sd.unlock();
+        throw ioe;
+      }
+      // add to the storage list. This is inherited from parent class, Storage.
+      addStorageDir(sd);
+      dataDirStates.add(curState);
+    }
+
+    if (dataDirs.size() == 0) // none of the data dirs exist
+      throw new IOException(
+          "All specified directories are not accessible or do not exist.");
+
+    // 2. Do transitions
+    // Each storage directory is treated individually.
+    // During startup some of them can upgrade or roll back
+    // while others could be up-to-date for the regular startup.
+    for (int idx = 0; idx < getNumStorageDirs(); idx++) {
+      doTransition(getStorageDir(idx), nsInfo, startOpt);
+      assert getLayoutVersion() == nsInfo.getLayoutVersion() 
+          : "Data-node and name-node layout versions must be the same.";
+      assert getCTime() == nsInfo.getCTime() 
+          : "Data-node and name-node CTimes must be the same.";
+    }
+
+    // 3. Update all storages. Some of them might have just been formatted.
+    this.writeAll();
+  }
+
+  /**
+   * Format a block pool storage. 
+   * @param sd the block pool storage
+   * @param nsInfo the name space info
+   * @throws IOException Signals that an I/O exception has occurred.
+   */
+  void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOException {
+    LOG.info("Formatting block pool " + blockpoolID + " directory "
+        + bpSdir.getCurrentDir());
+    bpSdir.clearDirectory(); // create directory
+    this.layoutVersion = FSConstants.LAYOUT_VERSION;
+    this.cTime = nsInfo.getCTime();
+    this.namespaceID = nsInfo.getNamespaceID();
+    this.blockpoolID = nsInfo.getBlockPoolID();
+    this.storageType = NodeType.DATA_NODE;
+    bpSdir.write();
+  }
+
+  /**
+   * Set layoutVersion, namespaceID and blockpoolID into block pool storage
+   * VERSION file
+   */
+  @Override
+  protected void setFields(Properties props, StorageDirectory sd)
+      throws IOException {
+    props.setProperty("layoutVersion", String.valueOf(layoutVersion));
+    props.setProperty("namespaceID", String.valueOf(namespaceID));
+    props.setProperty("blockpoolID", blockpoolID);
+    props.setProperty("cTime", String.valueOf(cTime));
+    props.setProperty("storageType", storageType.toString());
+  }
+
+  /** Validate and set block pool ID */
+  private void setBlockPoolID(File storage, String bpid)
+      throws InconsistentFSStateException {
+    if (bpid == null || bpid.equals("")) {
+      throw new InconsistentFSStateException(storage, "file "
+          + STORAGE_FILE_VERSION + " is invalid.");
+    }
+    
+    if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
+      throw new InconsistentFSStateException(storage,
+          "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
+    }
+    blockpoolID = bpid;
+  }
+  
+  @Override
+  protected void getFields(Properties props, StorageDirectory sd)
+      throws IOException {
+    String sv = props.getProperty("layoutVersion");
+    String sctime = props.getProperty("cTime");
+    String sid = props.getProperty("namespaceID");
+    String st = props.getProperty("storageType");
+    if (st == null || sv == null || sctime == null || sid == null) {
+      throw new InconsistentFSStateException(sd.getRoot(), "file "
+          + STORAGE_FILE_VERSION + " is invalid.");
+    }
+    int rv = Integer.parseInt(sv);
+    setLayoutVersion(sd.getRoot(), rv);
+    
+    int rid = Integer.parseInt(sid);
+    setNamespaceID(sd.getRoot(), rid);
+    
+    NodeType rt = NodeType.valueOf(st);
+    setStorageType(sd.getRoot(), rt);
+    
+    String sbpid = props.getProperty("blockpoolID");
+    setBlockPoolID(sd.getRoot(), sbpid);
+    
+    cTime = Long.parseLong(sctime);
+  }
+
+  /**
+   * Analyze whether a transition of the BP state is required and
+   * perform it if necessary.
+   * <br>
+   * Rollback if previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime.
+   * Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime Regular
+   * startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime
+   * 
+   * @param sd storage directory,
+   * @param nsInfo namespace info
+   * @param startOpt startup option
+   * @throws IOException
+   */
+  private void doTransition(StorageDirectory sd, // i.e. <SD>/current/<bpid>
+      NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
+    if (startOpt == StartupOption.ROLLBACK)
+      doRollback(sd, nsInfo); // rollback if applicable
+    
+    sd.read();
+    checkVersionUpgradable(this.layoutVersion);
+    assert this.layoutVersion >= FSConstants.LAYOUT_VERSION 
+       : "Future version is not allowed";
+    if (getNamespaceID() != nsInfo.getNamespaceID()) {
+      throw new IOException("Incompatible namespaceIDs in "
+          + sd.getRoot().getCanonicalPath() + ": namenode namespaceID = "
+          + nsInfo.getNamespaceID() + "; datanode namespaceID = "
+          + getNamespaceID());
+    }
+    if (!blockpoolID.equals(nsInfo.getBlockPoolID())) {
+      throw new IOException("Incompatible blockpoolIDs in "
+          + sd.getRoot().getCanonicalPath() + ": namenode blockpoolID = "
+          + nsInfo.getBlockPoolID() + "; datanode blockpoolID = "
+          + blockpoolID);
+    }
+    if (this.layoutVersion == FSConstants.LAYOUT_VERSION
+        && this.cTime == nsInfo.getCTime())
+      return; // regular startup
+    
+    // verify necessity of a distributed upgrade
+    verifyDistributedUpgradeProgress(nsInfo);
+    if (this.layoutVersion > FSConstants.LAYOUT_VERSION
+        || this.cTime < nsInfo.getCTime()) {
+      doUpgrade(sd, nsInfo); // upgrade
+      return;
+    }
+    // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
+    // must shutdown
+    throw new IOException("Datanode state: LV = " + this.getLayoutVersion()
+        + " CTime = " + this.getCTime()
+        + " is newer than the namespace state: LV = "
+        + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime());
+  }
+
+  /**
+   * Upgrade to any release after 0.22 (0.22 included) release e.g. 0.22 => 0.23
+   * Upgrade procedure is as follows:
+   * <ol>
+   * <li>If <SD>/current/<bpid>/previous exists then delete it</li>
+   * <li>Rename <SD>/current/<bpid>/current to
+   * <SD>/current/bpid/current/previous.tmp</li>
+   * <li>Create new <SD>current/<bpid>/current directory</li>
+   * <ol>
+   * <li>Hard links for block files are created from previous.tmp to current</li>
+   * <li>Save new version file in current directory</li>
+   * </ol>
+   * <li>Rename previous.tmp to previous</li> </ol>
+   * 
+   * @param bpSd storage directory <SD>/current/<bpid>
+   * @param nsInfo Namespace Info from the namenode
+   * @throws IOException on error
+   */
+  void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
+    // have to be upgrading between any after 0.22 (0.22 included) release
+    // stored version <= 0.22 && software version < 0.22
+    if (!(this.getLayoutVersion() < LAST_PRE_FEDERATION_LAYOUT_VERSION)) {
+      return;
+    }
+    LOG.info("Upgrading storage directory " + bpSd.getRoot()
+        + ".\n   old LV = " + this.getLayoutVersion() + "; old CTime = "
+        + this.getCTime() + ".\n   new LV = " + nsInfo.getLayoutVersion()
+        + "; new CTime = " + nsInfo.getCTime());
+    // get <SD>/previous directory
+    String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath());
+    StorageDirectory dnSdStorage = new StorageDirectory(new File(dnRoot));
+    File dnPrevDir = dnSdStorage.getPreviousDir();
+    
+    // If <SD>/previous directory exists delete it
+    if (dnPrevDir.exists()) {
+      deleteDir(dnPrevDir);
+    }
+    File bpCurDir = bpSd.getCurrentDir();
+    File bpPrevDir = bpSd.getPreviousDir();
+    assert bpCurDir.exists() : "BP level current directory must exist.";
+    cleanupDetachDir(new File(bpCurDir, DataStorage.STORAGE_DIR_DETACHED));
+    
+    // 1. Delete <SD>/current/<bpid>/previous dir before upgrading
+    if (bpPrevDir.exists()) {
+      deleteDir(bpPrevDir);
+    }
+    File bpTmpDir = bpSd.getPreviousTmp();
+    assert !bpTmpDir.exists() : "previous.tmp directory must not exist.";
+    
+    // 2. Rename <SD>/current/<bpid>/current to <SD>/current/<bpid>/previous.tmp
+    rename(bpCurDir, bpTmpDir);
+    
+    // 3. Create new <SD>/current with block files hardlinks and VERSION
+    linkAllBlocks(bpTmpDir, bpCurDir);
+    this.layoutVersion = FSConstants.LAYOUT_VERSION;
+    assert this.namespaceID == nsInfo.getNamespaceID() 
+        : "Data-node and name-node layout versions must be the same.";
+    this.cTime = nsInfo.getCTime();
+    bpSd.write();
+    
+    // 4. rename <SD>/current/<bpid>/previous.tmp to <SD>/current/<bpid>/previous
+    rename(bpTmpDir, bpPrevDir);
+    LOG.info("Upgrade of block pool " + blockpoolID + " at " + bpSd.getRoot()
+        + " is complete.");
+  }
+
+  /**
+   * Cleanup the detachDir.
+   * 
+   * If the directory is not empty report an error; Otherwise remove the
+   * directory.
+   * 
+   * @param detachDir detach directory
+   * @throws IOException if the directory is not empty or it can not be removed
+   */
+  private void cleanupDetachDir(File detachDir) throws IOException {
+    if (layoutVersion >= PRE_RBW_LAYOUT_VERSION && detachDir.exists()
+        && detachDir.isDirectory()) {
+
+      if (detachDir.list().length != 0) {
+        throw new IOException("Detached directory " + detachDir
+            + " is not empty. Please manually move each file under this "
+            + "directory to the finalized directory if the finalized "
+            + "directory tree does not have the file.");
+      } else if (!detachDir.delete()) {
+        throw new IOException("Cannot remove directory " + detachDir);
+      }
+    }
+  }
+
+  /*
+   * Roll back to old snapshot at the block pool level
+   * If previous directory exists: 
+   * <ol>
+   * <li>Rename <SD>/current/<bpid>/current to removed.tmp</li>
+   * <li>Rename <SD>/current/<bpid>/previous to current</li>
+   * <li>Remove removed.tmp</li>
+   * </ol>
+   * 
+   * Do nothing if previous directory does not exist.
+   * @param bpSd Block pool storage directory at <SD>/current/<bpid>
+   */
+  void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo)
+      throws IOException {
+    File prevDir = bpSd.getPreviousDir();
+    // regular startup if previous dir does not exist
+    if (!prevDir.exists())
+      return;
+    // read attributes out of the VERSION file of previous directory
+    DataStorage prevInfo = new DataStorage();
+    StorageDirectory prevSD = prevInfo.new StorageDirectory(bpSd.getRoot());
+    prevSD.read(prevSD.getPreviousVersionFile());
+
+    // We allow rollback to a state, which is either consistent with
+    // the namespace state or can be further upgraded to it.
+    // In another word, we can only roll back when ( storedLV >= software LV)
+    // && ( DN.previousCTime <= NN.ctime)
+    if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION && 
+        prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
+      throw new InconsistentFSStateException(prevSD.getRoot(),
+          "Cannot rollback to a newer state.\nDatanode previous state: LV = "
+              + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
+              + " is newer than the namespace state: LV = "
+              + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime());
+    }
+    
+    LOG.info("Rolling back storage directory " + bpSd.getRoot()
+        + ".\n   target LV = " + nsInfo.getLayoutVersion()
+        + "; target CTime = " + nsInfo.getCTime());
+    File tmpDir = bpSd.getRemovedTmp();
+    assert !tmpDir.exists() : "removed.tmp directory must not exist.";
+    // 1. rename current to tmp
+    File curDir = bpSd.getCurrentDir();
+    assert curDir.exists() : "Current directory must exist.";
+    rename(curDir, tmpDir);
+    
+    // 2. rename previous to current
+    rename(prevDir, curDir);
+    
+    // 3. delete removed.tmp dir
+    deleteDir(tmpDir);
+    LOG.info("Rollback of " + bpSd.getRoot() + " is complete.");
+  }
+
+  /*
+   * Finalize the block pool storage by deleting <BP>/previous directory
+   * that holds the snapshot.
+   */
+  void doFinalize(StorageDirectory bpSd) throws IOException {
+    // block pool level previous directory
+    File prevDir = bpSd.getPreviousDir();
+    if (!prevDir.exists()) {
+      return; // already finalized
+    }
+    final String dataDirPath = bpSd.getRoot().getCanonicalPath();
+    LOG.info("Finalizing upgrade for storage directory " + dataDirPath
+        + ".\n   cur LV = " + this.getLayoutVersion() + "; cur CTime = "
+        + this.getCTime());
+    assert bpSd.getCurrentDir().exists() : "Current directory must exist.";
+    
+    // rename previous to finalized.tmp
+    final File tmpDir = bpSd.getFinalizedTmp();
+    rename(prevDir, tmpDir);
+
+    // delete finalized.tmp dir in a separate thread
+    new Daemon(new Runnable() {
+      public void run() {
+        try {
+          deleteDir(tmpDir);
+        } catch (IOException ex) {
+          LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
+        }
+        LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
+      }
+
+      public String toString() {
+        return "Finalize " + dataDirPath;
+      }
+    }).start();
+  }
+
+  /**
+   * Hardlink all finalized and RBW blocks in fromDir to toDir
+   * 
+   * @param fromDir directory where the snapshot is stored
+   * @param toDir the current data directory
+   * @throws IOException if error occurs during hardlink
+   */
+  private void linkAllBlocks(File fromDir, File toDir) throws IOException {
+    // do the link
+    int diskLayoutVersion = this.getLayoutVersion();
+    // hardlink finalized blocks in tmpDir
+    DataStorage.linkBlocks(fromDir, new File(toDir,
+        DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion);
+  }
+
+  protected void corruptPreUpgradeStorage(File rootDir) throws IOException {
+    File oldF = new File(rootDir, "storage");
+    if (oldF.exists())
+      return;
+    // recreate old storage file to let pre-upgrade versions fail
+    if (!oldF.createNewFile())
+      throw new IOException("Cannot create file " + oldF);
+    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
+    // write new version into old storage file
+    try {
+      writeCorruptedData(oldFile);
+    } finally {
+      oldFile.close();
+    }
+  }
+
+  private void verifyDistributedUpgradeProgress(NamespaceInfo nsInfo)
+      throws IOException {
+    UpgradeManagerDatanode um = DataNode.getDataNode().upgradeManager;
+    assert um != null : "DataNode.upgradeManager is null.";
+    um.setUpgradeState(false, getLayoutVersion());
+    um.initializeUpgrade(nsInfo);
+  }
+
+  /**
+   * gets the data node storage directory based on block pool storage
+   * 
+   * @param bpRoot
+   * @return
+   */
+  private static String getDataNodeStorageRoot(String bpRoot) {
+    Matcher matcher = BLOCK_POOL_PATH_PATTERN.matcher(bpRoot);
+    if (matcher.matches()) {
+      // return the data node root directory
+      return matcher.group(1);
+    }
+    return bpRoot;
+  }
+
+  @Override
+  public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
+    return false;
+  }
+}

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1073857&r1=1073856&r2=1073857&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Feb 23 18:06:11 2011
@@ -377,7 +377,6 @@ public class DataNode extends Configured
         dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
         dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
         dnRegistration.storageInfo.clusterID = nsInfo.clusterID;
-        dnRegistration.storageInfo.blockpoolID = nsInfo.blockpoolID;
         // it would have been better to pass storage as a parameter to
         // constructor below - need to augment ReflectionUtils used below.
         conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, dnRegistration.getStorageID());
@@ -1040,7 +1039,7 @@ public class DataNode extends Configured
       }
       break;
     case DatanodeProtocol.DNA_FINALIZE:
-      storage.finalizeUpgrade();
+      storage.finalizeUpgrade(bcmd.getPoolId());
       break;
     case UpgradeCommand.UC_ACTION_START_UPGRADE:
       // start distributed upgrade here

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1073857&r1=1073856&r2=1073857&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Wed Feb 23 18:06:11 2011
@@ -26,13 +26,22 @@ import java.io.RandomAccessFile;
 import java.nio.channels.FileLock;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.Iterator;
+import java.util.Map;
 import java.util.Properties;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileUtil.HardLink;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -43,6 +52,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.DiskChecker;
 
 /** 
  * Data storage information file.
@@ -58,19 +68,25 @@ public class DataStorage extends Storage
   final static String STORAGE_DIR_RBW = "rbw";
   final static String STORAGE_DIR_FINALIZED = "finalized";
   final static String STORAGE_DIR_DETACHED = "detach";
+
+  private static final Pattern PRE_GENSTAMP_META_FILE_PATTERN = 
+    Pattern.compile("(.*blk_[-]*\\d+)\\.meta$");
   
   private String storageID;
 
+  // flag to ensure initializing storage occurs only once
+  private boolean initilized = false;
+  
+  // BlockPoolStorage is map of <Block pool Id, BlockPoolStorage>
+  private Map<String, BlockPoolStorage> bpStorageMap
+    = new HashMap<String, BlockPoolStorage>();
+
+
   DataStorage() {
     super(NodeType.DATA_NODE);
     storageID = "";
   }
   
-  DataStorage(int nsID, String cID, String bpID, long cT, String strgID) {
-    super(NodeType.DATA_NODE, nsID, cID, bpID, cT);
-    this.storageID = strgID;
-  }
-  
   public DataStorage(StorageInfo storageInfo, String strgID) {
     super(NodeType.DATA_NODE, storageInfo);
     this.storageID = strgID;
@@ -88,17 +104,24 @@ public class DataStorage extends Storage
    * Analyze storage directories.
    * Recover from previous transitions if required. 
    * Perform fs state transition if necessary depending on the namespace info.
-   * Read storage info. 
+   * Read storage info.
+   * <br>
+   * This method should be synchronized between multiple DN threads.  Only the 
+   * first DN thread does DN level storage dir recoverTransitionRead.
    * 
    * @param nsInfo namespace information
    * @param dataDirs array of data storage directories
    * @param startOpt startup option
    * @throws IOException
    */
-  void recoverTransitionRead(NamespaceInfo nsInfo,
+  synchronized void recoverTransitionRead(NamespaceInfo nsInfo,
                              Collection<File> dataDirs,
                              StartupOption startOpt
                              ) throws IOException {
+    if (this.initilized) {
+      // DN storage has been initialized, no need to do anything
+      return;
+    }
     assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
       "Data-node and name-node layout versions must be the same.";
     
@@ -142,7 +165,7 @@ public class DataStorage extends Storage
 
     if (dataDirs.size() == 0)  // none of the data dirs exist
       throw new IOException(
-                            "All specified directories are not accessible or do not exist.");
+          "All specified directories are not accessible or do not exist.");
 
     // 2. Do transitions
     // Each storage directory is treated individually.
@@ -158,38 +181,142 @@ public class DataStorage extends Storage
     
     // 3. Update all storages. Some of them might have just been formatted.
     this.writeAll();
+    
+    // 4. mark DN storage as initialized
+    this.initilized = true;
+  }
+
+  /**
+   * recoverTransitionRead for a specific block pool
+   * 
+   * @param bpID Block pool Id
+   * @param nsInfo Namespace info of namenode corresponding to the block pool
+   * @param dataDirs Storage directories
+   * @param startOpt startup option
+   * @throws IOException on error
+   */
+  void recoverTransitionRead(String bpID, NamespaceInfo nsInfo,
+      Collection<File> dataDirs, StartupOption startOpt) throws IOException {
+    // First ensure datanode level format/snapshot/rollback is completed
+    recoverTransitionRead(nsInfo, dataDirs, startOpt);
+    
+    // Create list of storage directories for the block pool
+    Collection<File> bpDataDirs = new ArrayList<File>();
+    for(Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
+      File dnRoot = it.next();
+      File bpRoot = getBpRoot(bpID, dnRoot);
+      bpDataDirs.add(bpRoot);
+    }
+    // mkdir for the list of BlockPoolStorage
+    makeBlockPoolDataDir(bpDataDirs, null);
+    BlockPoolStorage bpStorage = new BlockPoolStorage(nsInfo.getNamespaceID(), 
+        bpID, nsInfo.getCTime());
+    bpStorage.recoverTransitionRead(nsInfo, bpDataDirs, startOpt);
+    addBlockPoolStorage(bpID, bpStorage);
   }
 
+  /**
+   * Create physical directory for block pools on the data node
+   * 
+   * @param dataDirs
+   *          List of data directories
+   * @param conf
+   *          Configuration instance to use.
+   * @throws IOException on errors
+   */
+  static void makeBlockPoolDataDir(Collection<File> dataDirs,
+      Configuration conf) throws IOException {
+    if (conf == null)
+      conf = new HdfsConfiguration();
+
+    LocalFileSystem localFS = FileSystem.getLocal(conf);
+    FsPermission permission = new FsPermission(conf.get(
+        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
+        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
+    for (File data : dataDirs) {
+      try {
+        DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
+      } catch ( IOException e ) {
+        LOG.warn("Invalid directory in: " + data.getCanonicalPath() + ": "
+            + e.getMessage());
+      }
+    }
+  }
+
+  /**
+   * Get a block pool root directory based on data node root directory
+   * @param bpID block pool ID
+   * @param dnRoot directory of data node root
+   * @return root directory for block pool
+   */
+  private static File getBpRoot(String bpID, File dnRoot) {
+    File bpRoot = new File(new File(dnRoot, STORAGE_DIR_CURRENT), bpID);
+    return bpRoot;
+  }
+  
   void format(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
     sd.clearDirectory(); // create directory
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
-    this.namespaceID = nsInfo.getNamespaceID();
     this.clusterID = nsInfo.getClusterID();
-    this.blockpoolID = nsInfo.getBlockPoolID();
+    this.namespaceID = nsInfo.getNamespaceID();
     this.cTime = 0;
     // store storageID as it currently is
     sd.write();
   }
 
+  /*
+   * Set ClusterID, StorageID, StorageType, CTime into
+   * DataStorage VERSION file
+  */
+  @Override
   protected void setFields(Properties props, 
                            StorageDirectory sd 
                            ) throws IOException {
-    super.setFields(props, sd);
+    props.setProperty("storageType", storageType.toString());
+    props.setProperty("clusterID", clusterID);
+    props.setProperty("cTime", String.valueOf(cTime));
+    props.setProperty("layoutVersion", String.valueOf(layoutVersion));
     props.setProperty("storageID", storageID);
   }
 
-  protected void getFields(Properties props, 
-                           StorageDirectory sd 
-                           ) throws IOException {
-    super.getFields(props, sd);
+  /*
+   * Read ClusterID, StorageID, StorageType, CTime from 
+   * DataStorage VERSION file and verify them.
+   */
+  @Override
+  protected void getFields(Properties props, StorageDirectory sd)
+      throws IOException {
+    String scid = props.getProperty("clusterID");
+    String sct = props.getProperty("cTime");
+    String slv = props.getProperty("layoutVersion");
     String ssid = props.getProperty("storageID");
-    if (ssid == null ||
-        !("".equals(storageID) || "".equals(ssid) ||
-          storageID.equals(ssid)))
+    String st = props.getProperty("storageType");
+
+    if (scid == null || sct == null || slv == null|| ssid == null
+        || st == null) {
+      throw new InconsistentFSStateException(sd.getRoot(), "file "
+          + STORAGE_FILE_VERSION + " is invalid.");
+    }
+    setClusterID(sd.getRoot(), scid);
+    
+    long rct = Long.parseLong(sct);
+    cTime = rct;
+    
+    int rlv = Integer.parseInt(slv);
+    setLayoutVersion(sd.getRoot(), rlv);
+    
+    NodeType rt = NodeType.valueOf(st);
+    setStorageType(sd.getRoot(), rt);
+    
+    // valid storage id, storage id may be empty
+    if ((!storageID.equals("") && !ssid.equals("") && !storageID.equals(ssid))) {
       throw new InconsistentFSStateException(sd.getRoot(),
-                                             "has incompatible storage Id.");
-    if ("".equals(storageID)) // update id only if it was empty
+          "has incompatible storage Id.");
+    }
+    
+    if (storageID.equals("")) { // update id only if it was empty
       storageID = ssid;
+    }
   }
 
   public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
@@ -229,37 +356,32 @@ public class DataStorage extends Storage
                              NamespaceInfo nsInfo, 
                              StartupOption startOpt
                              ) throws IOException {
-    if (startOpt == StartupOption.ROLLBACK)
+    if (startOpt == StartupOption.ROLLBACK) {
       doRollback(sd, nsInfo); // rollback if applicable
+    }
     sd.read();
     checkVersionUpgradable(this.layoutVersion);
     assert this.layoutVersion >= FSConstants.LAYOUT_VERSION :
       "Future version is not allowed";
-    if (getNamespaceID() != nsInfo.getNamespaceID())
-      throw new IOException(
-                            "Incompatible namespaceIDs in " + sd.getRoot().getCanonicalPath()
-                            + ": namenode namespaceID = " + nsInfo.getNamespaceID() 
-                            + "; datanode namespaceID = " + getNamespaceID());
+
     if (!getClusterID().equals (nsInfo.getClusterID()))
       throw new IOException(
                             "Incompatible clusterIDs in " + sd.getRoot().getCanonicalPath()
                             + ": namenode clusterID = " + nsInfo.getClusterID() 
                             + "; datanode clusterID = " + getClusterID());
-    if (!getBlockPoolID().equals(nsInfo.getBlockPoolID()))
-      throw new IOException(
-                            "Incompatible blockpoolIDs in " + sd.getRoot().getCanonicalPath()
-                            + ": namenode blockpoolID = " + nsInfo.getBlockPoolID() 
-                            + "; datanode blockpoolID = " + getBlockPoolID());
+    // regular start up
     if (this.layoutVersion == FSConstants.LAYOUT_VERSION 
         && this.cTime == nsInfo.getCTime())
       return; // regular startup
     // verify necessity of a distributed upgrade
     verifyDistributedUpgradeProgress(nsInfo);
+    // do upgrade
     if (this.layoutVersion > FSConstants.LAYOUT_VERSION
         || this.cTime < nsInfo.getCTime()) {
       doUpgrade(sd, nsInfo);  // upgrade
       return;
     }
+    
     // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
     // must shutdown
     throw new IOException("Datanode state: LV = " + this.getLayoutVersion() 
@@ -270,15 +392,36 @@ public class DataStorage extends Storage
   }
 
   /**
-   * Move current storage into a backup directory,
+   * Upgrade -- Move current storage into a backup directory,
    * and hardlink all its blocks into the new current directory.
    * 
+   * Upgrade from pre-0.22 to 0.22 or later release e.g. 0.19/0.20/ => 0.22/0.23
+   * <ul>
+   * <li> If <SD>/previous exists then delete it </li>
+   * <li> Rename <SD>/current to <SD>/previous.tmp </li>
+   * <li>Create new <SD>/current/<bpid>/current directory</li>
+   * <ul>
+   * <li> Hard links for block files are created from <SD>/previous.tmp 
+   * to <SD>/current/<bpid>/current </li>
+   * <li> Saves new version file in <SD>/current/<bpid>/current directory </li>
+   * </ul>
+   * <li> Rename <SD>/previous.tmp to <SD>/previous </li>
+   * </ul>
+   * 
+   * There should be only ONE namenode in the cluster for first 
+   * time upgrade to 0.22
    * @param sd  storage directory
-   * @throws IOException
+   * @throws IOException on error
    */
-  void doUpgrade(StorageDirectory sd,
-                 NamespaceInfo nsInfo
-                 ) throws IOException {
+  void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
+    //  bp root directory <SD>/current/<bpid>
+    File bpRootDir = getBpRoot(nsInfo.getBlockPoolID(), sd.getRoot());
+
+    // regular startup if <SD>/current/<bpid> directory exists,
+    // i.e. the stored version is 0.22 or later release
+    if (bpRootDir.exists())
+      return;
+    
     LOG.info("Upgrading storage directory " + sd.getRoot()
              + ".\n   old LV = " + this.getLayoutVersion()
              + "; old CTime = " + this.getCTime()
@@ -286,30 +429,37 @@ public class DataStorage extends Storage
              + "; new CTime = " + nsInfo.getCTime());
     File curDir = sd.getCurrentDir();
     File prevDir = sd.getPreviousDir();
-    assert curDir.exists() : "Current directory must exist.";
+    assert curDir.exists() : "Data node current directory must exist.";
     // Cleanup directory "detach"
     cleanupDetachDir(new File(curDir, STORAGE_DIR_DETACHED));
-    // delete previous dir before upgrading
+    
+    // 1. delete <SD>/previous dir before upgrading
     if (prevDir.exists())
       deleteDir(prevDir);
+    // get previous.tmp directory, <SD>/previous.tmp
     File tmpDir = sd.getPreviousTmp();
-    assert !tmpDir.exists() : "previous.tmp directory must not exist.";
-    // rename current to tmp
+    assert !tmpDir.exists() : 
+      "Data node previous.tmp directory must not exist.";
+    
+    // 2. Rename <SD>/current to <SD>/previous.tmp
     rename(curDir, tmpDir);
-    // hard link finalized & rbw blocks
-    linkAllBlocks(tmpDir, curDir);
-    // create current directory if not exists
-    if (!curDir.exists() && !curDir.mkdirs())
-      throw new IOException("Cannot create directory " + curDir);
-    // write version file
-    this.layoutVersion = FSConstants.LAYOUT_VERSION;
-    assert this.namespaceID == nsInfo.getNamespaceID() :
-      "Data-node and name-node layout versions must be the same.";
-    this.cTime = nsInfo.getCTime();
+    
+    // 3. Format BP and hard link blocks from previous directory
+    File curBpDir = getBpRoot(nsInfo.getBlockPoolID(), curDir);
+    BlockPoolStorage bpStorage = new BlockPoolStorage(nsInfo.getNamespaceID(), 
+        nsInfo.getBlockPoolID(), nsInfo.getCTime());
+    bpStorage.format(new StorageDirectory(curBpDir), nsInfo);
+    linkAllBlocks(tmpDir, curBpDir);
+    
+    // 4. Write version file under <SD>/current/<bpid>/current
+    layoutVersion = FSConstants.LAYOUT_VERSION;
+    cTime = nsInfo.getCTime();
     sd.write();
-    // rename tmp to previous
+    
+    // 5. Rename <SD>/previous.tmp to <SD>/previous
     rename(tmpDir, prevDir);
     LOG.info("Upgrade of " + sd.getRoot()+ " is complete.");
+    addBlockPoolStorage(nsInfo.getBlockPoolID(), bpStorage);
   }
 
   /**
@@ -336,6 +486,20 @@ public class DataStorage extends Storage
     }
   }
   
+  /** 
+   * Rolling back to a snapshot in previous directory by moving it to current
+   * directory.
+   * Rollback procedure:
+   * <br>
+   * If previous directory exists:
+   * <ol>
+   * <li> Rename current to removed.tmp </li>
+   * <li> Rename previous to current </li>
+   * <li> Remove removed.tmp </li>
+   * </ol>
+   * 
+   * Do nothing, if previous directory does not exist.
+   */
   void doRollback( StorageDirectory sd,
                    NamespaceInfo nsInfo
                    ) throws IOException {
@@ -352,10 +516,10 @@ public class DataStorage extends Storage
     if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
           && prevInfo.getCTime() <= nsInfo.getCTime()))  // cannot rollback
       throw new InconsistentFSStateException(prevSD.getRoot(),
-                                             "Cannot rollback to a newer state.\nDatanode previous state: LV = " 
-                                             + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() 
-                                             + " is newer than the namespace state: LV = "
-                                             + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime());
+          "Cannot rollback to a newer state.\nDatanode previous state: LV = "
+              + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
+              + " is newer than the namespace state: LV = "
+              + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime());
     LOG.info("Rolling back storage directory " + sd.getRoot()
              + ".\n   target LV = " + nsInfo.getLayoutVersion()
              + "; target CTime = " + nsInfo.getCTime());
@@ -371,22 +535,32 @@ public class DataStorage extends Storage
     deleteDir(tmpDir);
     LOG.info("Rollback of " + sd.getRoot() + " is complete.");
   }
-
+  
+  /**
+   * Finalize procedure deletes an existing snapshot.
+   * <ol>
+   * <li>Rename previous to finalized.tmp directory</li>
+   * <li>Fully delete the finalized.tmp directory</li>
+   * </ol>
+   * 
+   * Do nothing, if previous directory does not exist
+   */
   void doFinalize(StorageDirectory sd) throws IOException {
     File prevDir = sd.getPreviousDir();
     if (!prevDir.exists())
       return; // already discarded
+    
     final String dataDirPath = sd.getRoot().getCanonicalPath();
     LOG.info("Finalizing upgrade for storage directory " 
              + dataDirPath 
              + ".\n   cur LV = " + this.getLayoutVersion()
              + "; cur CTime = " + this.getCTime());
     assert sd.getCurrentDir().exists() : "Current directory must exist.";
-    final File tmpDir = sd.getFinalizedTmp();
-    // rename previous to tmp
+    final File tmpDir = sd.getFinalizedTmp();//finalized.tmp directory
+    // 1. rename previous to finalized.tmp
     rename(prevDir, tmpDir);
 
-    // delete tmp dir in a separate thread
+    // 2. delete finalized.tmp dir in a separate thread
     new Daemon(new Runnable() {
         public void run() {
           try {
@@ -400,9 +574,26 @@ public class DataStorage extends Storage
       }).start();
   }
   
-  void finalizeUpgrade() throws IOException {
-    for (Iterator<StorageDirectory> it = storageDirs.iterator(); it.hasNext();) {
-      doFinalize(it.next());
+  
+  /*
+   * Finalize the upgrade for a block pool
+   */
+  void finalizeUpgrade(String bpID) throws IOException {
+    // To handle finalizing a snapshot taken at datanode level while 
+    // upgrading to federation, if datanode level snapshot previous exists, 
+    // then finalize it. Else finalize the corresponding BP.
+    for (StorageDirectory sd : storageDirs) {
+      File prevDir = sd.getPreviousDir();
+      if (prevDir.exists()) {
+        // data node level storage finalize
+        doFinalize(sd);
+      } else {
+        // block pool storage finalize using specific bpID
+        File dnRoot = sd.getRoot();
+        BlockPoolStorage bpStorage = bpStorageMap.get(bpID);
+        File bpRoot = getBpRoot(bpID, dnRoot);
+        bpStorage.doFinalize(new StorageDirectory(bpRoot));
+      }
     }
   }
 
@@ -499,8 +690,6 @@ public class DataStorage extends Storage
     um.initializeUpgrade(nsInfo);
   }
   
-  private static final Pattern PRE_GENSTAMP_META_FILE_PATTERN = 
-    Pattern.compile("(.*blk_[-]*\\d+)\\.meta$");
   /**
    * This is invoked on target file names when upgrading from pre generation 
    * stamp version (version -13) to correct the metatadata file name.
@@ -516,4 +705,14 @@ public class DataStorage extends Storage
     }
     return oldFileName;
   }
+
+  /**
+   * Add bpStorage into bpStorageMap
+   */
+  private void addBlockPoolStorage(String bpID, BlockPoolStorage bpStorage)
+      throws IOException {
+    if (!this.bpStorageMap.containsKey(bpID)) {
+      this.bpStorageMap.put(bpID, bpStorage);
+    }
+  }
 }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1073857&r1=1073856&r2=1073857&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Wed Feb 23 18:06:11 2011
@@ -93,6 +93,8 @@ public class FSImage extends Storage {
   private static final SimpleDateFormat DATE_FORM =
     new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
 
+  private String blockpoolID = "";   // id of the block pool
+  
   //
   // The filenames used for storing the images
   //
@@ -705,13 +707,33 @@ public class FSImage extends Storage {
     return isUpgradeFinalized;
   }
 
+  /** Validate and set block pool ID */
+  private void setBlockPoolID(File storage, String bpid)
+      throws InconsistentFSStateException {
+    if (bpid == null || bpid.equals("")) {
+      throw new InconsistentFSStateException(storage, "file "
+          + STORAGE_FILE_VERSION + " is invalid.");
+    }
+    
+    if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
+      throw new InconsistentFSStateException(storage,
+          "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
+    }
+    blockpoolID = bpid;
+  }
+  
   protected void getFields(Properties props, 
                            StorageDirectory sd 
                            ) throws IOException {
     super.getFields(props, sd);
-    if (layoutVersion == 0)
-      throw new IOException("NameNode directory " 
-                            + sd.getRoot() + " is not formatted.");
+    if (layoutVersion == 0) {
+      throw new IOException("NameNode directory " + sd.getRoot()
+          + " is not formatted.");
+    }
+    
+    String sbpid = props.getProperty("blockpoolID");
+    setBlockPoolID(sd.getRoot(), sbpid);
+    
     String sDUS, sDUV;
     sDUS = props.getProperty("distributedUpgradeState"); 
     sDUV = props.getProperty("distributedUpgradeVersion");
@@ -756,6 +778,7 @@ public class FSImage extends Storage {
                            StorageDirectory sd 
                            ) throws IOException {
     super.setFields(props, sd);
+    props.setProperty("blockpoolID", blockpoolID);
     boolean uState = getDistributedUpgradeState();
     int uVersion = getDistributedUpgradeVersion();
     if(uState && uVersion != getLayoutVersion()) {
@@ -1569,8 +1592,8 @@ public class FSImage extends Storage {
       LOG.warn("Could not use SecureRandom");
       rand = R.nextInt(Integer.MAX_VALUE);
     }
-    this.blockpoolID ="BP-" + rand + "-"+ ip + "-" + System.currentTimeMillis();
-    return this.blockpoolID;
+    String bpid = "BP-" + rand + "-"+ ip + "-" + System.currentTimeMillis();
+    return bpid;
   }
   
   /** Create new dfs name directory.  Caution: this destroys all files
@@ -2314,4 +2337,8 @@ public class FSImage extends Storage {
     U_STR.set(str);
     U_STR.write(out);
   }
+
+  String getBlockPoolID() {
+    return blockpoolID;
+  }
 }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=1073857&r1=1073856&r2=1073857&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Wed Feb 23 18:06:11 2011
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.Deprecated
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.io.WritableUtils;
 
 /**
  * NamespaceInfo is returned by the name-node in reply 
@@ -42,6 +43,7 @@ import org.apache.hadoop.io.WritableFact
 public class NamespaceInfo extends StorageInfo {
   String  buildVersion;
   int distributedUpgradeVersion;
+  String blockPoolID = "";    // id of the block pool
 
   public NamespaceInfo() {
     super();
@@ -50,7 +52,8 @@ public class NamespaceInfo extends Stora
   
   public NamespaceInfo(int nsID, String clusterID, String bpID, 
       long cT, int duVersion) {
-    super(FSConstants.LAYOUT_VERSION, nsID, clusterID, bpID, cT);
+    super(FSConstants.LAYOUT_VERSION, nsID, clusterID, cT);
+    blockPoolID = bpID;
     buildVersion = Storage.getBuildVersion();
     this.distributedUpgradeVersion = duVersion;
   }
@@ -63,6 +66,10 @@ public class NamespaceInfo extends Stora
     return distributedUpgradeVersion;
   }
   
+  public String getBlockPoolID() {
+    return blockPoolID;
+  }
+
   /////////////////////////////////////////////////
   // Writable
   /////////////////////////////////////////////////
@@ -78,11 +85,13 @@ public class NamespaceInfo extends Stora
     DeprecatedUTF8.writeString(out, getBuildVersion());
     super.write(out);
     out.writeInt(getDistributedUpgradeVersion());
+    WritableUtils.writeString(out, blockPoolID);
   }
 
   public void readFields(DataInput in) throws IOException {
     buildVersion = DeprecatedUTF8.readString(in);
     super.readFields(in);
     distributedUpgradeVersion = in.readInt();
+    blockPoolID = WritableUtils.readString(in);
   }
 }

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1073857&r1=1073856&r2=1073857&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java Wed Feb 23 18:06:11 2011
@@ -196,7 +196,6 @@ public class TestDFSRollback extends Tes
                                          new StorageInfo(Integer.MIN_VALUE,
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          UpgradeUtilities.getCurrentClusterID(cluster),
-                                                         UpgradeUtilities.getCurrentBlockPoolID(cluster),
                                                          UpgradeUtilities.getCurrentFsscTime(cluster)));
       startDataNodeShouldFail(StartupOption.ROLLBACK);
       cluster.shutdown();
@@ -218,7 +217,6 @@ public class TestDFSRollback extends Tes
                                          new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          UpgradeUtilities.getCurrentClusterID(cluster),
-                                                         UpgradeUtilities.getCurrentBlockPoolID(cluster),
                                                          Long.MAX_VALUE));
       startDataNodeShouldFail(StartupOption.ROLLBACK);
       cluster.shutdown();
@@ -259,7 +257,6 @@ public class TestDFSRollback extends Tes
                                          new StorageInfo(1,
                                                          UpgradeUtilities.getCurrentNamespaceID(null),
                                                          UpgradeUtilities.getCurrentClusterID(null),
-                                                         UpgradeUtilities.getCurrentBlockPoolID(null),
                                                          UpgradeUtilities.getCurrentFsscTime(null)));
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java?rev=1073857&r1=1073856&r2=1073857&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java Wed Feb 23 18:06:11 2011
@@ -77,27 +77,26 @@ public class TestDFSStartupVersions exte
     long fsscTimeCur = UpgradeUtilities.getCurrentFsscTime(null);
     long fsscTimeNew = Long.MAX_VALUE;
     String clusterID = "cid-test";
-    String bpID = "bpid-test";
     
     return new StorageInfo[] {
-      new StorageInfo(layoutVersionOld, namespaceIdCur, clusterID, bpID, fsscTimeOld), // 0
-      new StorageInfo(layoutVersionOld, namespaceIdCur, clusterID, bpID, fsscTimeCur), // 1
-      new StorageInfo(layoutVersionOld, namespaceIdCur, clusterID, bpID, fsscTimeNew), // 2
-      new StorageInfo(layoutVersionOld, namespaceIdOld, clusterID, bpID, fsscTimeOld), // 3
-      new StorageInfo(layoutVersionOld, namespaceIdOld, clusterID, bpID, fsscTimeCur), // 4
-      new StorageInfo(layoutVersionOld, namespaceIdOld, clusterID, bpID, fsscTimeNew), // 5
-      new StorageInfo(layoutVersionCur, namespaceIdCur, clusterID, bpID, fsscTimeOld), // 6
-      new StorageInfo(layoutVersionCur, namespaceIdCur, clusterID, bpID, fsscTimeCur), // 7
-      new StorageInfo(layoutVersionCur, namespaceIdCur, clusterID, bpID, fsscTimeNew), // 8
-      new StorageInfo(layoutVersionCur, namespaceIdOld, clusterID, bpID, fsscTimeOld), // 9
-      new StorageInfo(layoutVersionCur, namespaceIdOld, clusterID, bpID, fsscTimeCur), // 10
-      new StorageInfo(layoutVersionCur, namespaceIdOld, clusterID, bpID, fsscTimeNew), // 11
-      new StorageInfo(layoutVersionNew, namespaceIdCur, clusterID, bpID, fsscTimeOld), // 12
-      new StorageInfo(layoutVersionNew, namespaceIdCur, clusterID, bpID, fsscTimeCur), // 13
-      new StorageInfo(layoutVersionNew, namespaceIdCur, clusterID, bpID, fsscTimeNew), // 14
-      new StorageInfo(layoutVersionNew, namespaceIdOld, clusterID, bpID, fsscTimeOld), // 15
-      new StorageInfo(layoutVersionNew, namespaceIdOld, clusterID, bpID, fsscTimeCur), // 16
-      new StorageInfo(layoutVersionNew, namespaceIdOld, clusterID, bpID, fsscTimeNew), // 17
+      new StorageInfo(layoutVersionOld, namespaceIdCur, clusterID, fsscTimeOld), // 0
+      new StorageInfo(layoutVersionOld, namespaceIdCur, clusterID, fsscTimeCur), // 1
+      new StorageInfo(layoutVersionOld, namespaceIdCur, clusterID, fsscTimeNew), // 2
+      new StorageInfo(layoutVersionOld, namespaceIdOld, clusterID, fsscTimeOld), // 3
+      new StorageInfo(layoutVersionOld, namespaceIdOld, clusterID, fsscTimeCur), // 4
+      new StorageInfo(layoutVersionOld, namespaceIdOld, clusterID, fsscTimeNew), // 5
+      new StorageInfo(layoutVersionCur, namespaceIdCur, clusterID, fsscTimeOld), // 6
+      new StorageInfo(layoutVersionCur, namespaceIdCur, clusterID, fsscTimeCur), // 7
+      new StorageInfo(layoutVersionCur, namespaceIdCur, clusterID, fsscTimeNew), // 8
+      new StorageInfo(layoutVersionCur, namespaceIdOld, clusterID, fsscTimeOld), // 9
+      new StorageInfo(layoutVersionCur, namespaceIdOld, clusterID, fsscTimeCur), // 10
+      new StorageInfo(layoutVersionCur, namespaceIdOld, clusterID, fsscTimeNew), // 11
+      new StorageInfo(layoutVersionNew, namespaceIdCur, clusterID, fsscTimeOld), // 12
+      new StorageInfo(layoutVersionNew, namespaceIdCur, clusterID, fsscTimeCur), // 13
+      new StorageInfo(layoutVersionNew, namespaceIdCur, clusterID, fsscTimeNew), // 14
+      new StorageInfo(layoutVersionNew, namespaceIdOld, clusterID, fsscTimeOld), // 15
+      new StorageInfo(layoutVersionNew, namespaceIdOld, clusterID, fsscTimeCur), // 16
+      new StorageInfo(layoutVersionNew, namespaceIdOld, clusterID, fsscTimeNew), // 17
     };
   }
   
@@ -185,7 +184,6 @@ public class TestDFSStartupVersions exte
                                                   UpgradeUtilities.getCurrentLayoutVersion(),
                                                   UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                   UpgradeUtilities.getCurrentClusterID(cluster),
-                                                  UpgradeUtilities.getCurrentBlockPoolID(cluster),
                                                   UpgradeUtilities.getCurrentFsscTime(cluster));
     log("NameNode version info", NAME_NODE, null, nameNodeVersion);
     for (int i = 0; i < versions.length; i++) {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=1073857&r1=1073856&r2=1073857&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java Wed Feb 23 18:06:11 2011
@@ -194,7 +194,6 @@ public class TestDFSUpgrade extends Test
                                          new StorageInfo(Integer.MIN_VALUE,
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          UpgradeUtilities.getCurrentClusterID(cluster),
-                                                         UpgradeUtilities.getCurrentBlockPoolID(cluster),
                                                          UpgradeUtilities.getCurrentFsscTime(cluster)));
       startDataNodeShouldFail(StartupOption.REGULAR);
       cluster.shutdown();
@@ -209,7 +208,6 @@ public class TestDFSUpgrade extends Test
                                          new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          UpgradeUtilities.getCurrentClusterID(cluster),
-                                                         UpgradeUtilities.getCurrentBlockPoolID(cluster),
                                                          Long.MAX_VALUE));
       startDataNodeShouldFail(StartupOption.REGULAR);
       cluster.shutdown();
@@ -246,7 +244,6 @@ public class TestDFSUpgrade extends Test
                                          new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
                                                          UpgradeUtilities.getCurrentNamespaceID(null),
                                                          UpgradeUtilities.getCurrentClusterID(null),
-                                                         UpgradeUtilities.getCurrentBlockPoolID(null),
                                                          UpgradeUtilities.getCurrentFsscTime(null)));
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
@@ -257,7 +254,6 @@ public class TestDFSUpgrade extends Test
                                          new StorageInfo(Integer.MIN_VALUE,
                                                          UpgradeUtilities.getCurrentNamespaceID(null),
                                                          UpgradeUtilities.getCurrentClusterID(null),
-                                                         UpgradeUtilities.getCurrentBlockPoolID(null),
                                                          UpgradeUtilities.getCurrentFsscTime(null)));
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java?rev=1073857&r1=1073856&r2=1073857&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java Wed Feb 23 18:06:11 2011
@@ -41,11 +41,10 @@ public class TestStorageInfo extends Tes
     
     int nsID = 123;
     String cid = "cid-test";
-    String bpid = "bpid-test";
     int layoutV = 234;
     long cT = 0L;
     
-    StorageInfo sinfo = new StorageInfo(layoutV, nsID, cid,  bpid, cT);
+    StorageInfo sinfo = new StorageInfo(layoutV, nsID, cid,  cT);
     
     Assert.assertNotNull(sinfo);
 
@@ -69,7 +68,6 @@ public class TestStorageInfo extends Tes
         // compare
         Assert.assertEquals(sinfo.getClusterID(), secondsinfo.getClusterID());
         Assert.assertEquals(sinfo.getNamespaceID(), secondsinfo.getNamespaceID());
-        Assert.assertEquals(sinfo.getBlockPoolID(), secondsinfo.getBlockPoolID());
         Assert.assertEquals(sinfo.getLayoutVersion(), secondsinfo.getLayoutVersion());
         Assert.assertEquals(sinfo.getCTime(), secondsinfo.getCTime());
     }catch (IOException e) {



Mime
View raw message