hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sur...@apache.org
Subject svn commit: r1075654 - in /hadoop/hdfs/branches/HDFS-1052: ./ src/java/org/apache/hadoop/hdfs/server/common/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/ src/tes...
Date Tue, 01 Mar 2011 04:26:41 GMT
Author: suresh
Date: Tue Mar  1 04:26:40 2011
New Revision: 1075654

URL: http://svn.apache.org/viewvc?rev=1075654&view=rev
Log:
HDFS-1654. Federation: Fix TestDFSUpgrade and TestDFSRollback failures. (suresh)


Modified:
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolStorage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Tue Mar  1 04:26:40 2011
@@ -31,39 +31,39 @@ Trunk (unreleased changes)
     HDFS-1653. Federation: Block received message from datanode sends invalid 
     DatanodeRegistration. (Tanping via suresh)
 
-    HDFS-1645. DatanodeCommond.Finalize needs to include BlockPoolId.
-    (suresh)
+    HDFS-1645. Federation: DatanodeCommand.Finalize needs to include 
+    BlockPoolId. (suresh)
 
-    HDFS-1638.DataNode.handleDiskError needs to inform ALL namenodes if a disk 
-    failed (boryas)
+    HDFS-1638. Federation: DataNode.handleDiskError needs to inform 
+    ALL namenodes if a disk failed (boryas)
 
     HDFS-1647. Federation: Multiple namenode configuration. (jitendra)
 
-    HDFS-1639. Add block pool management to FSDataset. (suresh)
+    HDFS-1639. Federation: Add block pool management to FSDataset. (suresh)
 
-    HDFS-1648. Only DataStorage must be locked using in_use.lock and no 
-    locks must be associated with BlockPoolStorage. (Tanping via suresh)
+    HDFS-1648. Federation: Only DataStorage must be locked using in_use.lock 
+    and no locks must be associated with BlockPoolStorage. (Tanping via suresh)
 
-    HDFS-1641. Datanode fields that are no longer used should be removed (boryas)
+    HDFS-1641. Federation: Datanode fields that are no longer used should 
+    be removed (boryas)
 
-    HDFS-1642. HDFS Federation: add Datanode.getDNRegistration(String bpid) 
+    HDFS-1642. Federation: add Datanode.getDNRegistration(String bpid) 
     method  (boryas)
 
-    HDFS-1643. HDFS Federation: remove namenode argument from DataNode 
+    HDFS-1643. Federation: remove namenode argument from DataNode 
     constructor (boryas)
 
-    HDFS-1657. Tests that corrupt block files fail due to changed file 
-    path in federation. (suresh)
+    HDFS-1657. Federation: Tests that corrupt block files fail due to changed 
+    file path in federation. (suresh)
 
-    HDFS-1661. Hdfs Federation: Remove unnecessary TODO:FEDERATION comments.
+    HDFS-1661. Federation: Remove unnecessary TODO:FEDERATION comments.
     (jitendra)
 
-    HDFS-1660. HDFS Federation: Datanode doesn't start with two namenodes
-    (boryas)
+    HDFS-1660. Federation: Datanode doesn't start with two namenodes (boryas)
 
-    HDFS-1650. TestReplication fails. (Tanping via suresh)
+    HDFS-1650. Federation: TestReplication fails. (Tanping via suresh)
 
-    HDFS-1651. Tests fail due to null pointer exception in 
+    HDFS-1651. Federation: Tests fail due to null pointer exception in 
     Datnode#shutdown() method. (Tanping via suresh)
 
     HDFS-1649. Federation: Datanode command to refresh namenode list at 
@@ -72,8 +72,12 @@ Trunk (unreleased changes)
     HDFS-1646. Federation: MiniDFSClsuter#waitActive() waits for ever 
     with the introduction of BPOfferService in datanode. (suresh)
 
-    HDFS-1659. BPOfferService exits after one iteration incorrectly.
-    (Tanping via suresh)
+    HDFS-1659. Federation: BPOfferService exits after one iteration 
+    incorrectly.  (Tanping via suresh)
+
+    HDFS-1654. Federation: Fix TestDFSUpgrade and TestDFSRollback failures.
+    (suresh)
+    
 
   IMPROVEMENTS
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java Tue Mar  1 04:26:40 2011
@@ -52,6 +52,10 @@ public interface HdfsConstants {
     IMPORT  ("-importCheckpoint");
     
     private String name = null;
+    
+    // Used only with format and upgrade options
+    private String clusterId = null;
+    
     private StartupOption(String arg) {this.name = arg;}
     public String getName() {return name;}
     public NamenodeRole toNodeRole() {
@@ -64,7 +68,14 @@ public interface HdfsConstants {
         return NamenodeRole.ACTIVE;
       }
     }
-
+    
+    public void setClusterId(String cid) {
+      clusterId = cid;
+    }
+    
+    public String getClusterId() {
+      return clusterId;
+    }
   }
 
   // Timeouts for communicating with DataNode for streaming writes/reads

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java Tue Mar  1 04:26:40 2011
@@ -753,28 +753,11 @@ public abstract class Storage extends St
   protected void getFields(Properties props, 
                            StorageDirectory sd 
                            ) throws IOException {
-    String sv, st, sid, scid, sct;
-    sv = props.getProperty("layoutVersion");
-    st = props.getProperty("storageType");
-    sid = props.getProperty("namespaceID");
-    scid = props.getProperty("clusterID");
-    sct = props.getProperty("cTime");
-    if (sv == null || st == null || sid == null || scid == null || 
-        sct == null) {
-      throw new InconsistentFSStateException(sd.root,
-        "file " + STORAGE_FILE_VERSION + " is invalid.");
-    }
-    
-    int rv = Integer.parseInt(sv);
-    NodeType rt = NodeType.valueOf(st);
-    int rid = Integer.parseInt(sid);
-    long rct = Long.parseLong(sct);
-    
-    setClusterID(sd.root, scid);
-    setNamespaceID(sd.root, rid);
-    setLayoutVersion(sd.root, rv);
-    setStorageType(sd.root, rt);
-    cTime = rct;
+    setLayoutVersion(props, sd);
+    setNamespaceID(props, sd);
+    setStorageType(props, sd);
+    setcTime(props, sd);
+    setClusterId(props, layoutVersion, sd);
   }
   
   /**
@@ -888,13 +871,66 @@ public abstract class Storage extends St
     file.getFD().sync();
   }
   
-  /** Validate and set storage type */
-  protected void setStorageType(File storage, NodeType type)
+  String getProperty(Properties props, StorageDirectory sd,
+      String name) throws InconsistentFSStateException {
+    String property = props.getProperty(name);
+    if (property == null) {
+      throw new InconsistentFSStateException(sd.root, "file "
+          + STORAGE_FILE_VERSION + " has " + name + " missing.");
+    }
+    return property;
+  }
+  
+  /** Validate and set storage type from {@link Properties}*/
+  protected void setStorageType(Properties props, StorageDirectory sd)
       throws InconsistentFSStateException {
+    NodeType type = NodeType.valueOf(getProperty(props, sd, "storageType"));
     if (!storageType.equals(type)) {
-      throw new InconsistentFSStateException(storage,
+      throw new InconsistentFSStateException(sd.root,
           "node type is incompatible with others.");
     }
     storageType = type;
   }
+  
+  /** Validate and set ctime from {@link Properties}*/
+  protected void setcTime(Properties props, StorageDirectory sd)
+      throws InconsistentFSStateException {
+    cTime = Long.parseLong(getProperty(props, sd, "cTime"));
+  }
+
+  /** Validate and set clusterId from {@link Properties}*/
+  protected void setClusterId(Properties props, int layoutVersion,
+      StorageDirectory sd) throws InconsistentFSStateException {
+    // No Cluster ID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
+    if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+      String cid = getProperty(props, sd, "clusterID");
+      if (!(clusterID.equals("") || cid.equals("") || clusterID.equals(cid))) {
+        throw new InconsistentFSStateException(sd.getRoot(),
+            "cluster Id is incompatible with others.");
+      }
+      clusterID = cid;
+    }
+  }
+  
+  /** Validate and set layout version from {@link Properties}*/
+  protected void setLayoutVersion(Properties props, StorageDirectory sd)
+      throws IncorrectVersionException, InconsistentFSStateException {
+    int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
+    if (lv < FSConstants.LAYOUT_VERSION) { // future version
+      throw new IncorrectVersionException(lv, "storage directory "
+          + sd.root.getAbsolutePath());
+    }
+    layoutVersion = lv;
+  }
+  
+  /** Validate and set namespaceID from {@link Properties} */
+  protected void setNamespaceID(Properties props, StorageDirectory sd)
+      throws InconsistentFSStateException {
+    int nsId = Integer.parseInt(getProperty(props, sd, "namespaceID"));
+    if (namespaceID != 0 && nsId != 0 && namespaceID != nsId) {
+      throw new InconsistentFSStateException(sd.root,
+          "namespaceID is incompatible with others.");
+    }
+    namespaceID = nsId;
+  }
 }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java Tue Mar  1 04:26:40 2011
@@ -101,36 +101,6 @@ public class StorageInfo implements Writ
     cTime = in.readLong();
   }
   
-  /** validate and set namespaceID */
-  protected void setNamespaceID(File storage, int nsId)
-      throws InconsistentFSStateException {
-    if (namespaceID != 0 && nsId != 0 && namespaceID != nsId) {
-      throw new InconsistentFSStateException(storage,
-          "namespaceID is incompatible with others.");
-    }
-    namespaceID = nsId;
-  }
-  
-  /** validate and set layout version */ 
-  protected void setLayoutVersion(File storage, int lv)
-      throws IncorrectVersionException, IOException {
-    if (lv < FSConstants.LAYOUT_VERSION) { // future version
-      throw new IncorrectVersionException(lv, "storage directory "
-          + storage.getCanonicalPath());
-    }
-    layoutVersion = lv;
-  }
-  
-  /** validate and set ClusterID */
-  protected void setClusterID(File storage, String cid)
-      throws InconsistentFSStateException {
-    if (!clusterID.equals("") && !cid.equals("") && !clusterID.equals(cid)) {
-      throw new InconsistentFSStateException(storage,
-          "cluster id is incompatible with others.");
-    }
-    clusterID = cid;
-  }
-  
   public String toString() {
     StringBuilder sb = new StringBuilder();
     sb.append("lv=").append(layoutVersion).append(";cid=").append(clusterID)

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolStorage.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolStorage.java Tue Mar  1 04:26:40 2011
@@ -146,11 +146,23 @@ public class BlockPoolStorage extends St
 
   /**
    * Format a block pool storage. 
+   * @param dnCurDir DataStorage current directory
+   * @param nsInfo the name space info
+   * @throws IOException Signals that an I/O exception has occurred.
+   */
+  void format(File dnCurDir, NamespaceInfo nsInfo) throws IOException {
+    File curBpDir = getBpRoot(nsInfo.getBlockPoolID(), dnCurDir);
+    StorageDirectory bpSdir = new StorageDirectory(curBpDir);
+    format(bpSdir, nsInfo);
+  }
+
+  /**
+   * Format a block pool storage. 
    * @param sd the block pool storage
    * @param nsInfo the name space info
    * @throws IOException Signals that an I/O exception has occurred.
    */
-  void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOException {
+  private void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOException {
     LOG.info("Formatting block pool " + blockpoolID + " directory "
         + bpSdir.getCurrentDir());
     bpSdir.clearDirectory(); // create directory
@@ -173,7 +185,6 @@ public class BlockPoolStorage extends St
     props.setProperty("namespaceID", String.valueOf(namespaceID));
     props.setProperty("blockpoolID", blockpoolID);
     props.setProperty("cTime", String.valueOf(cTime));
-    props.setProperty("storageType", storageType.toString());
   }
 
   /** Validate and set block pool ID */
@@ -194,27 +205,12 @@ public class BlockPoolStorage extends St
   @Override
   protected void getFields(Properties props, StorageDirectory sd)
       throws IOException {
-    String sv = props.getProperty("layoutVersion");
-    String sctime = props.getProperty("cTime");
-    String sid = props.getProperty("namespaceID");
-    String st = props.getProperty("storageType");
-    if (st == null || sv == null || sctime == null || sid == null) {
-      throw new InconsistentFSStateException(sd.getRoot(), "file "
-          + STORAGE_FILE_VERSION + " is invalid.");
-    }
-    int rv = Integer.parseInt(sv);
-    setLayoutVersion(sd.getRoot(), rv);
-    
-    int rid = Integer.parseInt(sid);
-    setNamespaceID(sd.getRoot(), rid);
-    
-    NodeType rt = NodeType.valueOf(st);
-    setStorageType(sd.getRoot(), rt);
+    setLayoutVersion(props, sd);
+    setNamespaceID(props, sd);
+    setcTime(props, sd);
     
     String sbpid = props.getProperty("blockpoolID");
     setBlockPoolID(sd.getRoot(), sbpid);
-    
-    cTime = Long.parseLong(sctime);
   }
 
   /**
@@ -289,12 +285,11 @@ public class BlockPoolStorage extends St
    * @throws IOException on error
    */
   void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
-    // have to be upgrading between any after 0.22 (0.22 included) release
-    // stored version <= 0.22 && software version < 0.22
+    // Upgrading is applicable only to releases with federation support or later
     if (!(this.getLayoutVersion() < LAST_PRE_FEDERATION_LAYOUT_VERSION)) {
       return;
     }
-    LOG.info("Upgrading storage directory " + bpSd.getRoot()
+    LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
         + ".\n   old LV = " + this.getLayoutVersion() + "; old CTime = "
         + this.getCTime() + ".\n   new LV = " + nsInfo.getLayoutVersion()
         + "; new CTime = " + nsInfo.getCTime());
@@ -418,7 +413,9 @@ public class BlockPoolStorage extends St
    * Finalize the block pool storage by deleting <BP>/previous directory
    * that holds the snapshot.
    */
-  void doFinalize(StorageDirectory bpSd) throws IOException {
+  void doFinalize(File dnCurDir) throws IOException {
+    File bpRoot = getBpRoot(blockpoolID, dnCurDir);
+    StorageDirectory bpSd = new StorageDirectory(bpRoot);
     // block pool level previous directory
     File prevDir = bpSd.getPreviousDir();
     if (!prevDir.exists()) {
@@ -514,4 +511,14 @@ public class BlockPoolStorage extends St
   public String toString() {
     return super.toString() + ";bpid=" + blockpoolID;
   }
+  
+  /**
+   * Get a block pool storage root based on data node storage root
+   * @param bpID block pool ID
+   * @param dnCurDir data node storage current directory
+   * @return root directory for block pool storage
+   */
+  static File getBpRoot(String bpID, File dnCurDir) {
+    return new File(dnCurDir, bpID);
+  }
 }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Tue Mar  1 04:26:40 2011
@@ -214,7 +214,8 @@ public class DataStorage extends Storage
     Collection<File> bpDataDirs = new ArrayList<File>();
     for(Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
       File dnRoot = it.next();
-      File bpRoot = getBpRoot(bpID, dnRoot);
+      File bpRoot = BlockPoolStorage.getBpRoot(bpID, new File(dnRoot,
+          STORAGE_DIR_CURRENT));
       bpDataDirs.add(bpRoot);
     }
     // mkdir for the list of BlockPoolStorage
@@ -254,17 +255,6 @@ public class DataStorage extends Storage
     }
   }
 
-  /**
-   * Get a block pool root directory based on data node root directory
-   * @param bpID block pool ID
-   * @param dnRoot directory of data node root
-   * @return root directory for block pool
-   */
-  private static File getBpRoot(String bpID, File dnRoot) {
-    File bpRoot = new File(new File(dnRoot, STORAGE_DIR_CURRENT), bpID);
-    return bpRoot;
-  }
-  
   void format(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
     sd.clearDirectory(); // create directory
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
@@ -297,30 +287,23 @@ public class DataStorage extends Storage
   @Override
   protected void getFields(Properties props, StorageDirectory sd)
       throws IOException {
-    String scid = props.getProperty("clusterID");
-    String sct = props.getProperty("cTime");
-    String slv = props.getProperty("layoutVersion");
+    setLayoutVersion(props, sd);
+    setcTime(props, sd);
+    setStorageType(props, sd);
+    setClusterId(props, layoutVersion, sd);
+    
+    // Read NamespaceID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
+    if (layoutVersion >= LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+      setNamespaceID(props, sd);
+    }
+    
+    // valid storage id, storage id may be empty
     String ssid = props.getProperty("storageID");
-    String st = props.getProperty("storageType");
-
-    if (scid == null || sct == null || slv == null|| ssid == null
-        || st == null) {
+    if (ssid == null) {
       throw new InconsistentFSStateException(sd.getRoot(), "file "
           + STORAGE_FILE_VERSION + " is invalid.");
     }
-    setClusterID(sd.getRoot(), scid);
-    
-    long rct = Long.parseLong(sct);
-    cTime = rct;
-    
-    int rlv = Integer.parseInt(slv);
-    setLayoutVersion(sd.getRoot(), rlv);
-    
-    NodeType rt = NodeType.valueOf(st);
-    setStorageType(sd.getRoot(), rt);
-    
-    // valid storage id, storage id may be empty
-    if ((!storageID.equals("") && !ssid.equals("") && !storageID.equals(ssid))) {
+    if (!(storageID.equals("") || ssid.equals("") || storageID.equals(ssid))) {
       throw new InconsistentFSStateException(sd.getRoot(),
           "has incompatible storage Id.");
     }
@@ -374,12 +357,13 @@ public class DataStorage extends Storage
     checkVersionUpgradable(this.layoutVersion);
     assert this.layoutVersion >= FSConstants.LAYOUT_VERSION :
       "Future version is not allowed";
-
-    if (!getClusterID().equals (nsInfo.getClusterID()))
-      throw new IOException(
-                            "Incompatible clusterIDs in " + sd.getRoot().getCanonicalPath()
-                            + ": namenode clusterID = " + nsInfo.getClusterID() 
-                            + "; datanode clusterID = " + getClusterID());
+    
+    if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION
+        && !getClusterID().equals(nsInfo.getClusterID())) {
+      throw new IOException("Incompatible clusterIDs in "
+          + sd.getRoot().getCanonicalPath() + ": namenode clusterID = "
+          + nsInfo.getClusterID() + "; datanode clusterID = " + getClusterID());
+    }
     // regular start up
     if (this.layoutVersion == FSConstants.LAYOUT_VERSION 
         && this.cTime == nsInfo.getCTime())
@@ -425,13 +409,12 @@ public class DataStorage extends Storage
    * @throws IOException on error
    */
   void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
-    //  bp root directory <SD>/current/<bpid>
-    File bpRootDir = getBpRoot(nsInfo.getBlockPoolID(), sd.getRoot());
-
-    // regular startup if <SD>/current/<bpid> direcotry exist,
-    // i.e. the stored version is 0.22 or later release
-    if (bpRootDir.exists())
+    if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+      clusterID = nsInfo.getClusterID();
+      layoutVersion = nsInfo.getLayoutVersion();
+      sd.write();
       return;
+    }
     
     LOG.info("Upgrading storage directory " + sd.getRoot()
              + ".\n   old LV = " + this.getLayoutVersion()
@@ -456,15 +439,15 @@ public class DataStorage extends Storage
     rename(curDir, tmpDir);
     
     // 3. Format BP and hard link blocks from previous directory
-    File curBpDir = getBpRoot(nsInfo.getBlockPoolID(), curDir);
+    File curBpDir = BlockPoolStorage.getBpRoot(nsInfo.getBlockPoolID(), curDir);
     BlockPoolStorage bpStorage = new BlockPoolStorage(nsInfo.getNamespaceID(), 
         nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID());
-    bpStorage.format(new StorageDirectory(curBpDir), nsInfo);
-    linkAllBlocks(tmpDir, curBpDir);
+    bpStorage.format(curDir, nsInfo);
+    linkAllBlocks(tmpDir, new File(curBpDir, STORAGE_DIR_CURRENT));
     
-    // 4. Write version file under <SD>/current/<bpid>/current
+    // 4. Write version file under <SD>/current
     layoutVersion = FSConstants.LAYOUT_VERSION;
-    cTime = nsInfo.getCTime();
+    clusterID = nsInfo.getClusterID();
     sd.write();
     
     // 5. Rename <SD>/previous.tmp to <SD>/previous
@@ -600,10 +583,8 @@ public class DataStorage extends Storage
         doFinalize(sd);
       } else {
         // block pool storage finalize using specific bpID
-        File dnRoot = sd.getRoot();
         BlockPoolStorage bpStorage = bpStorageMap.get(bpID);
-        File bpRoot = getBpRoot(bpID, dnRoot);
-        bpStorage.doFinalize(new StorageDirectory(bpRoot));
+        bpStorage.doFinalize(sd.getCurrentDir());
       }
     }
   }
@@ -631,7 +612,8 @@ public class DataStorage extends Storage
     }    
   }
   
-  static void linkBlocks(File from, File to, int oldLV) throws IOException {
+  static void linkBlocks(File from, File to, int oldLV)
+      throws IOException {
     if (!from.exists()) {
       return;
     }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Tue Mar  1 04:26:40 2011
@@ -51,13 +51,10 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@@ -321,16 +318,15 @@ public class FSDataset implements FSCons
      * 
      * @param bpid Block pool Id
      * @param volume {@link FSVolume} to which this BlockPool belongs to
-     * @param currentDir currentDir corresponding to the BlockPool
+     * @param bpDir directory corresponding to the BlockPool
      * @param conf
      * @throws IOException
      */
-    BlockPool(String bpid, FSVolume volume, File currentDir, Configuration conf)
+    BlockPool(String bpid, FSVolume volume, File bpDir, Configuration conf)
         throws IOException {
       this.bpid = bpid;
       this.volume = volume;
-      this.currentDir = currentDir; 
-      File parent = currentDir.getParentFile();
+      this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
       final File finalizedDir = new File(
           currentDir, DataStorage.STORAGE_DIR_FINALIZED);
 
@@ -339,7 +335,7 @@ public class FSDataset implements FSCons
       // in the future, we might want to do some sort of datanode-local
       // recovery for these blocks. For example, crc validation.
       //
-      this.tmpDir = new File(parent, "tmp");
+      this.tmpDir = new File(bpDir, "tmp");
       if (tmpDir.exists()) {
         FileUtil.fullyDelete(tmpDir);
       }
@@ -358,7 +354,7 @@ public class FSDataset implements FSCons
           throw new IOException("Mkdirs failed to create " + tmpDir.toString());
         }
       }
-      this.dfsUsage = new DU(parent, conf);
+      this.dfsUsage = new DU(bpDir, conf);
       this.dfsUsage.start();
     }
 
@@ -396,7 +392,6 @@ public class FSDataset implements FSCons
      */
     File createTmpFile(Block b) throws IOException {
       File f = new File(tmpDir, b.getBlockName());
-      DataNode.LOG.info("SURESH creating temporary file " + f);
       return FSDataset.createTmpFile(b, f);
     }
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Tue Mar  1 04:26:40 2011
@@ -492,11 +492,30 @@ public class FSImage extends Storage {
     }
     if (startOpt != StartupOption.UPGRADE
           && layoutVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION
-          && layoutVersion != FSConstants.LAYOUT_VERSION)
+          && layoutVersion != FSConstants.LAYOUT_VERSION) {
         throw new IOException(
-           "\nFile system image contains an old layout version " + layoutVersion
-         + ".\nAn upgrade to version " + FSConstants.LAYOUT_VERSION
-         + " is required.\nPlease restart NameNode with -upgrade option.");
+          "\nFile system image contains an old layout version " + layoutVersion
+              + ".\nAn upgrade to version " + FSConstants.LAYOUT_VERSION
+              + " is required.\nPlease restart NameNode with -upgrade option.");
+    }
+    
+    // Upgrade to federation requires -upgrade -clusterid <clusterID> option
+    if (startOpt == StartupOption.UPGRADE
+        && layoutVersion > LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+      if (startOpt.getClusterId() == null) {
+        throw new IOException(
+            "\nFile system image contains an old layout version "
+                + layoutVersion + ".\nAn upgrade to version "
+                + FSConstants.LAYOUT_VERSION
+                + " is required.\nPlease restart NameNode with "
+                + "-upgrade -clusterid <clusterID> option.");
+      }
+      clusterID = startOpt.getClusterId();
+      
+      // Create new block pool Id
+      blockpoolID = newBlockPoolID();
+    }
+    
     // check whether distributed upgrade is reguired and/or should be continued
     verifyDistributedUpgradeProgress(startOpt);
 
@@ -719,7 +738,7 @@ public class FSImage extends Storage {
       throws InconsistentFSStateException {
     if (bpid == null || bpid.equals("")) {
       throw new InconsistentFSStateException(storage, "file "
-          + STORAGE_FILE_VERSION + " is invalid.");
+          + STORAGE_FILE_VERSION + " has no block pool Id.");
     }
     
     if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
@@ -739,8 +758,11 @@ public class FSImage extends Storage {
           + " is not formatted.");
     }
     
-    String sbpid = props.getProperty("blockpoolID");
-    setBlockPoolID(sd.getRoot(), sbpid);
+    // No Block pool ID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
+    if (layoutVersion < LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+      String sbpid = props.getProperty("blockpoolID");
+      setBlockPoolID(sd.getRoot(), sbpid);
+    }
     
     String sDUS, sDUV;
     sDUS = props.getProperty("distributedUpgradeState"); 

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Mar  1 04:26:40 2011
@@ -22,8 +22,6 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
-import java.util.AbstractList;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -163,7 +161,6 @@ public class NameNode implements Namenod
 
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
-  public static String clusterIdStr;
 
   protected FSNamesystem namesystem; 
   protected NamenodeRole role;
@@ -1409,23 +1406,24 @@ public class NameNode implements Namenod
 
     FSImage fsImage = new FSImage(dirsToFormat, editDirsToFormat);
     FSNamesystem nsys = new FSNamesystem(fsImage, conf);
-    //new cluster id
-    // if not provided - see if you can find the current one
-    if(clusterIdStr == null || clusterIdStr.equals("")) {
+    
+    // if clusterID is not provided - see if you can find the current one
+    String clusterId = StartupOption.FORMAT.getClusterId();
+    if(clusterId == null || clusterId.equals("")) {
       // try to get one from the existing storage
-      clusterIdStr = fsImage.determineClusterId();
-      if (clusterIdStr == null || clusterIdStr.equals("")) {
+      clusterId = fsImage.determineClusterId();
+      if (clusterId == null || clusterId.equals("")) {
         throw new IllegalArgumentException("Format must be provided with clusterid");
       }
       if(isConfirmationNeeded) {
-        System.err.print("Use existing cluster id=" + clusterIdStr + "? (Y or N)");
+        System.err.print("Use existing cluster id=" + clusterId + "? (Y or N)");
         if(System.in.read() != 'Y') {
           throw new IllegalArgumentException("Format must be provided with clusterid");
         }
         while(System.in.read() != '\n'); // discard the enter-key
       }
     }
-    nsys.dir.fsImage.format(clusterIdStr);
+    nsys.dir.fsImage.format(clusterId);
     return false;
   }
 
@@ -1500,9 +1498,10 @@ public class NameNode implements Namenod
       if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.FORMAT;
         // might be followed by two args
-        if(i+2<argsLen && args[i+1].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
-          i+=2;
-          clusterIdStr = args[i];  
+        if (i + 2 < argsLen
+            && args[i + 1].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
+          i += 2;
+          startOpt.setClusterId(args[i]);
         }
       } else if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.GENCLUSTERID;
@@ -1514,6 +1513,12 @@ public class NameNode implements Namenod
         startOpt = StartupOption.CHECKPOINT;
       } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.UPGRADE;
+        // might be followed by two args
+        if (i + 2 < argsLen
+            && args[i + 1].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
+          i += 2;
+          startOpt.setClusterId(args[i]);
+        }
       } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.ROLLBACK;
       } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Mar  1 04:26:40 2011
@@ -86,6 +86,7 @@ public class MiniDFSCluster {
     private String[] racks = null; 
     private String [] hosts = null;
     private long [] simulatedCapacities = null;
+    private String clusterId = null;
     
     public Builder(Configuration conf) {
       this.conf = conf;
@@ -164,6 +165,14 @@ public class MiniDFSCluster {
     }
     
     /**
+     * Default: null
+     */
+    public Builder clusterId(String cid) {
+      this.clusterId = cid;
+      return this;
+    }
+
+    /**
      * Construct the actual MiniDFSCluster
      */
     public MiniDFSCluster build() throws IOException {
@@ -184,7 +193,8 @@ public class MiniDFSCluster {
                        builder.option,
                        builder.racks,
                        builder.hosts,
-                       builder.simulatedCapacities);
+                       builder.simulatedCapacities,
+                       builder.clusterId);
   }
   
   public class DataNodeProperties {
@@ -379,13 +389,14 @@ public class MiniDFSCluster {
                         long[] simulatedCapacities) throws IOException {
     initMiniDFSCluster(nameNodePort, conf, numDataNodes, format,
         manageNameDfsDirs, manageDataDfsDirs, operation, racks, hosts,
-        simulatedCapacities);
+        simulatedCapacities, null);
   }
 
   private void initMiniDFSCluster(int nameNodePort, Configuration conf,
       int numDataNodes, boolean format, boolean manageNameDfsDirs,
       boolean manageDataDfsDirs, StartupOption operation, String[] racks,
-      String[] hosts, long[] simulatedCapacities) throws IOException {
+      String[] hosts, long[] simulatedCapacities, String clusterId)
+      throws IOException {
     this.conf = conf;
     base_dir = new File(getBaseDirectory());
     data_dir = new File(base_dir, "data");
@@ -436,6 +447,9 @@ public class MiniDFSCluster {
       }
       GenericTestUtils.formatNamenode(conf);
     }
+    if (operation == StartupOption.UPGRADE){
+      operation.setClusterId(clusterId);
+    }
     
     // Start the NameNode
     String[] args = (operation == null ||
@@ -1216,7 +1230,7 @@ public class MiniDFSCluster {
    * @return Storage directory
    */
   public static File getStorageDir(int dnIndex, int dirIndex) {
-    return new File(getBaseDirectory() + "data" + (2*dnIndex + 1 + dirIndex));
+    return new File(getBaseDirectory() + "data/data" + (2*dnIndex + 1 + dirIndex));
   }
   
   /**
@@ -1236,7 +1250,7 @@ public class MiniDFSCluster {
    * @return finalized directory for a block pool
    */
   public static File getFinalizedDir(File storageDir, String bpid) {
-    return new File(storageDir, "/current/" + bpid + "/finalized/");
+    return new File(storageDir, "/current/" + bpid + "/current/finalized/");
   }
   
   /**

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java Tue Mar  1 04:26:40 2011
@@ -104,17 +104,17 @@ public class TestDFSRollback extends Tes
   }
   
   /**
-   * Attempts to start a DataNode with the given operation.  Starting
-   * the DataNode should throw an exception.
+   * Attempts to start a DataNode with the given operation. Starting
+   * the given block pool should fail.
+   * @param operation startup option
+   * @param bpid block pool Id that should fail to start
+   * @throws IOException 
    */
-  void startDataNodeShouldFail(StartupOption operation) {
-    try {
-      cluster.startDataNodes(conf, 1, false, operation, null); // should fail
-      throw new AssertionError("DataNode should have failed to start");
-    } catch (Exception expected) {
-      // expected
-      assertFalse(cluster.isDataNodeUp());
-    }
+  void startBlockPoolShouldFail(StartupOption operation, String bpid)
+      throws IOException {
+    cluster.startDataNodes(conf, 1, false, operation, null); // should fail
+    assertFalse("Block pool " + bpid + " should have failed to start", 
+        cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
   }
  
   /**
@@ -197,7 +197,8 @@ public class TestDFSRollback extends Tes
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          UpgradeUtilities.getCurrentClusterID(cluster),
                                                          UpgradeUtilities.getCurrentFsscTime(cluster)));
-      startDataNodeShouldFail(StartupOption.ROLLBACK);
+      startBlockPoolShouldFail(StartupOption.ROLLBACK, 
+          cluster.getNamesystem().getPoolId());
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
@@ -218,7 +219,8 @@ public class TestDFSRollback extends Tes
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          UpgradeUtilities.getCurrentClusterID(cluster),
                                                          Long.MAX_VALUE));
-      startDataNodeShouldFail(StartupOption.ROLLBACK);
+      startBlockPoolShouldFail(StartupOption.ROLLBACK, 
+          cluster.getNamesystem().getPoolId());
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java Tue Mar  1 04:26:40 2011
@@ -31,7 +31,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 
 /**
@@ -58,41 +57,45 @@ public class TestDFSUpgrade extends Test
   }
   
   /**
-   * Verify that the current and previous directories exist.  Verify that 
-   * previous hasn't been modified by comparing the checksum of all it's
-   * containing files with their original checksum.  It is assumed that
-   * the server has recovered and upgraded.
-   */
-  void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
-    switch (nodeType) {
-    case NAME_NODE:
-      for (int i = 0; i < baseDirs.length; i++) {
-        assertTrue(new File(baseDirs[i],"current").isDirectory());
-        assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
-        assertTrue(new File(baseDirs[i],"current/edits").isFile());
-        assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
-        assertTrue(new File(baseDirs[i],"current/fstime").isFile());
-      }
-      break;
-    case DATA_NODE:
-      for (int i = 0; i < baseDirs.length; i++) {
-        assertEquals(
-                     UpgradeUtilities.checksumContents(
-                                                       nodeType, new File(baseDirs[i],"current")),
-                     UpgradeUtilities.checksumMasterContents(nodeType));
-      }
-      break;
-    }
+   * For namenode, Verify that the current and previous directories exist.
+   * Verify that previous hasn't been modified by comparing the checksum of all
+   * its files with their original checksum. It is assumed that the
+   * server has recovered and upgraded.
+   */
+  void checkNameNode(String[] baseDirs) throws IOException {
     for (int i = 0; i < baseDirs.length; i++) {
-      assertTrue(new File(baseDirs[i],"previous").isDirectory());
-      assertEquals(
-                   UpgradeUtilities.checksumContents(
-                                                     nodeType, new File(baseDirs[i],"previous")),
-                   UpgradeUtilities.checksumMasterContents(nodeType));
+      assertTrue(new File(baseDirs[i],"current").isDirectory());
+      assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
+      assertTrue(new File(baseDirs[i],"current/edits").isFile());
+      assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
+      assertTrue(new File(baseDirs[i],"current/fstime").isFile());
+      
+      File previous = new File(baseDirs[i], "previous");
+      assertTrue(previous.isDirectory());
+      assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous),
+          UpgradeUtilities.checksumMasterContents(NAME_NODE));
     }
   }
  
   /**
+   * For datanode, for a block pool, verify that the current and previous
+   * directories exist. Verify that previous hasn't been modified by comparing
+   * the checksum of all its files with their original checksum. It
+   * is assumed that the server has recovered and upgraded.
+   */
+  void checkDataNode(String[] baseDirs, String bpid) throws IOException {
+    for (int i = 0; i < baseDirs.length; i++) {
+      File current = new File(baseDirs[i], "current/" + bpid + "/current");
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current),
+        UpgradeUtilities.checksumMasterContents(DATA_NODE));
+      
+      File previous = new File(baseDirs[i], "current/" + bpid + "/previous");
+      assertTrue(previous.isDirectory());
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous),
+          UpgradeUtilities.checksumMasterContents(DATA_NODE));
+    }
+  }
+  /**
    * Attempts to start a NameNode with the given operation.  Starting
    * the NameNode should throw an exception.
    */
@@ -111,17 +114,16 @@ public class TestDFSUpgrade extends Test
   }
   
   /**
-   * Attempts to start a DataNode with the given operation.  Starting
-   * the DataNode should throw an exception.
-   */
-  void startDataNodeShouldFail(StartupOption operation) {
-    try {
-      cluster.startDataNodes(conf, 1, false, operation, null); // should fail
-      throw new AssertionError("DataNode should have failed to start");
-    } catch (Exception expected) {
-      // expected
-      assertFalse(cluster.isDataNodeUp());
-    }
+   * Attempts to start a DataNode with the given operation. Starting
+   * the given block pool should fail.
+   * @param operation startup option
+   * @param bpid block pool Id that should fail to start
+   * @throws IOException 
+   */
+  void startBlockPoolShouldFail(StartupOption operation, String bpid) throws IOException {
+    cluster.startDataNodes(conf, 1, false, operation, null); // should fail
+    assertFalse("Block pool " + bpid + " should have failed to start",
+        cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
   }
  
   /**
@@ -155,7 +157,7 @@ public class TestDFSUpgrade extends Test
       log("Normal NameNode upgrade", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       cluster = createCluster();
-      checkResult(NAME_NODE, nameNodeDirs);
+      checkNameNode(nameNodeDirs);
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
@@ -164,7 +166,7 @@ public class TestDFSUpgrade extends Test
       cluster = createCluster();
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
-      checkResult(DATA_NODE, dataNodeDirs);
+      checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
@@ -181,7 +183,7 @@ public class TestDFSUpgrade extends Test
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
       cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
-      checkResult(DATA_NODE, dataNodeDirs);
+      checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
@@ -195,7 +197,8 @@ public class TestDFSUpgrade extends Test
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          UpgradeUtilities.getCurrentClusterID(cluster),
                                                          UpgradeUtilities.getCurrentFsscTime(cluster)));
-      startDataNodeShouldFail(StartupOption.REGULAR);
+      startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
+          .getCurrentBlockPoolID(null));
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
@@ -209,7 +212,9 @@ public class TestDFSUpgrade extends Test
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          UpgradeUtilities.getCurrentClusterID(cluster),
                                                          Long.MAX_VALUE));
-      startDataNodeShouldFail(StartupOption.REGULAR);
+      // Ensure corresponding block pool failed to initialize
+      startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
+          .getCurrentBlockPoolID(null));
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Tue Mar  1 04:26:40 2011
@@ -186,6 +186,7 @@ public class TestDFSUpgradeFromImage ext
                                   .numDataNodes(numDataNodes)
                                   .format(false)
                                   .startupOption(StartupOption.UPGRADE)
+                                  .clusterId("testClusterId")
                                   .build();
       cluster.waitActive();
       DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Tue Mar  1 04:26:40 2011
@@ -33,6 +33,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
 import org.junit.After;
@@ -69,7 +70,7 @@ public class TestClusterId {
     config.set(DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
 
     // 1. should fail to format without cluster id
-    NameNode.clusterIdStr = null;
+    StartupOption.FORMAT.setClusterId("");
     try {
       NameNode.format(config);
       fail("should fail to format without cluster id");
@@ -80,7 +81,7 @@ public class TestClusterId {
     }
 
     // 2. successful format
-    NameNode.clusterIdStr = "mycluster";
+    StartupOption.FORMAT.setClusterId("mycluster");
     try {
       NameNode.format(config);
     } catch (Exception e) {
@@ -102,7 +103,7 @@ public class TestClusterId {
     
 
     // 3. format with existing cluster id
-    NameNode.clusterIdStr="";
+    StartupOption.FORMAT.setClusterId("");
     try {
       NameNode.format(config);
     } catch (Exception e) {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java?rev=1075654&r1=1075653&r2=1075654&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java Tue Mar  1 04:26:40 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.test;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
 /**
@@ -40,7 +41,7 @@ public abstract class GenericTestUtils {
    * @throws IOException
    */
   public static void formatNamenode(Configuration conf) throws IOException {
-    NameNode.clusterIdStr = "testClusterId";
+    StartupOption.FORMAT.setClusterId("testClusterID");
     NameNode.format(conf);
   }
 }



Mime
View raw message