hadoop-hdfs-commits mailing list archives

From szets...@apache.org
Subject svn commit: r1576130 [6/7] - in /hadoop/common/branches/branch-2.4/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hado...
Date Mon, 10 Mar 2014 23:40:24 GMT
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Mon Mar 10 23:40:21 2014
@@ -23,7 +23,6 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
 import java.net.URL;
 import java.security.DigestInputStream;
 import java.security.MessageDigest;
@@ -39,10 +38,6 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -50,11 +45,13 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.io.MD5Hash;
-import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -90,10 +87,9 @@ public class TransferFsImage {
         null, false);
   }
 
-  public static MD5Hash downloadImageToStorage(
-      URL fsName, long imageTxId, Storage dstStorage, boolean needDigest)
-      throws IOException {
-    String fileid = GetImageServlet.getParamStringForImage(
+  public static MD5Hash downloadImageToStorage(URL fsName, long imageTxId,
+      Storage dstStorage, boolean needDigest) throws IOException {
+    String fileid = GetImageServlet.getParamStringForImage(null,
         imageTxId, dstStorage);
     String fileName = NNStorage.getCheckpointImageFileName(imageTxId);
     
@@ -166,14 +162,14 @@ public class TransferFsImage {
    * @param myNNAddress the host/port where the local node is running an
    *                           HTTPServer hosting GetImageServlet
    * @param storage the storage directory to transfer the image from
+   * @param nnf the NameNodeFile type of the image
    * @param txid the transaction ID of the image to be uploaded
    */
-  public static void uploadImageFromStorage(URL fsName,
-      URL myNNAddress,
-      Storage storage, long txid) throws IOException {
+  public static void uploadImageFromStorage(URL fsName, URL myNNAddress,
+      Storage storage, NameNodeFile nnf, long txid) throws IOException {
     
-    String fileid = GetImageServlet.getParamStringToPutImage(
-        txid, myNNAddress, storage);
+    String fileid = GetImageServlet.getParamStringToPutImage(nnf, txid,
+        myNNAddress, storage);
     // this doesn't directly upload an image, but rather asks the NN
     // to connect back to the 2NN to download the specified image.
     try {

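The widened signature threads the image type through to the PUT-image request, so a checkpoint can be uploaded either as a regular fsimage or as a rollback image. A minimal sketch of a call through the new signature, assuming hypothetical NN addresses and txid (the real call site is in StandbyCheckpointer below):

    import java.io.IOException;
    import java.net.URL;
    import org.apache.hadoop.hdfs.server.common.Storage;
    import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
    import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;

    static void uploadRollbackImage(Storage storage, long txid)
        throws IOException {
      // Hypothetical endpoints; real values come from the NN configuration.
      URL activeNN = new URL("http://nn1.example.com:50070");
      URL myNN = new URL("http://nn2.example.com:50070");
      // Tag the upload as a rollback image instead of a regular fsimage.
      TransferFsImage.uploadImageFromStorage(activeNN, myNN, storage,
          NameNodeFile.IMAGE_ROLLBACK, txid);
    }
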
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Mon Mar 10 23:40:21 2014
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
@@ -152,9 +154,9 @@ public class BootstrapStandby implements
     }
 
     if (!checkLayoutVersion(nsInfo)) {
-      LOG.fatal("Layout version on remote node (" +
-          nsInfo.getLayoutVersion() + ") does not match " +
-          "this node's layout version (" + HdfsConstants.LAYOUT_VERSION + ")");
+      LOG.fatal("Layout version on remote node (" + nsInfo.getLayoutVersion()
+          + ") does not match " + "this node's layout version ("
+          + HdfsConstants.NAMENODE_LAYOUT_VERSION + ")");
       return ERR_CODE_INVALID_VERSION;
     }
 
@@ -192,7 +194,7 @@ public class BootstrapStandby implements
     FSImage image = new FSImage(conf);
     try {
       image.getStorage().setStorageInfo(storage);
-      image.initEditLog();
+      image.initEditLog(StartupOption.REGULAR);
       assert image.getEditLog().isOpenForRead() :
         "Expected edit log to be open for read";
 
@@ -206,9 +208,9 @@ public class BootstrapStandby implements
 
       // Download that checkpoint into our storage directories.
       MD5Hash hash = TransferFsImage.downloadImageToStorage(
-        otherHttpAddr, imageTxId,
-        storage, true);
-      image.saveDigestAndRenameCheckpointImage(imageTxId, hash);
+        otherHttpAddr, imageTxId, storage, true);
+      image.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE, imageTxId,
+          hash);
     } catch (IOException ioe) {
       image.close();
       throw ioe;
@@ -256,7 +258,7 @@ public class BootstrapStandby implements
   }
 
   private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
-    return (nsInfo.getLayoutVersion() == HdfsConstants.LAYOUT_VERSION);
+    return (nsInfo.getLayoutVersion() == HdfsConstants.NAMENODE_LAYOUT_VERSION);
   }
   
   private void parseConfAndFindOtherNN() throws IOException {

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java Mon Mar 10 23:40:21 2014
@@ -224,7 +224,7 @@ public class EditLogTailer {
       // disk are ignored.
       long editsLoaded = 0;
       try {
-        editsLoaded = image.loadEdits(streams, namesystem, null);
+        editsLoaded = image.loadEdits(streams, namesystem);
       } catch (EditLogInputException elie) {
         editsLoaded = elie.getNumEditsLoaded();
         throw elie;

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java Mon Mar 10 23:40:21 2014
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointConf;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceCancelledException;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
@@ -141,9 +142,14 @@ public class StandbyCheckpointer {
     }
   }
 
+  public void triggerRollbackCheckpoint() {
+    thread.interrupt();
+  }
+
   private void doCheckpoint() throws InterruptedException, IOException {
     assert canceler != null;
     final long txid;
+    final NameNodeFile imageType;
     
     namesystem.longReadLockInterruptibly();
     try {
@@ -163,7 +169,15 @@ public class StandbyCheckpointer {
         return;
       }
 
-      img.saveNamespace(namesystem, canceler);
+      if (namesystem.isRollingUpgrade()
+          && !namesystem.getFSImage().hasRollbackFSImage()) {
+        // if we will do rolling upgrade but have not created the rollback image
+        // yet, name this checkpoint as fsimage_rollback
+        imageType = NameNodeFile.IMAGE_ROLLBACK;
+      } else {
+        imageType = NameNodeFile.IMAGE;
+      }
+      img.saveNamespace(namesystem, imageType, canceler);
       txid = img.getStorage().getMostRecentCheckpointTxId();
       assert txid == thisCheckpointTxId : "expected to save checkpoint at txid=" +
         thisCheckpointTxId + " but instead saved at txid=" + txid;
@@ -179,9 +193,8 @@ public class StandbyCheckpointer {
     Future<Void> upload = executor.submit(new Callable<Void>() {
       @Override
       public Void call() throws IOException {
-        TransferFsImage.uploadImageFromStorage(
-            activeNNAddress, myNNAddress,
-            namesystem.getFSImage().getStorage(), txid);
+        TransferFsImage.uploadImageFromStorage(activeNNAddress, myNNAddress,
+            namesystem.getFSImage().getStorage(), imageType, txid);
         return null;
       }
     });
@@ -266,16 +279,20 @@ public class StandbyCheckpointer {
     }
 
     private void doWork() {
+      final long checkPeriod = 1000 * checkpointConf.getCheckPeriod();
       // Reset checkpoint time so that we don't always checkpoint
       // on startup.
       lastCheckpointTime = now();
       while (shouldRun) {
-        try {
-          Thread.sleep(1000 * checkpointConf.getCheckPeriod());
-        } catch (InterruptedException ie) {
-        }
-        if (!shouldRun) {
-          break;
+        boolean needRollbackCheckpoint = namesystem.isNeedRollbackFsImage();
+        if (!needRollbackCheckpoint) {
+          try {
+            Thread.sleep(checkPeriod);
+          } catch (InterruptedException ie) {
+          }
+          if (!shouldRun) {
+            break;
+          }
         }
         try {
           // We may have lost our ticket since last checkpoint, log in again, just in case
@@ -287,8 +304,10 @@ public class StandbyCheckpointer {
           long uncheckpointed = countUncheckpointedTxns();
           long secsSinceLast = (now - lastCheckpointTime)/1000;
           
-          boolean needCheckpoint = false;
-          if (uncheckpointed >= checkpointConf.getTxnCount()) {
+          boolean needCheckpoint = needRollbackCheckpoint;
+          if (needCheckpoint) {
+            LOG.info("Triggering a rollback fsimage for rolling upgrade.");
+          } else if (uncheckpointed >= checkpointConf.getTxnCount()) {
             LOG.info("Triggering checkpoint because there have been " + 
                 uncheckpointed + " txns since the last checkpoint, which " +
                 "exceeds the configured threshold " +
@@ -313,6 +332,13 @@ public class StandbyCheckpointer {
           
           if (needCheckpoint) {
             doCheckpoint();
+            // reset needRollbackCheckpoint to false only when we finish a ckpt
+            // for rollback image
+            if (needRollbackCheckpoint
+                && namesystem.getFSImage().hasRollbackFSImage()) {
+              namesystem.setCreatedRollbackImages(true);
+              namesystem.setNeedRollbackFsImage(false);
+            }
             lastCheckpointTime = now;
           }
         } catch (SaveNamespaceCancelledException ce) {

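triggerRollbackCheckpoint() works by interrupting the checkpointer thread: the interrupt cuts Thread.sleep() short, the loop re-reads isNeedRollbackFsImage(), and the next checkpoint is saved as NameNodeFile.IMAGE_ROLLBACK. A self-contained sketch of that wake-up pattern (class and field names are hypothetical, not the real StandbyCheckpointer):

    public class CheckpointLoop implements Runnable {
      private volatile boolean shouldRun = true;
      private volatile boolean needRollback = false;
      private final long checkPeriodMs = 60_000L;   // hypothetical check period
      private Thread thread;

      public void start() {
        thread = new Thread(this, "checkpointer");
        thread.start();
      }

      /** Analogous to triggerRollbackCheckpoint(); call start() first. */
      public void triggerRollbackCheckpoint() {
        needRollback = true;
        thread.interrupt();   // cut the sleep short
      }

      @Override
      public void run() {
        while (shouldRun) {
          if (!needRollback) {
            try {
              Thread.sleep(checkPeriodMs);
            } catch (InterruptedException ie) {
              // interrupted: fall through and re-check the flags immediately
            }
            if (!shouldRun) {
              break;
            }
          }
          if (needRollback) {
            // perform the urgent (rollback) checkpoint, then clear the flag
            needRollback = false;
          }
        }
      }
    }
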
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java Mon Mar 10 23:40:21 2014
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.pr
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -31,11 +32,14 @@ public class HeartbeatResponse {
   
   /** Information about the current HA-related state of the NN */
   private NNHAStatusHeartbeat haStatus;
+
+  private RollingUpgradeStatus rollingUpdateStatus;
   
   public HeartbeatResponse(DatanodeCommand[] cmds,
-      NNHAStatusHeartbeat haStatus) {
+      NNHAStatusHeartbeat haStatus, RollingUpgradeStatus rollingUpdateStatus) {
     commands = cmds;
     this.haStatus = haStatus;
+    this.rollingUpdateStatus = rollingUpdateStatus;
   }
   
   public DatanodeCommand[] getCommands() {
@@ -45,4 +49,8 @@ public class HeartbeatResponse {
   public NNHAStatusHeartbeat getNameNodeHaState() {
     return haStatus;
   }
+
+  public RollingUpgradeStatus getRollingUpdateStatus() {
+    return rollingUpdateStatus;
+  }
 }

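Piggybacking the status on the heartbeat lets every datanode learn that a rolling upgrade is in progress without a new RPC; the matching wire field is optional in DatanodeProtocol.proto below, so the getter can return null. A minimal receiving-side sketch (the surrounding datanode code is assumed; getBlockPoolId() mirrors the blockPoolId field of RollingUpgradeStatusProto):

    import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;

    static void handleHeartbeat(HeartbeatResponse resp) {
      RollingUpgradeStatus status = resp.getRollingUpdateStatus();
      if (status != null) {
        // Non-null only while a rolling upgrade is in progress.
        System.out.println("Rolling upgrade in progress for block pool "
            + status.getBlockPoolId());
      }
    }
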
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Mon Mar 10 23:40:21 2014
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.util.VersionInfo;
 
@@ -41,13 +42,14 @@ public class NamespaceInfo extends Stora
   String softwareVersion;
 
   public NamespaceInfo() {
-    super();
+    super(NodeType.NAME_NODE);
     buildVersion = null;
   }
 
   public NamespaceInfo(int nsID, String clusterID, String bpID,
       long cT, String buildVersion, String softwareVersion) {
-    super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT);
+    super(HdfsConstants.NAMENODE_LAYOUT_VERSION, nsID, clusterID, cT,
+        NodeType.NAME_NODE);
     blockPoolID = bpID;
     this.buildVersion = buildVersion;
     this.softwareVersion = softwareVersion;

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Mon Mar 10 23:40:21 2014
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.tools;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
@@ -47,9 +48,12 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
@@ -64,6 +68,8 @@ import org.apache.hadoop.ipc.RefreshCall
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.google.common.base.Preconditions;
+
 /**
  * This class provides some DFS administrative access shell commands.
  */
@@ -271,7 +277,71 @@ public class DFSAdmin extends FsShell {
       dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, quota);
     }
   }
-  
+
+  private static class RollingUpgradeCommand {
+    static final String NAME = "rollingUpgrade";
+    static final String USAGE = "-"+NAME+" [<query|prepare|finalize>]";
+    static final String DESCRIPTION = USAGE + ":\n"
+        + "     query: query the current rolling upgrade status.\n"
+        + "   prepare: prepare a new rolling upgrade."
+        + "  finalize: finalize the current rolling upgrade.";
+
+    /** Check if a command is the rollingUpgrade command
+     * 
+     * @param cmd A string representation of a command starting with "-"
+     * @return true if this is a rollingUpgrade command; false otherwise
+     */
+    static boolean matches(String cmd) {
+      return ("-"+NAME).equals(cmd); 
+    }
+
+    private static void printMessage(RollingUpgradeInfo info,
+        PrintStream out) {
+      if (info != null && info.isStarted()) {
+        if (!info.createdRollbackImages()) {
+          out.println(
+              "Preparing for upgrade. Data is being saved for rollback."
+              + "\nRun \"dfsadmin -rollingUpgrade query\" to check the status"
+              + "\nfor proceeding with rolling upgrade");
+          out.println(info);
+        } else if (!info.isFinalized()) {
+          out.println("Proceed with rolling upgrade:");
+          out.println(info);
+        } else {
+          out.println("Rolling upgrade is finalized.");
+          out.println(info);
+        }
+      } else {
+        out.println("There is no rolling upgrade in progress.");
+      }
+    }
+
+    static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOException {
+      final RollingUpgradeAction action = RollingUpgradeAction.fromString(
+          argv.length >= 2? argv[1]: "");
+      if (action == null) {
+        throw new IllegalArgumentException("Failed to covert \"" + argv[1]
+            +"\" to " + RollingUpgradeAction.class.getSimpleName());
+      }
+
+      System.out.println(action + " rolling upgrade ...");
+
+      final RollingUpgradeInfo info = dfs.rollingUpgrade(action);
+      switch(action){
+      case QUERY:
+        break;
+      case PREPARE:
+        Preconditions.checkState(info.isStarted());
+        break;
+      case FINALIZE:
+        Preconditions.checkState(info.isFinalized());
+        break;
+      }
+      printMessage(info, System.out);
+      return 0;
+    }
+  }
+
   /**
    * Construct a DFSAdmin object.
    */
@@ -576,9 +646,11 @@ public class DFSAdmin extends FsShell {
       "\t[" + ClearQuotaCommand.USAGE +"]\n" +
       "\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
       "\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
+      "\t[-finalizeUpgrade]\n" +
+      "\t[" + RollingUpgradeCommand.USAGE +"]\n" +
       "\t[-refreshServiceAcl]\n" +
       "\t[-refreshUserToGroupsMappings]\n" +
-      "\t[refreshSuperUserGroupsConfiguration]\n" +
+      "\t[-refreshSuperUserGroupsConfiguration]\n" +
       "\t[-refreshCallQueue]\n" +
       "\t[-printTopology]\n" +
       "\t[-refreshNamenodes datanodehost:port]\n"+
@@ -587,6 +659,8 @@ public class DFSAdmin extends FsShell {
       "\t[-fetchImage <local directory>]\n" +
       "\t[-allowSnapshot <snapshotDir>]\n" +
       "\t[-disallowSnapshot <snapshotDir>]\n" +
+      "\t[-shutdownDatanode <datanode_host:ipc_port> [upgrade]]\n" +
+      "\t[-getDatanodeInfo <datanode_host:ipc_port>\n" +
       "\t[-help [cmd]]\n";
 
     String report ="-report: \tReports basic filesystem information and statistics.\n";
@@ -685,6 +759,18 @@ public class DFSAdmin extends FsShell {
     
     String disallowSnapshot = "-disallowSnapshot <snapshotDir>:\n" +
         "\tDo not allow snapshots to be taken on a directory any more.\n";
+
+    String shutdownDatanode = "-shutdownDatanode <datanode_host:ipc_port> [upgrade]\n"
+        + "\tSubmit a shutdown request for the given datanode. If an optional\n"
+        + "\t\"upgrade\" argument is specified, clients accessing the datanode\n"
+        + "\twill be advised to wait for it to restart and the fast start-up\n"
+        + "\tmode will be enabled. When the restart does not happen in time,\n"
+        + "\tclients will timeout and ignore the datanode. In such case, the\n"
+        + "\tfast start-up mode will also be disabled.\n";
+
+    String getDatanodeInfo = "-getDatanodeInfo <datanode_host:ipc_port>\n"
+        + "\tGet the information about the given datanode. This command can\n"
+        + "\tbe used for checking if a datanode is alive.\n";
     
     String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
       "\t\tis specified.\n";
@@ -703,6 +789,8 @@ public class DFSAdmin extends FsShell {
       System.out.println(refreshNodes);
     } else if ("finalizeUpgrade".equals(cmd)) {
       System.out.println(finalizeUpgrade);
+    } else if (RollingUpgradeCommand.matches("-"+cmd)) {
+      System.out.println(RollingUpgradeCommand.DESCRIPTION);
     } else if ("metasave".equals(cmd)) {
       System.out.println(metaSave);
     } else if (SetQuotaCommand.matches("-"+cmd)) {
@@ -735,6 +823,10 @@ public class DFSAdmin extends FsShell {
       System.out.println(allowSnapshot);
     } else if ("disallowSnapshot".equalsIgnoreCase(cmd)) {
       System.out.println(disallowSnapshot);
+    } else if ("shutdownDatanode".equalsIgnoreCase(cmd)) {
+      System.out.println(shutdownDatanode);
+    } else if ("getDatanodeInfo".equalsIgnoreCase(cmd)) {
+      System.out.println(getDatanodeInfo);
     } else if ("help".equals(cmd)) {
       System.out.println(help);
     } else {
@@ -746,6 +838,7 @@ public class DFSAdmin extends FsShell {
       System.out.println(restoreFailedStorage);
       System.out.println(refreshNodes);
       System.out.println(finalizeUpgrade);
+      System.out.println(RollingUpgradeCommand.DESCRIPTION);
       System.out.println(metaSave);
       System.out.println(SetQuotaCommand.DESCRIPTION);
       System.out.println(ClearQuotaCommand.DESCRIPTION);
@@ -762,6 +855,8 @@ public class DFSAdmin extends FsShell {
       System.out.println(fetchImage);
       System.out.println(allowSnapshot);
       System.out.println(disallowSnapshot);
+      System.out.println(shutdownDatanode);
+      System.out.println(getDatanodeInfo);
       System.out.println(help);
       System.out.println();
       ToolRunner.printGenericCommandUsage(System.out);
@@ -980,6 +1075,9 @@ public class DFSAdmin extends FsShell {
     } else if ("-finalizeUpgrade".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-finalizeUpgrade]");
+    } else if (RollingUpgradeCommand.matches(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+          + " [" + RollingUpgradeCommand.USAGE+"]");
     } else if ("-metasave".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
           + " [-metasave filename]");
@@ -1034,6 +1132,7 @@ public class DFSAdmin extends FsShell {
       System.err.println("           [-restoreFailedStorage true|false|check]");
       System.err.println("           [-refreshNodes]");
       System.err.println("           [-finalizeUpgrade]");
+      System.err.println("           ["+RollingUpgradeCommand.USAGE+"]");
       System.err.println("           [-metasave filename]");
       System.err.println("           [-refreshServiceAcl]");
       System.err.println("           [-refreshUserToGroupsMappings]");
@@ -1048,6 +1147,8 @@ public class DFSAdmin extends FsShell {
       System.err.println("           ["+ClearSpaceQuotaCommand.USAGE+"]");      
       System.err.println("           [-setBalancerBandwidth <bandwidth in bytes per second>]");
       System.err.println("           [-fetchImage <local directory>]");
+      System.err.println("           [-shutdownDatanode <datanode_host:ipc_port> [upgrade]]");
+      System.err.println("           [-getDatanodeInfo <datanode_host:ipc_port>]");
       System.err.println("           [-help [cmd]]");
       System.err.println();
       ToolRunner.printGenericCommandUsage(System.err);
@@ -1119,6 +1220,11 @@ public class DFSAdmin extends FsShell {
         printUsage(cmd);
         return exitCode;
       }
+    } else if (RollingUpgradeCommand.matches(cmd)) {
+      if (argv.length < 1 || argv.length > 2) {
+        printUsage(cmd);
+        return exitCode;
+      }
     } else if ("-metasave".equals(cmd)) {
       if (argv.length != 2) {
         printUsage(cmd);
@@ -1159,6 +1265,16 @@ public class DFSAdmin extends FsShell {
         printUsage(cmd);
         return exitCode;
       }
+    } else if ("-shutdownDatanode".equals(cmd)) {
+      if ((argv.length != 2) && (argv.length != 3)) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    } else if ("-getDatanodeInfo".equals(cmd)) {
+      if (argv.length != 2) {
+        printUsage(cmd);
+        return exitCode;
+      }
     }
     
     // initialize DFSAdmin
@@ -1194,6 +1310,8 @@ public class DFSAdmin extends FsShell {
         exitCode = refreshNodes();
       } else if ("-finalizeUpgrade".equals(cmd)) {
         exitCode = finalizeUpgrade();
+      } else if (RollingUpgradeCommand.matches(cmd)) {
+        exitCode = RollingUpgradeCommand.run(getDFS(), argv, i);
       } else if ("-metasave".equals(cmd)) {
         exitCode = metaSave(argv, i);
       } else if (ClearQuotaCommand.matches(cmd)) {
@@ -1222,6 +1340,10 @@ public class DFSAdmin extends FsShell {
         exitCode = setBalancerBandwidth(argv, i);
       } else if ("-fetchImage".equals(cmd)) {
         exitCode = fetchImage(argv, i);
+      } else if ("-shutdownDatanode".equals(cmd)) {
+        exitCode = shutdownDatanode(argv, i);
+      } else if ("-getDatanodeInfo".equals(cmd)) {
+        exitCode = getDatanodeInfo(argv, i);
       } else if ("-help".equals(cmd)) {
         if (i < argv.length) {
           printHelp(argv[i]);
@@ -1306,6 +1428,35 @@ public class DFSAdmin extends FsShell {
     return 0;
   }
 
+  private int shutdownDatanode(String[] argv, int i) throws IOException {
+    final String dn = argv[i];
+    ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
+    boolean upgrade = false;
+    if (argv.length-1 == i+1) {
+      if ("upgrade".equalsIgnoreCase(argv[i+1])) {
+        upgrade = true;
+      } else {
+        printUsage("-shutdownDatanode");
+        return -1;
+      }
+    }
+    dnProxy.shutdownDatanode(upgrade);
+    System.out.println("Submitted a shutdown request to datanode " + dn);
+    return 0;
+  }
+
+  private int getDatanodeInfo(String[] argv, int i) throws IOException {
+    ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[i]);
+    try {
+      DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
+      System.out.println(dnInfo.getDatanodeLocalReport());
+    } catch (IOException ioe) {
+      System.err.println("Datanode unreachable.");
+      return -1;
+    }
+    return 0;
+  }
+
   /**
    * main() has some simple utility methods.
    * @param argv Command line parameters.

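Taken together, the new subcommands drive the admin side of a rolling upgrade: prepare it, poll with query until the rollback image exists, restart the daemons, then finalize. A hedged sketch of the programmatic equivalent, using only the DistributedFileSystem calls exercised by RollingUpgradeCommand.run() above (the poll interval is invented):

    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
    import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

    static void prepareAndWait(DistributedFileSystem dfs) throws Exception {
      // Ask the NN to start a rolling upgrade and create the rollback image.
      dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);

      // Poll, as "dfsadmin -rollingUpgrade query" would, until the standby
      // has checkpointed the rollback image.
      RollingUpgradeInfo info;
      do {
        Thread.sleep(10000);   // hypothetical poll interval
        info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
      } while (info != null && !info.createdRollbackImages());
    }
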
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Mon Mar 10 23:40:21 2014
@@ -30,10 +30,10 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
@@ -158,7 +158,8 @@ class ImageLoaderCurrent implements Imag
       imageVersion = in.readInt();
       if( !canLoadVersion(imageVersion))
         throw new IOException("Cannot process fslayout version " + imageVersion);
-      if (LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.ADD_LAYOUT_FLAGS, imageVersion)) {
         LayoutFlags.read(in);
       }
 
@@ -169,22 +170,25 @@ class ImageLoaderCurrent implements Imag
 
       v.visit(ImageElement.GENERATION_STAMP, in.readLong());
 
-      if (LayoutVersion.supports(Feature.SEQUENTIAL_BLOCK_ID, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.SEQUENTIAL_BLOCK_ID, imageVersion)) {
         v.visit(ImageElement.GENERATION_STAMP_V2, in.readLong());
         v.visit(ImageElement.GENERATION_STAMP_V1_LIMIT, in.readLong());
         v.visit(ImageElement.LAST_ALLOCATED_BLOCK_ID, in.readLong());
       }
 
-      if (LayoutVersion.supports(Feature.STORED_TXIDS, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.STORED_TXIDS, imageVersion)) {
         v.visit(ImageElement.TRANSACTION_ID, in.readLong());
       }
       
-      if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.ADD_INODE_ID, imageVersion)) {
         v.visit(ImageElement.LAST_INODE_ID, in.readLong());
       }
       
-      boolean supportSnapshot = LayoutVersion.supports(Feature.SNAPSHOT,
-          imageVersion);
+      boolean supportSnapshot = NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.SNAPSHOT, imageVersion);
       if (supportSnapshot) {
         v.visit(ImageElement.SNAPSHOT_COUNTER, in.readInt());
         int numSnapshots = in.readInt();
@@ -194,7 +198,8 @@ class ImageLoaderCurrent implements Imag
         }
       }
       
-      if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.FSIMAGE_COMPRESSION, imageVersion)) {
         boolean isCompressed = in.readBoolean();
         v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed));
         if (isCompressed) {
@@ -216,11 +221,13 @@ class ImageLoaderCurrent implements Imag
 
       processINodesUC(in, v, skipBlocks);
 
-      if (LayoutVersion.supports(Feature.DELEGATION_TOKEN, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.DELEGATION_TOKEN, imageVersion)) {
         processDelegationTokens(in, v);
       }
       
-      if (LayoutVersion.supports(Feature.CACHING, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.CACHING, imageVersion)) {
         processCacheManagerState(in, v);
       }
       v.leaveEnclosingElement(); // FSImage
@@ -323,7 +330,8 @@ class ImageLoaderCurrent implements Imag
       String n = new String(name, "UTF8");
       v.visit(ImageElement.INODE_PATH, n);
       
-      if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.ADD_INODE_ID, imageVersion)) {
         long inodeId = in.readLong();
         v.visit(ImageElement.INODE_ID, inodeId);
       }
@@ -443,7 +451,8 @@ class ImageLoaderCurrent implements Imag
     v.visitEnclosingElement(ImageElement.INODES,
         ImageElement.NUM_INODES, numInodes);
     
-    if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion)) {
+    if (NameNodeLayoutVersion.supports(
+        LayoutVersion.Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion)) {
       if (!supportSnapshot) {
         processLocalNameINodes(in, v, numInodes, skipBlocks);
       } else {
@@ -584,7 +593,8 @@ class ImageLoaderCurrent implements Imag
     if (!useRoot) {
       if (in.readBoolean()) {
         v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_DIRECTORY_ATTRIBUTES);
-        if (LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
+        if (NameNodeLayoutVersion.supports(
+            LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
           processINodeDirectoryAttributes(in, v, currentINodeName);
         } else {
           processINode(in, v, true, currentINodeName, true);
@@ -678,10 +688,10 @@ class ImageLoaderCurrent implements Imag
   private void processINode(DataInputStream in, ImageVisitor v,
       boolean skipBlocks, String parentName, boolean isSnapshotCopy)
       throws IOException {
-    boolean supportSnapshot = 
-        LayoutVersion.supports(Feature.SNAPSHOT, imageVersion);
-    boolean supportInodeId = 
-        LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion);
+    boolean supportSnapshot = NameNodeLayoutVersion.supports(
+        LayoutVersion.Feature.SNAPSHOT, imageVersion);
+    boolean supportInodeId = NameNodeLayoutVersion.supports(
+        LayoutVersion.Feature.ADD_INODE_ID, imageVersion);
     
     v.visitEnclosingElement(ImageElement.INODE);
     final String pathName = readINodePath(in, parentName);
@@ -694,7 +704,8 @@ class ImageLoaderCurrent implements Imag
     }
     v.visit(ImageElement.REPLICATION, in.readShort());
     v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
-    if(LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion))
+    if(NameNodeLayoutVersion.supports(
+        LayoutVersion.Feature.FILE_ACCESS_TIME, imageVersion))
       v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
     v.visit(ImageElement.BLOCK_SIZE, in.readLong());
     int numBlocks = in.readInt();
@@ -723,7 +734,8 @@ class ImageLoaderCurrent implements Imag
         dirNodeMap.put(inodeId, pathName);
       }
       v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
-      if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.DISKSPACE_QUOTA, imageVersion))
         v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
       if (supportSnapshot) {
         boolean snapshottable = in.readBoolean();
@@ -771,7 +783,8 @@ class ImageLoaderCurrent implements Imag
     v.visit(ImageElement.INODE_PATH, pathName);
     processPermission(in, v);
     v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
-    if(LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion)) {
+    if(NameNodeLayoutVersion.supports(
+        LayoutVersion.Feature.FILE_ACCESS_TIME, imageVersion)) {
       v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
     }
 
@@ -800,7 +813,8 @@ class ImageLoaderCurrent implements Imag
     v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
     if (in.readBoolean()) {
       v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES);
-      if (LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
         processINodeFileAttributes(in, v, currentINodeName);
       } else {
         processINode(in, v, true, currentINodeName, true);

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java Mon Mar 10 23:40:21 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.util;
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
@@ -65,23 +66,17 @@ public abstract class MD5FileUtils {
   }
   
   /**
-   * Read the md5 checksum stored alongside the given file, or null
-   * if no md5 is stored.
+   * Read the md5 file stored alongside the given data file
+   * and match the md5 file content.
    * @param dataFile the file containing data
-   * @return the checksum stored in dataFile.md5
+   * @return a matcher with two matched groups
+   *   where group(1) is the md5 string and group(2) is the data file path.
    */
-  public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
-    File md5File = getDigestFileForFile(dataFile);
-
-    String md5Line;
-    
-    if (!md5File.exists()) {
-      return null;
-    }
-    
+  private static Matcher readStoredMd5(File md5File) throws IOException {
     BufferedReader reader =
         new BufferedReader(new InputStreamReader(new FileInputStream(
             md5File), Charsets.UTF_8));
+    String md5Line;
     try {
       md5Line = reader.readLine();
       if (md5Line == null) { md5Line = ""; }
@@ -94,9 +89,24 @@ public abstract class MD5FileUtils {
     
     Matcher matcher = LINE_REGEX.matcher(md5Line);
     if (!matcher.matches()) {
-      throw new IOException("Invalid MD5 file at " + md5File
-          + " (does not match expected pattern)");
+      throw new IOException("Invalid MD5 file " + md5File + ": the content \""
+          + md5Line + "\" does not match the expected pattern.");
+    }
+    return matcher;
+  }
+
+  /**
+   * Read the md5 checksum stored alongside the given data file.
+   * @param dataFile the file containing data
+   * @return the checksum stored in dataFile.md5
+   */
+  public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
+    final File md5File = getDigestFileForFile(dataFile);
+    if (!md5File.exists()) {
+      return null;
     }
+
+    final Matcher matcher = readStoredMd5(md5File);
     String storedHash = matcher.group(1);
     File referencedFile = new File(matcher.group(2));
 
@@ -135,15 +145,37 @@ public abstract class MD5FileUtils {
    */
   public static void saveMD5File(File dataFile, MD5Hash digest)
       throws IOException {
+    final String digestString = StringUtils.byteToHexString(digest.getDigest());
+    saveMD5File(dataFile, digestString);
+  }
+
+  private static void saveMD5File(File dataFile, String digestString)
+      throws IOException {
     File md5File = getDigestFileForFile(dataFile);
-    String digestString = StringUtils.byteToHexString(
-        digest.getDigest());
     String md5Line = digestString + " *" + dataFile.getName() + "\n";
-    
+
     AtomicFileOutputStream afos = new AtomicFileOutputStream(md5File);
     afos.write(md5Line.getBytes(Charsets.UTF_8));
     afos.close();
-    LOG.debug("Saved MD5 " + digest + " to " + md5File);
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Saved MD5 " + digestString + " to " + md5File);
+    }
+  }
+
+  public static void renameMD5File(File oldDataFile, File newDataFile)
+      throws IOException {
+    final File fromFile = getDigestFileForFile(oldDataFile);
+    if (!fromFile.exists()) {
+      throw new FileNotFoundException(fromFile + " does not exist.");
+    }
+
+    final String digestString = readStoredMd5(fromFile).group(1);
+    saveMD5File(newDataFile, digestString);
+
+    if (!fromFile.delete()) {
+      LOG.warn("deleting  " + fromFile.getAbsolutePath() + " FAILED");
+    }
   }
 
   /**

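renameMD5File() cannot simply rename the ".md5" sidecar, because the stored line embeds the data file's own name in the "<hex-digest> *<filename>" format written by saveMD5File(); it therefore re-reads the digest and writes a fresh sidecar for the new name. A small illustration with invented file names:

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.hdfs.util.MD5FileUtils;

    static void promoteCheckpointDigest(File dir) throws IOException {
      // Names are illustrative only. After the data file itself has been
      // renamed (not shown), move its digest sidecar along with it:
      // "<hex> *fsimage.ckpt_...42" becomes "<hex> *fsimage_...42".
      MD5FileUtils.renameMD5File(
          new File(dir, "fsimage.ckpt_0000000000000000042"),
          new File(dir, "fsimage_0000000000000000042"));
    }
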
Propchange: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1574259
  Merged /hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1550130-1574256
  Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1576128

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto Mon Mar 10 23:40:21 2014
@@ -128,6 +128,28 @@ message GetHdfsBlockLocationsResponsePro
 }
 
 /**
+ * forUpgrade - if true, clients are advised to wait for the restart and the
+ *              quick restart path for upgrade is set up. Otherwise, the
+ *              datanode does a regular shutdown.
+ */
+message ShutdownDatanodeRequestProto {
+  required bool forUpgrade = 1;
+}
+
+message ShutdownDatanodeResponseProto {
+}
+
+/**
+ * Ping datanode for liveness and quick info
+ */
+message GetDatanodeInfoRequestProto {
+}
+
+message GetDatanodeInfoResponseProto {
+  required DatanodeLocalInfoProto localInfo = 1;
+}
+
+/**
  * Protocol used from client to the Datanode.
  * See the request and response for details of rpc call.
  */
@@ -164,4 +186,10 @@ service ClientDatanodeProtocolService {
    */
   rpc getHdfsBlockLocations(GetHdfsBlockLocationsRequestProto)
       returns(GetHdfsBlockLocationsResponseProto);
+
+  rpc shutdownDatanode(ShutdownDatanodeRequestProto)
+      returns(ShutdownDatanodeResponseProto);
+
+  rpc getDatanodeInfo(GetDatanodeInfoRequestProto)
+      returns(GetDatanodeInfoResponseProto);
 }

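These two RPCs back the new -shutdownDatanode and -getDatanodeInfo subcommands in DFSAdmin above; with forUpgrade set, the datanode also emits the OOB_RESTART pipeline ack added in datatransfer.proto below, and clients wait up to dfs.client.datanode-restart.timeout before giving up on it. A sketch mirroring the DFSAdmin call sites (obtaining the ClientDatanodeProtocol proxy is left to DFSAdmin's own getDataNodeProxy() helper):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
    import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;

    static void restartForUpgrade(ClientDatanodeProtocol dnProxy)
        throws IOException {
      // forUpgrade = true: clients are told to wait for the quick restart.
      dnProxy.shutdownDatanode(true);
      // Later, poll liveness; this throws IOException while the DN is down.
      DatanodeLocalInfo info = dnProxy.getDatanodeInfo();
      System.out.println(info.getDatanodeLocalReport());
    }
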
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Mon Mar 10 23:40:21 2014
@@ -333,6 +333,27 @@ message FinalizeUpgradeRequestProto { //
 message FinalizeUpgradeResponseProto { // void response
 }
 
+enum RollingUpgradeActionProto {
+  QUERY = 1;
+  START = 2;
+  FINALIZE = 3;
+}
+
+message RollingUpgradeRequestProto {
+  required RollingUpgradeActionProto action = 1;
+}
+
+message RollingUpgradeInfoProto {
+  required RollingUpgradeStatusProto status = 1;
+  required uint64 startTime = 2;
+  required uint64 finalizeTime = 3;
+  required bool createdRollbackImages = 4;
+}
+
+message RollingUpgradeResponseProto {
+  optional RollingUpgradeInfoProto rollingUpgradeInfo= 1;
+}
+
 message ListCorruptFileBlocksRequestProto {
   required string path = 1;
   optional string cookie = 2;
@@ -659,6 +680,8 @@ service ClientNamenodeProtocol {
   rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
   rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
       returns(FinalizeUpgradeResponseProto);
+  rpc rollingUpgrade(RollingUpgradeRequestProto)
+      returns(RollingUpgradeResponseProto);
   rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)
       returns(ListCorruptFileBlocksResponseProto);
   rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Mon Mar 10 23:40:21 2014
@@ -224,6 +224,7 @@ message NNHAStatusHeartbeatProto {
 message HeartbeatResponseProto {
   repeated DatanodeCommandProto cmds = 1; // Returned commands can be null
   required NNHAStatusHeartbeatProto haStatus = 2;
+  optional RollingUpgradeStatusProto rollingUpgradeStatus = 3;
 }
 
 /**

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto Mon Mar 10 23:40:21 2014
@@ -134,6 +134,17 @@ message IsFormattedResponseProto {
 }
 
 /**
+ * discardSegments()
+ */
+message DiscardSegmentsRequestProto {
+  required JournalIdProto jid = 1;
+  required uint64 startTxId = 2;
+}
+
+message DiscardSegmentsResponseProto {
+}
+
+/**
  * getJournalState()
  */
 message GetJournalStateRequestProto {
@@ -236,6 +247,8 @@ message AcceptRecoveryResponseProto {
 service QJournalProtocolService {
   rpc isFormatted(IsFormattedRequestProto) returns (IsFormattedResponseProto);
 
+  rpc discardSegments(DiscardSegmentsRequestProto) returns (DiscardSegmentsResponseProto);
+
   rpc getJournalState(GetJournalStateRequestProto) returns (GetJournalStateResponseProto);
 
   rpc newEpoch(NewEpochRequestProto) returns (NewEpochResponseProto);

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Mon Mar 10 23:40:21 2014
@@ -199,6 +199,10 @@ enum Status {
   ERROR_ACCESS_TOKEN = 5;
   CHECKSUM_OK = 6;
   ERROR_UNSUPPORTED = 7;
+  OOB_RESTART = 8;            // Quick restart
+  OOB_RESERVED1 = 9;          // Reserved
+  OOB_RESERVED2 = 10;         // Reserved
+  OOB_RESERVED3 = 11;         // Reserved
 }
 
 message PipelineAckProto {

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto Mon Mar 10 23:40:21 2014
@@ -71,6 +71,7 @@ message NameSystemSection {
   optional uint64 genstampV1Limit = 4;
   optional uint64 lastAllocatedBlockId = 5;
   optional uint64 transactionId = 6;
+  optional uint64 rollingUpgradeStartTime = 7;
 }
 
 /**

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Mon Mar 10 23:40:21 2014
@@ -61,6 +61,15 @@ message DatanodeIDProto {
 }
 
 /**
+ * Datanode local information
+ */
+message DatanodeLocalInfoProto {
+  required string softwareVersion = 1;
+  required string configVersion = 2;
+  required uint64 uptime = 3;
+}
+
+/**
  * DatanodeInfo array
  */
 message DatanodeInfosProto {
@@ -459,4 +468,9 @@ message SnapshotInfoProto {
   // TODO: do we need access time?
 }
 
-
+/**
+ * Rolling upgrade status
+ */
+message RollingUpgradeStatusProto {
+  required string blockPoolId = 1;
+}

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Mon Mar 10 23:40:21 2014
@@ -1031,6 +1031,18 @@
 </property>
 
 <property>
+  <name>dfs.client.datanode-restart.timeout</name>
+  <value>30</value>
+  <description>
+    Expert only. The time to wait, in seconds, from reception of a
+    datanode shutdown notification for quick restart, until declaring
+    the datanode dead and invoking the normal recovery mechanisms.
+    The notification is sent by a datanode when it is being shut down
+    using the shutdownDatanode admin command with the upgrade option.
+  </description>
+</property>
+
+<property>
   <name>dfs.nameservices</name>
   <value></value>
   <description>

Propchange: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1550130-1574256
  Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1576128
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1574259

Propchange: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1574259
  Merged /hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1550130-1574256
  Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1576128

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html Mon Mar 10 23:40:21 2014
@@ -72,14 +72,26 @@
 <script type="text/x-dust-template" id="tmpl-dfshealth">
 
 {#nn}
-{@if cond="{DistinctVersionCount} > 1"}
+{@if cond="{DistinctVersionCount} > 1 || '{RollingUpgradeStatus}'.length"}
 <div class="alert alert-dismissable alert-info">
   <button type="button" class="close" data-dismiss="alert" aria-hidden="true">&times;</button>
 
-  There are {DistinctVersionCount} versions of datanodes currently live: 
-  {#DistinctVersions}
-  {key} ({value}) {@sep},{/sep}
-  {/DistinctVersions}
+  {#RollingUpgradeStatus}
+    <p>Rolling upgrade started at {#helper_date_tostring value="{startTime}"/}. <br/>
+    {#createdRollbackImages}
+      Rollback image has been created. Proceed to upgrade daemons.
+      {:else}
+      Rollback image has not been created.
+    {/createdRollbackImages}
+    </p>
+  {/RollingUpgradeStatus}
+
+  {@if cond="{DistinctVersionCount} > 1"}
+    There are {DistinctVersionCount} versions of datanodes currently live:
+    {#DistinctVersions}
+    {key} ({value}) {@sep},{/sep}
+    {/DistinctVersions}
+  {/if}
 </div>
 {/if}
 

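The {#RollingUpgradeStatus} block is rendered from the NameNode status the page already loads. A hedged sketch of pulling the same data over the stock /jmx servlet; the bean name, port, and the claim that RollingUpgradeStatus appears in NameNodeInfo are assumptions based on Hadoop's usual JMX layout, not shown in this commit:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class FetchNameNodeInfo {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:50070/jmx"
            + "?qry=Hadoop:service=NameNode,name=NameNodeInfo");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()));
        try {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line);  // JSON; look for "RollingUpgradeStatus"
          }
        } finally {
          in.close();
        }
      }
    }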
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js Mon Mar 10 23:40:21 2014
@@ -44,6 +44,11 @@
         for (var i in j) {
           chunk.write('<tr><td>' + i + '</td><td>' + j[i] + '</td><td>' + params.type + '</td></tr>');
         }
+      },
+
+      'helper_date_tostring' : function (chunk, ctx, bodies, params) {
+        var value = dust.helpers.tap(params.value, chunk, ctx);
+        return chunk.write('' + new Date(Number(value)).toLocaleString());
       }
     };
 

Propchange: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1574259
  Merged /hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1550130-1574256
  Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1576128

Propchange: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1550130-1574256
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1574259
  Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1576128

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Mon Mar 10 23:40:21 2014
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -940,8 +941,8 @@ public class DFSTestUtil {
   }
   
   public static DatanodeRegistration getLocalDatanodeRegistration() {
-    return new DatanodeRegistration(getLocalDatanodeID(),
-        new StorageInfo(), new ExportedBlockKeys(), VersionInfo.getVersion());
+    return new DatanodeRegistration(getLocalDatanodeID(), new StorageInfo(
+        NodeType.DATA_NODE), new ExportedBlockKeys(), VersionInfo.getVersion());
   }
   
   /** Copy one file's contents into the other **/

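Both StorageInfo constructor shapes used by this commit now carry a NodeType. A condensed sketch of the two forms, with placeholder values for everything except the constructor signatures themselves:

    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
    import org.apache.hadoop.hdfs.server.common.StorageInfo;

    public class StorageInfoExample {
      public static void main(String[] args) {
        // Short form, as in getLocalDatanodeRegistration() above.
        StorageInfo dnInfo = new StorageInfo(NodeType.DATA_NODE);
        // Full form, as in the upgrade/rollback tests below.
        StorageInfo full = new StorageInfo(
            -55,              // layout version (placeholder)
            12345,            // namespace ID (placeholder)
            "CID-example",    // cluster ID (placeholder)
            0L,               // creation time (placeholder)
            NodeType.DATA_NODE);
        System.out.println(dnInfo + " / " + full);
      }
    }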
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Mon Mar 10 23:40:21 2014
@@ -32,8 +32,8 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HOSTS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
@@ -101,7 +101,6 @@ import org.apache.hadoop.net.StaticMappi
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
@@ -400,6 +399,10 @@ public class MiniDFSCluster {
       this.secureResources = secureResources;
       this.ipcPort = ipcPort;
     }
+
+    public void setDnArgs(String ... args) {
+      dnArgs = args;
+    }
   }
 
   private Configuration conf;
@@ -1558,11 +1561,11 @@ public class MiniDFSCluster {
   /**
    * Restart the namenode.
    */
-  public synchronized void restartNameNode() throws IOException {
+  public synchronized void restartNameNode(String... args) throws IOException {
     checkSingleNameNode();
-    restartNameNode(true);
+    restartNameNode(0, true, args);
   }
-  
+
   /**
    * Restart the namenode. Optionally wait for the cluster to become active.
    */
@@ -1583,13 +1586,13 @@ public class MiniDFSCluster {
    * Restart the namenode at a given index. Optionally wait for the cluster
    * to become active.
    */
-  public synchronized void restartNameNode(int nnIndex, boolean waitActive)
-      throws IOException {
+  public synchronized void restartNameNode(int nnIndex, boolean waitActive,
+      String... args) throws IOException {
     String nameserviceId = nameNodes[nnIndex].nameserviceId;
     String nnId = nameNodes[nnIndex].nnId;
     Configuration conf = nameNodes[nnIndex].conf;
     shutdownNameNode(nnIndex);
-    NameNode nn = NameNode.createNameNode(new String[] {}, conf);
+    NameNode nn = NameNode.createNameNode(args, conf);
     nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, conf);
     if (waitActive) {
       waitClusterUp();
@@ -1933,7 +1936,8 @@ public class MiniDFSCluster {
 
   /** Wait until the given namenode gets registration from all the datanodes */
   public void waitActive(int nnIndex) throws IOException {
-    if (nameNodes.length == 0 || nameNodes[nnIndex] == null) {
+    if (nameNodes.length == 0 || nameNodes[nnIndex] == null
+        || nameNodes[nnIndex].nameNode == null) {
       return;
     }
     InetSocketAddress addr = nameNodes[nnIndex].nameNode.getServiceRpcAddress();

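The restartNameNode(String...) overloads let tests restart a NameNode with startup options instead of the previously hard-coded empty argument list. A hedged usage sketch; the "-rollingUpgrade started" flag comes from the HDFS-5535 rolling-upgrade work and is an assumption here, not part of this diff:

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class RestartWithArgsExample {
      public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
        try {
          cluster.waitActive();
          // Forwarded to NameNode.createNameNode(args, conf) on restart.
          cluster.restartNameNode("-rollingUpgrade", "started");
        } finally {
          cluster.shutdown();
        }
      }
    }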
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java Mon Mar 10 23:40:21 2014
@@ -24,8 +24,10 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Assert;
@@ -159,4 +161,101 @@ public class TestClientProtocolForPipeli
       }
     }
   }
+
+  /**
+   * Test pipeline recovery on a restart OOB message. It also tests the
+   * delivery of the OOB ack originating from the primary datanode. Since
+   * there is only one datanode in the cluster, a failure of restart-recovery
+   * will fail the test.
+   */
+  @Test
+  public void testPipelineRecoveryOnOOB() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "15");
+    MiniDFSCluster cluster = null;
+    try {
+      int numDataNodes = 1;
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
+      cluster.waitActive();
+      FileSystem fileSys = cluster.getFileSystem();
+
+      Path file = new Path("dataprotocol2.dat");
+      DFSTestUtil.createFile(fileSys, file, 10240L, (short)1, 0L);
+      DFSOutputStream out = (DFSOutputStream)(fileSys.append(file).
+          getWrappedStream());
+      out.write(1);
+      out.hflush();
+
+      DFSAdmin dfsadmin = new DFSAdmin(conf);
+      DataNode dn = cluster.getDataNodes().get(0);
+      final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
+      // issue shutdown to the datanode.
+      final String[] args1 = {"-shutdownDatanode", dnAddr, "upgrade" };
+      Assert.assertEquals(0, dfsadmin.run(args1));
+      // Wait long enough to receive an OOB ack before closing the file.
+      Thread.sleep(4000);
+      // Restart the datanode
+      cluster.restartDataNode(0, true);
+      // The following forces a data packet and an end-of-block packet to be sent.
+      out.close();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /** Test restart timeout */
+  @Test
+  public void testPipelineRecoveryOnRestartFailure() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "5");
+    MiniDFSCluster cluster = null;
+    try {
+      int numDataNodes = 2;
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
+      cluster.waitActive();
+      FileSystem fileSys = cluster.getFileSystem();
+
+      Path file = new Path("dataprotocol3.dat");
+      DFSTestUtil.createFile(fileSys, file, 10240L, (short)2, 0L);
+      DFSOutputStream out = (DFSOutputStream)(fileSys.append(file).
+          getWrappedStream());
+      out.write(1);
+      out.hflush();
+
+      DFSAdmin dfsadmin = new DFSAdmin(conf);
+      DataNode dn = cluster.getDataNodes().get(0);
+      final String dnAddr1 = dn.getDatanodeId().getIpcAddr(false);
+      // issue shutdown to the datanode.
+      final String[] args1 = {"-shutdownDatanode", dnAddr1, "upgrade" };
+      Assert.assertEquals(0, dfsadmin.run(args1));
+      Thread.sleep(4000);
+      // This should succeed without restarting the node. The restart timeout
+      // will expire and regular pipeline recovery will kick in.
+      out.close();
+
+      // At this point there is only one node in the cluster. 
+      out = (DFSOutputStream)(fileSys.append(file).
+          getWrappedStream());
+      out.write(1);
+      out.hflush();
+
+      dn = cluster.getDataNodes().get(1);
+      final String dnAddr2 = dn.getDatanodeId().getIpcAddr(false);
+      // issue shutdown to the datanode.
+      final String[] args2 = {"-shutdownDatanode", dnAddr2, "upgrade" };
+      Assert.assertEquals(0, dfsadmin.run(args2));
+      Thread.sleep(4000);
+      try {
+        // close should fail since the second datanode is never restarted
+        out.close();
+        Assert.fail("close should have thrown an exception");
+      } catch (IOException ioe) { }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

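Both tests drive the new out-of-band (OOB) restart path through the shutdownDatanode admin command. A condensed sketch of that call on its own, with a placeholder IPC address:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;

    public class ShutdownForUpgradeExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        DFSAdmin dfsadmin = new DFSAdmin(conf);
        // "upgrade" tells the datanode to send the restart OOB ack to its
        // clients before exiting, so writers wait instead of recovering.
        int ret = dfsadmin.run(
            new String[] {"-shutdownDatanode", "127.0.0.1:50020", "upgrade"});
        System.exit(ret);
      }
    }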
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Mon Mar 10 23:40:21 2014
@@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -193,10 +194,11 @@ public class TestDFSRollback {
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
       // Put newer layout version in current.
       storageInfo = new StorageInfo(
-          UpgradeUtilities.getCurrentLayoutVersion()-1,
+          HdfsConstants.DATANODE_LAYOUT_VERSION - 1,
           UpgradeUtilities.getCurrentNamespaceID(cluster),
           UpgradeUtilities.getCurrentClusterID(cluster),
-          UpgradeUtilities.getCurrentFsscTime(cluster));
+          UpgradeUtilities.getCurrentFsscTime(cluster),
+          NodeType.DATA_NODE);
 
       // Overwrite VERSION file in the current directory of
       // volume directories and block pool slice directories
@@ -252,7 +254,8 @@ public class TestDFSRollback {
       storageInfo = new StorageInfo(Integer.MIN_VALUE, 
           UpgradeUtilities.getCurrentNamespaceID(cluster), 
           UpgradeUtilities.getCurrentClusterID(cluster), 
-          UpgradeUtilities.getCurrentFsscTime(cluster));
+          UpgradeUtilities.getCurrentFsscTime(cluster),
+          NodeType.DATA_NODE);
       
       UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -275,10 +278,11 @@ public class TestDFSRollback {
       
       UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
       baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
-      storageInfo = new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(), 
-            UpgradeUtilities.getCurrentNamespaceID(cluster), 
-            UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE);
-      
+      storageInfo = new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,
+          UpgradeUtilities.getCurrentNamespaceID(cluster),
+          UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE,
+          NodeType.DATA_NODE);
+     
       UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
       
@@ -321,10 +325,10 @@ public class TestDFSRollback {
       log("NameNode rollback with old layout version in previous", numDirs);
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
-      storageInfo = new StorageInfo(1, 
+      storageInfo = new StorageInfo(1,
           UpgradeUtilities.getCurrentNamespaceID(null),
           UpgradeUtilities.getCurrentClusterID(null),
-          UpgradeUtilities.getCurrentFsscTime(null));
+          UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
       
       UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs,
           storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java Mon Mar 10 23:40:21 2014
@@ -75,7 +75,7 @@ public class TestDFSStartupVersions {
     StorageData(int layoutVersion, int namespaceId, String clusterId,
         long cTime, String bpid) {
       storageInfo = new StorageInfo(layoutVersion, namespaceId, clusterId,
-          cTime);
+          cTime, NodeType.DATA_NODE);
       blockPoolId = bpid;
     }
   }
@@ -89,7 +89,7 @@ public class TestDFSStartupVersions {
    */
   private StorageData[] initializeVersions() throws Exception {
     int layoutVersionOld = Storage.LAST_UPGRADABLE_LAYOUT_VERSION;
-    int layoutVersionCur = UpgradeUtilities.getCurrentLayoutVersion();
+    int layoutVersionCur = HdfsConstants.DATANODE_LAYOUT_VERSION;
     int layoutVersionNew = Integer.MIN_VALUE;
     int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID(null);
     int namespaceIdOld = Integer.MIN_VALUE;
@@ -200,7 +200,7 @@ public class TestDFSStartupVersions {
       return false;
     }
     // check #3
-    int softwareLV = HdfsConstants.LAYOUT_VERSION;  // will also be Namenode's LV
+    int softwareLV = HdfsConstants.DATANODE_LAYOUT_VERSION;
     int storedLV = datanodeVer.getLayoutVersion();
     if (softwareLV == storedLV &&  
         datanodeVer.getCTime() == namenodeVer.getCTime()) 
@@ -252,7 +252,7 @@ public class TestDFSStartupVersions {
                                               .startupOption(StartupOption.REGULAR)
                                               .build();
     StorageData nameNodeVersion = new StorageData(
-        UpgradeUtilities.getCurrentLayoutVersion(),
+        HdfsConstants.NAMENODE_LAYOUT_VERSION,
         UpgradeUtilities.getCurrentNamespaceID(cluster),
         UpgradeUtilities.getCurrentClusterID(cluster),
         UpgradeUtilities.getCurrentFsscTime(cluster),

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java Mon Mar 10 23:40:21 2014
@@ -34,10 +34,16 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
@@ -212,7 +218,7 @@ public class TestDFSUpgrade {
    * This test attempts to upgrade the NameNode and DataNode under
    * a number of valid and invalid conditions.
    */
-  @Test
+  @Test(timeout = 60000)
   public void testUpgrade() throws Exception {
     File[] baseDirs;
     StorageInfo storageInfo = null;
@@ -225,6 +231,19 @@ public class TestDFSUpgrade {
       log("Normal NameNode upgrade", numDirs);
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       cluster = createCluster();
+
+      // make sure that rolling upgrade cannot be started
+      try {
+        final DistributedFileSystem dfs = cluster.getFileSystem();
+        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+        dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
+        fail();
+      } catch(RemoteException re) {
+        assertEquals(InconsistentFSStateException.class.getName(),
+            re.getClassName());
+        LOG.info("The exception is expected.", re);
+      }
+
       checkNameNode(nameNodeDirs, EXPECTED_TXID);
       if (numDirs > 1)
         TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
@@ -262,10 +281,10 @@ public class TestDFSUpgrade {
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       cluster = createCluster();
       baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
-      storageInfo = new StorageInfo(Integer.MIN_VALUE, 
+      storageInfo = new StorageInfo(Integer.MIN_VALUE,
           UpgradeUtilities.getCurrentNamespaceID(cluster),
           UpgradeUtilities.getCurrentClusterID(cluster),
-          UpgradeUtilities.getCurrentFsscTime(cluster));
+          UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);
       
       UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -280,9 +299,10 @@ public class TestDFSUpgrade {
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       cluster = createCluster();
       baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
-      storageInfo = new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(), 
+      storageInfo = new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,
           UpgradeUtilities.getCurrentNamespaceID(cluster),
-          UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE);
+          UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE,
+          NodeType.DATA_NODE);
           
       UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, 
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -321,7 +341,7 @@ public class TestDFSUpgrade {
       storageInfo = new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1, 
           UpgradeUtilities.getCurrentNamespaceID(null),
           UpgradeUtilities.getCurrentClusterID(null),
-          UpgradeUtilities.getCurrentFsscTime(null));
+          UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
       
       UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -334,7 +354,7 @@ public class TestDFSUpgrade {
       storageInfo = new StorageInfo(Integer.MIN_VALUE, 
           UpgradeUtilities.getCurrentNamespaceID(null),
           UpgradeUtilities.getCurrentClusterID(null),
-          UpgradeUtilities.getCurrentFsscTime(null));
+          UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
       
       UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -354,6 +374,19 @@ public class TestDFSUpgrade {
       log("Normal NameNode upgrade", numDirs);
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       cluster = createCluster();
+
+      // make sure that rolling upgrade cannot be started
+      try {
+        final DistributedFileSystem dfs = cluster.getFileSystem();
+        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+        dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
+        fail();
+      } catch(RemoteException re) {
+        assertEquals(InconsistentFSStateException.class.getName(),
+            re.getClassName());
+        LOG.info("The exception is expected.", re);
+      }
+
       checkNameNode(nameNodeDirs, EXPECTED_TXID);
       TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
       cluster.shutdown();

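The rejected-PREPARE check above exercises the client-side rolling-upgrade entry point. A minimal sketch of that call sequence in isolation; obtaining the DistributedFileSystem handle is elided:

    import java.io.IOException;

    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

    public class RollingUpgradePrepareExample {
      static void prepare(DistributedFileSystem dfs) throws IOException {
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        // Rejected with InconsistentFSStateException while a regular
        // (non-rolling) upgrade is still in progress, as the test asserts.
        dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
      }
    }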
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java Mon Mar 10 23:40:21 2014
@@ -180,7 +180,7 @@ public class TestDatanodeRegistration {
           .getCTime();
       StorageInfo mockStorageInfo = mock(StorageInfo.class);
       doReturn(nnCTime).when(mockStorageInfo).getCTime();
-      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockStorageInfo)
+      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo)
           .getLayoutVersion();
       DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
           mockStorageInfo, null, VersionInfo.getVersion());
@@ -225,7 +225,7 @@ public class TestDatanodeRegistration {
       doReturn(nnCTime).when(mockStorageInfo).getCTime();
       
       DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
-      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
+      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
       doReturn(123).when(mockDnReg).getXferPort();
       doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
       doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
@@ -273,7 +273,7 @@ public class TestDatanodeRegistration {
       doReturn(nnCTime).when(mockStorageInfo).getCTime();
       
       DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
-      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
+      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
       doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
       doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
       

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1576130&r1=1576129&r2=1576130&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java Mon Mar 10 23:40:21 2014
@@ -40,13 +40,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -290,10 +290,14 @@ public class UpgradeUtilities {
       if (!list[i].isFile()) {
         continue;
       }
-      // skip VERSION file for DataNodes
-      if (nodeType == DATA_NODE && list[i].getName().equals("VERSION")) {
+
+      // skip the VERSION and dfsUsed files for DataNodes
+      if (nodeType == DATA_NODE && 
+         (list[i].getName().equals("VERSION") || 
+         list[i].getName().equals("dfsUsed"))) {
         continue; 
       }
+
       FileInputStream fis = null;
       try {
         fis = new FileInputStream(list[i]);
@@ -471,7 +475,8 @@ public class UpgradeUtilities {
   public static void createBlockPoolVersionFile(File bpDir,
       StorageInfo version, String bpid) throws IOException {
     // Create block pool version files
-    if (LayoutVersion.supports(Feature.FEDERATION, version.layoutVersion)) {
+    if (DataNodeLayoutVersion.supports(
+        LayoutVersion.Feature.FEDERATION, version.layoutVersion)) {
       File bpCurDir = new File(bpDir, Storage.STORAGE_DIR_CURRENT);
       BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(version,
           bpid);
@@ -514,8 +519,8 @@ public class UpgradeUtilities {
    * Return the layout version inherent in the current version
    * of the Namenode, whether it is running or not.
    */
-  public static int getCurrentLayoutVersion() {
-    return HdfsConstants.LAYOUT_VERSION;
+  public static int getCurrentNameNodeLayoutVersion() {
+    return HdfsConstants.NAMENODE_LAYOUT_VERSION;
   }
   
   /**

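Feature checks in this commit move from the shared LayoutVersion.supports() to the node-type-specific classes. A one-line sketch of the datanode-side form introduced above (the layout-version value is a placeholder):

    import org.apache.hadoop.hdfs.protocol.LayoutVersion;
    import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;

    public class LayoutFeatureCheckExample {
      public static void main(String[] args) {
        int layoutVersion = -55;  // placeholder
        boolean federated = DataNodeLayoutVersion.supports(
            LayoutVersion.Feature.FEDERATION, layoutVersion);
        System.out.println("FEDERATION supported: " + federated);
      }
    }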

