Subject: svn commit: r1571431 - in /hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/datanode/ src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ src/main/java/org/apache/hadoop/hdfs/s...
Date: Mon, 24 Feb 2014 20:46:36 -0000
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
From: arp@apache.org
X-Mailer: svnmailer-1.0.9
Message-Id: <20140224204636.840782388860@eris.apache.org>

Author: arp
Date: Mon Feb 24 20:46:35 2014
New Revision: 1571431

URL: http://svn.apache.org/r1571431
Log:
HDFS-6005. Simplify Datanode rollback and downgrade. (Contributed by Suresh Srinivas)
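For context, the change below replaces the DataNode's earlier enableDeleteToTrash/disableAndPurgeTrashStorage calls with enableTrash/restoreTrash: trash is enabled for a block pool when the NameNode signals that a rolling upgrade is in progress, and when that signal clears the trashed block files are restored and the trash directories removed. The standalone Java sketch below only illustrates that lifecycle under those assumptions; the names TrashLifecycleSketch and BlockPoolTrash are hypothetical and are not part of this patch.

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical sketch of the per-block-pool trash lifecycle described above.
// This is not the DataNode implementation; names and types are illustrative.
class TrashLifecycleSketch {
  // Block pool ids for which trash is currently enabled.
  private final Set<String> trashEnabledBpids =
      Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

  /** Invoked when the NameNode reports that a rolling upgrade has started. */
  void enableTrash(String bpid) {
    if (trashEnabledBpids.add(bpid)) {
      System.out.println("Enabled trash for bpid " + bpid);
    }
  }

  /** Invoked when the rolling upgrade is no longer in progress: blocks that
   *  were moved to trash/ are put back under current/ and trash is disabled. */
  void restoreTrash(String bpid, BlockPoolTrash storage) {
    if (trashEnabledBpids.remove(bpid)) {
      storage.restoreAllFromTrash(); // see the recursive restore sketch below
      System.out.println("Restored trash for bpid " + bpid);
    }
  }

  boolean trashEnabled(String bpid) {
    return trashEnabledBpids.contains(bpid);
  }

  /** Minimal stand-in for the per-block-pool storage object. */
  interface BlockPoolTrash {
    void restoreAllFromTrash();
  }
}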
Modified:
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeStartupOptions.java

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt?rev=1571431&r1=1571430&r2=1571431&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt Mon Feb 24 20:46:35 2014
@@ -75,3 +75,7 @@ HDFS-5535 subtasks:
     HDFS-5994. Fix TestDataNodeRollingUpgrade. (Arpit Agarwal via szetszwo)
 
     HDFS-5999. Do not create rollback fsimage when it already exists. (jing9)
+
+    HDFS-6005. Simplify Datanode rollback and downgrade. (Suresh Srinivas via
+    Arpit Agarwal)
+
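The BlockPoolSliceStorage change further below restores block files by recursively walking the trash tree, which mirrors the directory layout under current/, moving each file back to its original location and then deleting the emptied trash directories. The following self-contained sketch illustrates such a walk using plain java.io.File operations; the class name TrashRestoreSketch and the (trashRoot, restoreRoot) parameters are assumptions for illustration and do not reflect the actual storage-directory plumbing in the patch.

import java.io.File;
import java.io.IOException;

// Hypothetical sketch of restoring files from a trash tree that mirrors the
// layout of the live tree. Not the HDFS implementation; paths are illustrative.
class TrashRestoreSketch {

  /**
   * Move every file under trashRoot back to the same relative path under
   * restoreRoot, then delete the (now empty) trash directories.
   * @return number of files restored
   */
  static int restoreFromTrash(File trashRoot, File restoreRoot) throws IOException {
    File[] children = trashRoot.exists() ? trashRoot.listFiles() : null;
    if (children == null) {
      return 0;                       // nothing to restore
    }
    int restored = 0;
    for (File child : children) {
      if (child.isDirectory()) {
        // Recurse into subdirectories, preserving the relative layout.
        restored += restoreFromTrash(child, new File(restoreRoot, child.getName()));
      } else {
        if (!restoreRoot.exists() && !restoreRoot.mkdirs()) {
          throw new IOException("Failed to create " + restoreRoot);
        }
        File target = new File(restoreRoot, child.getName());
        if (!child.renameTo(target)) {
          throw new IOException("Failed to move " + child + " to " + target);
        }
        ++restored;
      }
    }
    trashRoot.delete();               // remove the emptied trash directory
    return restored;
  }
}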
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java?rev=1571431&r1=1571430&r2=1571431&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java Mon Feb 24 20:46:35 2014
@@ -17,13 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.CopyOnWriteArrayList;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -32,24 +29,15 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
-import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
-import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
 
 /**
  * One instance per block-pool/namespace on the DN, which handles the
@@ -419,9 +407,9 @@ class BPOfferService {
    */
   void signalRollingUpgrade(boolean inProgress) {
     if (inProgress) {
-      dn.getFSDataset().enableDeleteToTrash(getBlockPoolId());
+      dn.getFSDataset().enableTrash(getBlockPoolId());
     } else {
-      dn.getFSDataset().disableAndPurgeTrashStorage(getBlockPoolId());
+      dn.getFSDataset().restoreTrash(getBlockPoolId());
     }
   }
 
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java?rev=1571431&r1=1571430&r2=1571431&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java Mon Feb 24 20:46:35 2014 @@ -18,15 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode; -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.Properties; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.HardLink; @@ -40,7 +32,14 @@ import org.apache.hadoop.hdfs.server.com import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.util.Daemon; -import com.google.common.annotations.VisibleForTesting; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.Properties; +import java.util.regex.Matcher; +import java.util.regex.Pattern; /** * Manages storage for the set of BlockPoolSlices which share a particular @@ -174,7 +173,7 @@ public class BlockPoolSliceStorage exten /** * Format a block pool slice storage. - * @param sd the block pool storage + * @param bpSdir the block pool storage * @param nsInfo the name space info * @throws IOException Signals that an I/O exception has occurred. */ @@ -212,7 +211,7 @@ public class BlockPoolSliceStorage exten if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) { throw new InconsistentFSStateException(storage, - "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID); + "Unexpected blockpoolID " + bpid + ". Expected " + blockpoolID); } blockpoolID = bpid; } @@ -236,7 +235,6 @@ public class BlockPoolSliceStorage exten * Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime Regular * startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime * - * @param dn DataNode to which this storage belongs to * @param sd storage directory /current/ * @param nsInfo namespace info * @param startOpt startup option @@ -246,13 +244,13 @@ public class BlockPoolSliceStorage exten NamespaceInfo nsInfo, StartupOption startOpt) throws IOException { if (startOpt == StartupOption.ROLLBACK) { doRollback(sd, nsInfo); // rollback if applicable - } else if (StartupOption.isRollingUpgradeRollback(startOpt)) { - File trashRoot = getTrashRootDir(sd); - int filesRestored = - trashRoot.exists() ? restoreBlockFilesFromTrash(trashRoot) : 0; - LOG.info("Restored " + filesRestored + " block files from trash."); + } else { + // Restore all the files in the trash. The restored files are retained + // during rolling upgrade rollback. They are deleted during rolling + // upgrade downgrade. 
+ int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd)); + LOG.info("Restored " + restored + " block files from trash."); } - readProperties(sd); checkVersionUpgradable(this.layoutVersion); assert this.layoutVersion >= HdfsConstants.DATANODE_LAYOUT_VERSION @@ -335,7 +333,8 @@ public class BlockPoolSliceStorage exten File bpTmpDir = bpSd.getPreviousTmp(); assert !bpTmpDir.exists() : "previous.tmp directory must not exist."; - // 2. Rename /curernt//current to /curernt//previous.tmp + // 2. Rename /current//current to + // /current//previous.tmp rename(bpCurDir, bpTmpDir); // 3. Create new /current with block files hardlinks and VERSION @@ -346,7 +345,8 @@ public class BlockPoolSliceStorage exten this.cTime = nsInfo.getCTime(); writeProperties(bpSd); - // 4.rename /curernt//previous.tmp to /curernt//previous + // 4.rename /current//previous.tmp to + // /current//previous rename(bpTmpDir, bpPrevDir); LOG.info("Upgrade of block pool " + blockpoolID + " at " + bpSd.getRoot() + " is complete"); @@ -380,15 +380,17 @@ public class BlockPoolSliceStorage exten /** * Restore all files from the trash directory to their corresponding * locations under current/ - * - * @param trashRoot - * @throws IOException */ - private int restoreBlockFilesFromTrash(File trashRoot) throws IOException { + private int restoreBlockFilesFromTrash(File trashRoot) + throws IOException { int filesRestored = 0; - File restoreDirectory = null; + File[] children = trashRoot.exists() ? trashRoot.listFiles() : null; + if (children == null) { + return 0; + } - for (File child : trashRoot.listFiles()) { + File restoreDirectory = null; + for (File child : children) { if (child.isDirectory()) { // Recurse to process subdirectories. filesRestored += restoreBlockFilesFromTrash(child); @@ -408,7 +410,7 @@ public class BlockPoolSliceStorage exten } ++filesRestored; } - + FileUtil.fullyDelete(trashRoot); return filesRestored; } @@ -527,9 +529,6 @@ public class BlockPoolSliceStorage exten /** * gets the data node storage directory based on block pool storage - * - * @param bpRoot - * @return */ private static String getDataNodeStorageRoot(String bpRoot) { Matcher matcher = BLOCK_POOL_PATH_PATTERN.matcher(bpRoot); @@ -571,7 +570,6 @@ public class BlockPoolSliceStorage exten * The subdirectory structure under trash/ mirrors that under current/ to keep * implicit memory of where the files are to be restored (if necessary). * - * @param blockFile * @return the trash directory for a given block file that is being deleted. */ public String getTrashDirectory(File blockFile) { @@ -587,7 +585,6 @@ public class BlockPoolSliceStorage exten * The subdirectory structure under trash/ mirrors that under current/ to keep * implicit memory of where the files are to be restored. * - * @param blockFile * @return the target directory to restore a previously deleted block file. */ @VisibleForTesting @@ -601,9 +598,26 @@ public class BlockPoolSliceStorage exten /** * Delete all files and directories in the trash directories. 
*/ - public void emptyTrash() { + public void restoreTrash() { for (StorageDirectory sd : storageDirs) { - FileUtil.fullyDelete(getTrashRootDir(sd)); + File trashRoot = getTrashRootDir(sd); + try { + restoreBlockFilesFromTrash(trashRoot); + FileUtil.fullyDelete(getTrashRootDir(sd)); + } catch (IOException ioe) { + LOG.warn("Restoring trash failed for storage directory " + sd); + } } } + + /** trash is enabled if at least one storage directory contains trash root */ + @VisibleForTesting + public boolean trashEnabled() { + for (StorageDirectory sd : storageDirs) { + if (getTrashRootDir(sd).exists()) { + return true; + } + } + return false; + } } Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1571431&r1=1571430&r2=1571431&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Feb 24 20:46:35 2014 @@ -17,42 +17,10 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import static org.apache.hadoop.util.ExitUtil.terminate; - -import java.io.BufferedOutputStream; -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.PrintStream; -import java.lang.management.ManagementFactory; -import java.net.InetSocketAddress; -import java.net.Socket; -import java.net.SocketException; -import java.net.SocketTimeoutException; -import java.net.URI; -import java.net.UnknownHostException; -import java.nio.channels.ClosedByInterruptException; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.SocketChannel; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; - -import javax.management.ObjectName; - +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.protobuf.BlockingService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -69,41 +37,17 @@ import org.apache.hadoop.hdfs.HDFSPolicy import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.net.DomainPeerServer; import org.apache.hadoop.hdfs.net.TcpPeerServer; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; -import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; -import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo; -import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; -import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; -import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor; -import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; -import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; -import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; +import org.apache.hadoop.hdfs.protocol.*; +import org.apache.hadoop.hdfs.protocol.datatransfer.*; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService; -import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB; -import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolServerSideTranslatorPB; -import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB; -import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB; -import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; -import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; +import org.apache.hadoop.hdfs.protocolPB.*; +import org.apache.hadoop.hdfs.security.token.block.*; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; -import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; -import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.StorageInfo; @@ -115,11 +59,7 @@ import org.apache.hadoop.hdfs.server.dat import org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets; import org.apache.hadoop.hdfs.server.namenode.StreamFile; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; -import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; -import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; -import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; -import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; -import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; +import org.apache.hadoop.hdfs.server.protocol.*; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.resources.Param; import org.apache.hadoop.http.HttpConfig; @@ -142,21 +82,24 @@ import org.apache.hadoop.security.UserGr import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.token.Token; import 
org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.util.Daemon; -import org.apache.hadoop.util.DiskChecker; +import org.apache.hadoop.util.*; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.apache.hadoop.util.GenericOptionsParser; -import org.apache.hadoop.util.JvmPauseMonitor; -import org.apache.hadoop.util.ServicePlugin; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.VersionInfo; import org.mortbay.util.ajax.JSON; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.protobuf.BlockingService; +import javax.management.ObjectName; +import java.io.*; +import java.lang.management.ManagementFactory; +import java.net.*; +import java.nio.channels.ClosedByInterruptException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.SocketChannel; +import java.security.PrivilegedExceptionAction; +import java.util.*; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.*; +import static org.apache.hadoop.util.ExitUtil.terminate; /********************************************************** * DataNode is a class (and program) that stores a set of @@ -1770,7 +1713,6 @@ public class DataNode extends Configured } if (!parseArguments(args, conf)) { - LOG.error("Bad command line arguments"); printUsage(System.err); return null; } @@ -1940,18 +1882,6 @@ public class DataNode extends Configured startOpt = StartupOption.ROLLBACK; } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.REGULAR; - } else if (StartupOption.ROLLINGUPGRADE.getName().equalsIgnoreCase(cmd)) { - startOpt = StartupOption.ROLLINGUPGRADE; - - if ((i < args.length ) && - (args[i].equalsIgnoreCase(RollingUpgradeStartupOption.ROLLBACK.toString()))) { - startOpt.setRollingUpgradeStartupOption(args[i++]); - } else { - LOG.error("Missing or unrecognized option to " + StartupOption.ROLLINGUPGRADE); - return false; - } - - LOG.info("Rolling upgrade rollback requested via startup option"); } else { return false; } @@ -2579,4 +2509,9 @@ public class DataNode extends Configured boolean shouldRun() { return shouldRun; } + + @VisibleForTesting + DataStorage getStorage() { + return storage; + } } Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1571431&r1=1571430&r2=1571431&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Mon Feb 24 20:46:35 2014 @@ -18,22 +18,10 @@ package org.apache.hadoop.hdfs.server.datanode; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.channels.FileLock; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; - import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.HardLink; -import org.apache.hadoop.fs.LocalFileSystem; -import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -50,6 +38,11 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DiskChecker; +import java.io.*; +import java.nio.channels.FileLock; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; + /** * Data storage information file. *

@@ -95,7 +88,7 @@ public class DataStorage extends Storage new ConcurrentHashMap()); } - public StorageInfo getBPStorage(String bpid) { + public BlockPoolSliceStorage getBPStorage(String bpid) { return bpStorageMap.get(bpid); } @@ -120,9 +113,6 @@ public class DataStorage extends Storage /** * Enable trash for the specified block pool storage. - * - * @param bpid - * @param inProgress */ public void enableTrash(String bpid) { if (trashEnabledBpids.add(bpid)) { @@ -130,18 +120,16 @@ public class DataStorage extends Storage } } - /** - * Disable trash for the specified block pool storage. - * Existing files in trash are purged i.e. permanently deleted. - * - * @param bpid - * @param inProgress - */ - public void disableAndPurgeTrash(String bpid) { - if (trashEnabledBpids.remove(bpid)) { - LOG.info("Disabled trash for bpid " + bpid); + public void restoreTrash(String bpid) { + if (trashEnabledBpids.contains(bpid)) { + getBPStorage(bpid).restoreTrash(); + trashEnabledBpids.remove(bpid); + LOG.info("Restored trash for bpid " + bpid); } - ((BlockPoolSliceStorage) getBPStorage(bpid)).emptyTrash(); + } + + public boolean trashEnabled(String bpid) { + return trashEnabledBpids.contains(bpid); } /** @@ -150,7 +138,6 @@ public class DataStorage extends Storage * 'trash' directory. If there is a subsequent rollback, then the block * files will be restored from trash. * - * @param blockFile * @return trash directory if rolling upgrade is in progress, null * otherwise. */ @@ -242,7 +229,7 @@ public class DataStorage extends Storage // 3. Update all storages. Some of them might have just been formatted. this.writeAll(); - // 4. mark DN storage is initilized + // 4. mark DN storage is initialized this.initialized = true; } @@ -724,9 +711,11 @@ public class DataStorage extends Storage /* * Finalize the upgrade for a block pool + * This also empties trash created during rolling upgrade and disables + * trash functionality. */ void finalizeUpgrade(String bpID) throws IOException { - // To handle finalizing a snapshot taken at datanode level while + // To handle finalizing a snapshot taken at datanode level while // upgrading to federation, if datanode level snapshot previous exists, // then finalize it. Else finalize the corresponding BP. for (StorageDirectory sd : storageDirs) { Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java?rev=1571431&r1=1571430&r2=1571431&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java Mon Feb 24 20:46:35 2014 @@ -417,11 +417,16 @@ public interface FsDatasetSpi