Subject: svn commit: r1581068 [1/3] - in /hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs: ./ dev-support/ src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/b...
Date: Mon, 24 Mar 2014 21:18:53 -0000
To: hdfs-commits@hadoop.apache.org
From: jing9@apache.org
Message-Id: <20140324211855.800B423888FE@eris.apache.org>

Author: jing9
Date: Mon Mar 24 21:18:52 2014
New Revision: 1581068

URL: http://svn.apache.org/r1581068
Log:
HDFS-5138. Merge change r1581067 from branch-2.
Added:
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
      - copied unchanged from r1581067, hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
Modified:
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Mar 24 21:18:52 2014
@@ -171,6 +171,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-6050. NFS does not handle exceptions correctly in a few places
     (brandonli)
 
+    HDFS-5138. Support HDFS upgrade in HA. (atm via todd)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml Mon Mar 24 21:18:52 2014
@@ -189,5 +189,10 @@
+
+
+
+
+

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java Mon Mar 24 21:18:52 2014
@@ -18,6 +18,8 @@
 package org.apache.hadoop.contrib.bkjournal;
 
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager;
 import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -666,6 +668,37 @@ public class BookKeeperJournalManager im
   }
 
   @Override
+  public void doPreUpgrade() throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void doUpgrade(Storage storage) throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public long getJournalCTime() throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void doFinalize() throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage,
+      int targetLayoutVersion) throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void doRollback() throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public void close() throws IOException {
     try {
      bkc.close();
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java Mon Mar 24 21:18:52 2014
@@ -316,7 +316,7 @@ public class TestBookKeeperAsHASharedDir
     } catch (IOException ioe) {
       LOG.info("Got expected exception", ioe);
       GenericTestUtils.assertExceptionContains(
-          "Cannot start an HA namenode with name dirs that need recovery", ioe);
+          "storage directory does not exist or is not accessible", ioe);
     }
   }

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Mon Mar 24 21:18:52 2014
@@ -43,6 +43,7 @@ import java.net.URISyntaxException;
 import java.security.SecureRandom;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
@@ -53,7 +54,6 @@ import java.util.Locale;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 
 import javax.net.SocketFactory;
 
@@ -72,7 +72,6 @@ import org.apache.hadoop.fs.BlockLocatio
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -635,10 +634,24 @@ public class DFSUtil {
     }
     return ret;
   }
+
+  /**
+   * Get all of the RPC addresses of the individual NNs in a given nameservice.
+   *
+   * @param conf Configuration
+   * @param nsId the nameservice whose NNs addresses we want.
+   * @param defaultValue default address to return in case key is not found.
+   * @return A map from nnId -> RPC address of each NN in the nameservice.
+   */
+  public static Map<String, InetSocketAddress> getRpcAddressesForNameserviceId(
+      Configuration conf, String nsId, String defaultValue) {
+    return getAddressesForNameserviceId(conf, nsId, defaultValue,
+        DFS_NAMENODE_RPC_ADDRESS_KEY);
+  }
 
   private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
       Configuration conf, String nsId, String defaultValue,
-      String[] keys) {
+      String... keys) {
     Collection<String> nnIds = getNameNodeIds(conf, nsId);
     Map<String, InetSocketAddress> ret = Maps.newHashMap();
     for (String nnId : emptyAsSingletonNull(nnIds)) {
@@ -1734,4 +1747,32 @@ public class DFSUtil {
     }
     return ttl*1000;
   }
+
+  /**
+   * Assert that all objects in the collection are equal. Returns silently if
+   * so, throws an AssertionError if any object is not equal. All null values
+   * are considered equal.
+   *
+   * @param objects the collection of objects to check for equality.
+   */
+  public static void assertAllResultsEqual(Collection<?> objects) {
+    Object[] resultsArray = objects.toArray();
+
+    if (resultsArray.length == 0)
+      return;
+
+    for (int i = 0; i < resultsArray.length; i++) {
+      if (i == 0)
+        continue;
+      else {
+        Object currElement = resultsArray[i];
+        Object lastElement = resultsArray[i - 1];
+        if ((currElement == null && currElement != lastElement) ||
+            (currElement != null && !currElement.equals(lastElement))) {
+          throw new AssertionError("Not all elements match in results: " +
+              Arrays.toString(resultsArray));
+        }
+      }
+    }
+  }
 }
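For illustration, the new DFSUtil helper above can be exercised on its own. The following is a minimal, self-contained sketch (not part of this patch; the helper body is inlined here so the demo compiles standalone), checking that every JournalNode in a quorum reported the same value:

import java.util.Arrays;
import java.util.Collection;

public class AssertAllResultsEqualDemo {
  public static void main(String[] args) {
    // Three JournalNodes reporting the same cTime: passes silently.
    Collection<Long> cTimes = Arrays.asList(1395696000L, 1395696000L, 1395696000L);
    assertAllResultsEqual(cTimes);

    // Mixed results would indicate divergent JournalNodes and throw:
    // assertAllResultsEqual(Arrays.asList(1L, 2L)); // AssertionError
  }

  // Same null-tolerant pairwise comparison as DFSUtil.assertAllResultsEqual.
  static void assertAllResultsEqual(Collection<?> objects) {
    Object[] results = objects.toArray();
    for (int i = 1; i < results.length; i++) {
      Object curr = results[i], last = results[i - 1];
      if (curr == null ? last != null : !curr.equals(last)) {
        throw new AssertionError("Not all elements match in results: "
            + Arrays.toString(results));
      }
    }
  }
}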
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java Mon Mar 24 21:18:52 2014
@@ -26,7 +26,6 @@ import static org.apache.hadoop.hdfs.pro
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -38,14 +37,14 @@ import org.apache.hadoop.HadoopIllegalAr
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
-
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -301,4 +300,55 @@ public class HAUtil {
     DFSClient dfsClient = dfs.getClient();
     return RPC.getServerAddress(dfsClient.getNamenode());
   }
+
+  /**
+   * Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
+   * call should be made on every NN in an HA nameservice, not just the active.
+   *
+   * @param conf configuration
+   * @param nsId the nameservice to get all of the proxies for.
+   * @return a list of RPC proxies for each NN in the nameservice.
+   * @throws IOException in the event of error.
+   */
+  public static List<ClientProtocol> getProxiesForAllNameNodesInNameservice(
+      Configuration conf, String nsId) throws IOException {
+    Map<String, InetSocketAddress> nnAddresses =
+        DFSUtil.getRpcAddressesForNameserviceId(conf, nsId, null);
+
+    List<ClientProtocol> namenodes = new ArrayList<ClientProtocol>();
+    for (InetSocketAddress nnAddress : nnAddresses.values()) {
+      NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
+      proxyInfo = NameNodeProxies.createNonHAProxy(conf,
+          nnAddress, ClientProtocol.class,
+          UserGroupInformation.getCurrentUser(), false);
+      namenodes.add(proxyInfo.getProxy());
+    }
+    return namenodes;
+  }
+
+  /**
+   * Used to ensure that at least one of the given HA NNs is currently in the
+   * active state.
+   *
+   * @param namenodes list of RPC proxies for each NN to check.
+   * @return true if at least one NN is active, false if all are in the
+   *         standby state.
+   * @throws IOException in the event of error.
+   */
+  public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
+      throws IOException {
+    for (ClientProtocol namenode : namenodes) {
+      try {
+        namenode.getFileInfo("/");
+        return true;
+      } catch (RemoteException re) {
+        IOException cause = re.unwrapRemoteException();
+        if (cause instanceof StandbyException) {
+          // This is expected to happen for a standby NN.
+        } else {
+          throw re;
+        }
+      }
+    }
+    return false;
+  }
 }
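A hedged usage sketch of the two HAUtil additions above (not part of this patch; "ns1" is a placeholder nameservice id that would have to exist in the configuration): gather a proxy for every NN in the nameservice and refuse to proceed unless at least one is active. This is essentially what the HA-upgrade code paths need before touching shared storage.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class CheckNameserviceActive {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    // One non-HA proxy per NN in the nameservice, active and standby alike.
    List<ClientProtocol> namenodes =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, "ns1");
    // Probes each NN with getFileInfo("/"); StandbyException is expected
    // from standbys and swallowed inside isAtLeastOneActive().
    if (!HAUtil.isAtLeastOneActive(namenodes)) {
      throw new IOException("No NameNode in ns1 is active; aborting");
    }
  }
}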
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java Mon Mar 24 21:18:52 2014
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.qjournal.p
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -153,5 +154,18 @@ interface AsyncLogger {
    */
   public void appendReport(StringBuilder sb);
 
+  public ListenableFuture<Void> doPreUpgrade();
+
+  public ListenableFuture<Void> doUpgrade(StorageInfo sInfo);
+
+  public ListenableFuture<Void> doFinalize();
+
+  public ListenableFuture<Boolean> canRollBack(StorageInfo storage,
+      StorageInfo prevStorage, int targetLayoutVersion);
+
+  public ListenableFuture<Void> doRollback();
+
+  public ListenableFuture<Long> getJournalCTime();
+
   public ListenableFuture<Void> discardSegments(long startTxId);
 }

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java Mon Mar 24 21:18:52 2014
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.qjournal.p
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -317,4 +318,71 @@ class AsyncLoggerSet {
     }
     return QuorumCall.create(calls);
   }
+
+  QuorumCall<AsyncLogger, Void> doPreUpgrade() {
+    Map<AsyncLogger, ListenableFuture<Void>> calls =
+        Maps.newHashMap();
+    for (AsyncLogger logger : loggers) {
+      ListenableFuture<Void> future =
+          logger.doPreUpgrade();
+      calls.put(logger, future);
+    }
+    return QuorumCall.create(calls);
+  }
+
+  public QuorumCall<AsyncLogger, Void> doUpgrade(StorageInfo sInfo) {
+    Map<AsyncLogger, ListenableFuture<Void>> calls =
+        Maps.newHashMap();
+    for (AsyncLogger logger : loggers) {
+      ListenableFuture<Void> future =
+          logger.doUpgrade(sInfo);
+      calls.put(logger, future);
+    }
+    return QuorumCall.create(calls);
+  }
+
+  public QuorumCall<AsyncLogger, Void> doFinalize() {
+    Map<AsyncLogger, ListenableFuture<Void>> calls =
+        Maps.newHashMap();
+    for (AsyncLogger logger : loggers) {
+      ListenableFuture<Void> future =
+          logger.doFinalize();
+      calls.put(logger, future);
+    }
+    return QuorumCall.create(calls);
+  }
+
+  public QuorumCall<AsyncLogger, Boolean> canRollBack(StorageInfo storage,
+      StorageInfo prevStorage, int targetLayoutVersion) {
+    Map<AsyncLogger, ListenableFuture<Boolean>> calls =
+        Maps.newHashMap();
+    for (AsyncLogger logger : loggers) {
+      ListenableFuture<Boolean> future =
+          logger.canRollBack(storage, prevStorage, targetLayoutVersion);
+      calls.put(logger, future);
+    }
+    return QuorumCall.create(calls);
+  }
+
+  public QuorumCall<AsyncLogger, Void> doRollback() {
+    Map<AsyncLogger, ListenableFuture<Void>> calls =
+        Maps.newHashMap();
+    for (AsyncLogger logger : loggers) {
+      ListenableFuture<Void> future =
+          logger.doRollback();
+      calls.put(logger, future);
+    }
+    return QuorumCall.create(calls);
+  }
+
+  public QuorumCall<AsyncLogger, Long> getJournalCTime() {
+    Map<AsyncLogger, ListenableFuture<Long>> calls =
+        Maps.newHashMap();
+    for (AsyncLogger logger : loggers) {
+      ListenableFuture<Long> future = logger.getJournalCTime();
+      calls.put(logger, future);
+    }
+    return QuorumCall.create(calls);
+  }
+
 }
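Every AsyncLoggerSet method above has the same fan-out shape: issue one asynchronous call per logger and collect the futures keyed by logger, which is exactly the map that QuorumCall.create() consumes. A self-contained sketch of that shape (hypothetical Worker type standing in for AsyncLogger; Guava ListenableFuture as in the patch, QuorumCall itself omitted because it is package-private):

import java.util.List;
import java.util.Map;

import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ListenableFuture;

public class FanOutSketch {
  interface Worker {
    // Each worker exposes its operation as an already-asynchronous call.
    ListenableFuture<Long> getCTime();
  }

  // One future per worker, keyed by the worker that produced it, so the
  // caller can later attribute successes and failures to specific nodes.
  static Map<Worker, ListenableFuture<Long>> fanOut(List<Worker> workers) {
    Map<Worker, ListenableFuture<Long>> calls = Maps.newHashMap();
    for (Worker w : workers) {
      calls.put(w, w.getCTime());
    }
    return calls;
  }
}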
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java Mon Mar 24 21:18:52 2014
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.qjournal.p
 import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolPB;
 import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.qjournal.server.GetJournalEditServlet;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -574,6 +575,72 @@ public class IPCLoggerChannel implements
       }
     });
   }
+
+  @Override
+  public ListenableFuture<Void> doPreUpgrade() {
+    return executor.submit(new Callable<Void>() {
+      @Override
+      public Void call() throws IOException {
+        getProxy().doPreUpgrade(journalId);
+        return null;
+      }
+    });
+  }
+
+  @Override
+  public ListenableFuture<Void> doUpgrade(final StorageInfo sInfo) {
+    return executor.submit(new Callable<Void>() {
+      @Override
+      public Void call() throws IOException {
+        getProxy().doUpgrade(journalId, sInfo);
+        return null;
+      }
+    });
+  }
+
+  @Override
+  public ListenableFuture<Void> doFinalize() {
+    return executor.submit(new Callable<Void>() {
+      @Override
+      public Void call() throws IOException {
+        getProxy().doFinalize(journalId);
+        return null;
+      }
+    });
+  }
+
+  @Override
+  public ListenableFuture<Boolean> canRollBack(final StorageInfo storage,
+      final StorageInfo prevStorage, final int targetLayoutVersion) {
+    return executor.submit(new Callable<Boolean>() {
+      @Override
+      public Boolean call() throws IOException {
+        return getProxy().canRollBack(journalId, storage, prevStorage,
+            targetLayoutVersion);
+      }
+    });
+  }
+
+  @Override
+  public ListenableFuture<Void> doRollback() {
+    return executor.submit(new Callable<Void>() {
+      @Override
+      public Void call() throws IOException {
+        getProxy().doRollback(journalId);
+        return null;
+      }
+    });
+  }
+
+  @Override
+  public ListenableFuture<Long> getJournalCTime() {
+    return executor.submit(new Callable<Long>() {
+      @Override
+      public Long call() throws IOException {
+        return getProxy().getJournalCTime(journalId);
+      }
+    });
+  }
 
   @Override
   public String toString() {
@@ -646,4 +713,5 @@ public class IPCLoggerChannel implements
   private boolean hasHttpServerEndPoint() {
     return httpServerURL != null;
   }
+
 }
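How each per-node future above is produced, reduced to a standalone sketch: a blocking RPC (simulated here with a constant) is wrapped in a ListenableFuture by submitting it to a Guava ListeningExecutorService, mirroring IPCLoggerChannel's executor.submit(new Callable<...>() { ... }) pattern. The value and the executor setup are stand-ins, not part of this patch.

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class AsyncWrapSketch {
  public static void main(String[] args) throws Exception {
    // Decorate a plain executor so submit() returns ListenableFuture.
    ListeningExecutorService executor =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

    ListenableFuture<Long> cTime = executor.submit(new Callable<Long>() {
      @Override
      public Long call() {
        // Stand-in for getProxy().getJournalCTime(journalId), which would
        // block on the wire; the wrapper makes it asynchronous.
        return 1395696000L;
      }
    });

    System.out.println("journal cTime = " + cTime.get());
    executor.shutdown();
  }
}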
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java Mon Mar 24 21:18:52 2014
@@ -34,10 +34,13 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
@@ -77,8 +80,14 @@ public class QuorumJournalManager implem
   // Since these don't occur during normal operation, we can
   // use rather lengthy timeouts, and don't need to make them
   // configurable.
-  private static final int FORMAT_TIMEOUT_MS = 60000;
-  private static final int HASDATA_TIMEOUT_MS = 60000;
+  private static final int FORMAT_TIMEOUT_MS = 60000;
+  private static final int HASDATA_TIMEOUT_MS = 60000;
+  private static final int CAN_ROLL_BACK_TIMEOUT_MS = 60000;
+  private static final int FINALIZE_TIMEOUT_MS = 60000;
+  private static final int PRE_UPGRADE_TIMEOUT_MS = 60000;
+  private static final int ROLL_BACK_TIMEOUT_MS = 60000;
+  private static final int UPGRADE_TIMEOUT_MS = 60000;
+  private static final int GET_JOURNAL_CTIME_TIMEOUT_MS = 60000;
   private static final int DISCARD_SEGMENTS_TIMEOUT_MS = 60000;
 
   private final Configuration conf;
@@ -496,6 +505,134 @@ public class QuorumJournalManager implem
   }
 
   @Override
+  public void doPreUpgrade() throws IOException {
+    QuorumCall<AsyncLogger, Void> call = loggers.doPreUpgrade();
+    try {
+      call.waitFor(loggers.size(), loggers.size(), 0, PRE_UPGRADE_TIMEOUT_MS,
+          "doPreUpgrade");
+
+      if (call.countExceptions() > 0) {
+        call.rethrowException("Could not do pre-upgrade of one or more JournalNodes");
+      }
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted waiting for doPreUpgrade() response");
+    } catch (TimeoutException e) {
+      throw new IOException("Timed out waiting for doPreUpgrade() response");
+    }
+  }
+
+  @Override
+  public void doUpgrade(Storage storage) throws IOException {
+    QuorumCall<AsyncLogger, Void> call = loggers.doUpgrade(storage);
+    try {
+      call.waitFor(loggers.size(), loggers.size(), 0, UPGRADE_TIMEOUT_MS,
+          "doUpgrade");
+
+      if (call.countExceptions() > 0) {
+        call.rethrowException("Could not perform upgrade of one or more JournalNodes");
+      }
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted waiting for doUpgrade() response");
+    } catch (TimeoutException e) {
+      throw new IOException("Timed out waiting for doUpgrade() response");
+    }
+  }
+
+  @Override
+  public void doFinalize() throws IOException {
+    QuorumCall<AsyncLogger, Void> call = loggers.doFinalize();
+    try {
+      call.waitFor(loggers.size(), loggers.size(), 0, FINALIZE_TIMEOUT_MS,
+          "doFinalize");
+
+      if (call.countExceptions() > 0) {
+        call.rethrowException("Could not finalize one or more JournalNodes");
+      }
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted waiting for doFinalize() response");
+    } catch (TimeoutException e) {
+      throw new IOException("Timed out waiting for doFinalize() response");
+    }
+  }
+
+  @Override
+  public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage,
+      int targetLayoutVersion) throws IOException {
+    QuorumCall<AsyncLogger, Boolean> call = loggers.canRollBack(storage,
+        prevStorage, targetLayoutVersion);
+    try {
+      call.waitFor(loggers.size(), loggers.size(), 0, CAN_ROLL_BACK_TIMEOUT_MS,
+          "lockSharedStorage");
+
+      if (call.countExceptions() > 0) {
+        call.rethrowException("Could not check if roll back possible for"
+            + " one or more JournalNodes");
+      }
+
+      // Either they all return the same thing or this call fails, so we can
+      // just return the first result.
+      DFSUtil.assertAllResultsEqual(call.getResults().values());
+      for (Boolean result : call.getResults().values()) {
+        return result;
+      }
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted waiting for lockSharedStorage() " +
+          "response");
+    } catch (TimeoutException e) {
+      throw new IOException("Timed out waiting for lockSharedStorage() " +
+          "response");
+    }
+
+    throw new AssertionError("Unreachable code.");
+  }
+
+  @Override
+  public void doRollback() throws IOException {
+    QuorumCall<AsyncLogger, Void> call = loggers.doRollback();
+    try {
+      call.waitFor(loggers.size(), loggers.size(), 0, ROLL_BACK_TIMEOUT_MS,
+          "doRollback");
+
+      if (call.countExceptions() > 0) {
+        call.rethrowException("Could not perform rollback of one or more JournalNodes");
+      }
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted waiting for doRollback() response");
+    } catch (TimeoutException e) {
+      throw new IOException("Timed out waiting for doRollback() response");
+    }
+  }
+
+  @Override
+  public long getJournalCTime() throws IOException {
+    QuorumCall<AsyncLogger, Long> call = loggers.getJournalCTime();
+    try {
+      call.waitFor(loggers.size(), loggers.size(), 0,
+          GET_JOURNAL_CTIME_TIMEOUT_MS, "getJournalCTime");
+
+      if (call.countExceptions() > 0) {
+        call.rethrowException("Could not get journal CTime for one or " +
+            "more JournalNodes");
+      }
+
+      // Either they all return the same thing or this call fails, so we can
+      // just return the first result.
+      DFSUtil.assertAllResultsEqual(call.getResults().values());
+      for (Long result : call.getResults().values()) {
+        return result;
+      }
+    } catch (InterruptedException e) {
+      throw new IOException("Interrupted waiting for getJournalCTime() " +
+          "response");
+    } catch (TimeoutException e) {
+      throw new IOException("Timed out waiting for getJournalCTime() " +
+          "response");
+    }
+
+    throw new AssertionError("Unreachable code.");
+  }
+
+  @Override
   public void discardSegments(long startTxId) throws IOException {
     QuorumCall<AsyncLogger, Void> call = loggers.discardSegments(startTxId);
     try {
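Every upgrade method in QuorumJournalManager above follows one wait-and-validate pattern: require a response from every node (not just a majority), surface any per-node exception, check that all answers agree, then return the first one. The sketch below reduces that pattern to plain java.util.concurrent (QuorumCall is package-private; the overall-deadline semantics of waitFor() are simplified here to a per-call timeout), as an assumption-labeled illustration rather than the patch's exact logic:

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class QuorumWaitSketch {
  // Wait for all calls, fail on any exception or timeout, and insist that
  // every node returned the same (non-null) value before returning it.
  static <T> T waitForUnanimousResult(List<Future<T>> calls, long timeoutMs)
      throws IOException {
    T first = null;
    for (Future<T> call : calls) {
      try {
        T result = call.get(timeoutMs, TimeUnit.MILLISECONDS);
        if (first == null) {
          first = result;
        } else if (!first.equals(result)) {
          throw new AssertionError("Not all elements match in results");
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted waiting for response", e);
      } catch (TimeoutException e) {
        throw new IOException("Timed out waiting for response", e);
      } catch (ExecutionException e) {
        throw new IOException("Call failed on a node", e.getCause());
      }
    }
    return first;
  }
}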
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java Mon Mar 24 21:18:52 2014
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.qjournal.p
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.retry.Idempotent;
@@ -146,6 +147,19 @@ public interface QJournalProtocol {
   public void acceptRecovery(RequestInfo reqInfo,
       SegmentStateProto stateToAccept, URL fromUrl) throws IOException;
 
+  public void doPreUpgrade(String journalId) throws IOException;
+
+  public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException;
+
+  public void doFinalize(String journalId) throws IOException;
+
+  public Boolean canRollBack(String journalId, StorageInfo storage,
+      StorageInfo prevStorage, int targetLayoutVersion) throws IOException;
+
+  public void doRollback(String journalId) throws IOException;
+
+  public Long getJournalCTime(String journalId) throws IOException;
+
   /**
    * Discard journal segments whose first TxId is greater than or equal to the
    * given txid.
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java Mon Mar 24 21:18:52 2014
@@ -28,14 +28,26 @@ import org.apache.hadoop.hdfs.qjournal.p
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto;
@@ -54,6 +66,8 @@ import org.apache.hadoop.hdfs.qjournal.p
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
@@ -263,4 +277,79 @@ public class QJournalProtocolServerSideT
       throw new ServiceException(e);
     }
   }
+
+
+  @Override
+  public DoPreUpgradeResponseProto doPreUpgrade(RpcController controller,
+      DoPreUpgradeRequestProto request) throws ServiceException {
+    try {
+      impl.doPreUpgrade(convert(request.getJid()));
+      return DoPreUpgradeResponseProto.getDefaultInstance();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public DoUpgradeResponseProto doUpgrade(RpcController controller,
+      DoUpgradeRequestProto request) throws ServiceException {
+    StorageInfo si = PBHelper.convert(request.getSInfo(), NodeType.JOURNAL_NODE);
+    try {
+      impl.doUpgrade(convert(request.getJid()), si);
+      return DoUpgradeResponseProto.getDefaultInstance();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public DoFinalizeResponseProto doFinalize(RpcController controller,
+      DoFinalizeRequestProto request) throws ServiceException {
+    try {
+      impl.doFinalize(convert(request.getJid()));
+      return DoFinalizeResponseProto.getDefaultInstance();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public CanRollBackResponseProto canRollBack(RpcController controller,
+      CanRollBackRequestProto request) throws ServiceException {
+    try {
+      StorageInfo si = PBHelper.convert(request.getStorage(), NodeType.JOURNAL_NODE);
+      Boolean result = impl.canRollBack(convert(request.getJid()), si,
+          PBHelper.convert(request.getPrevStorage(), NodeType.JOURNAL_NODE),
+          request.getTargetLayoutVersion());
+      return CanRollBackResponseProto.newBuilder()
+          .setCanRollBack(result)
+          .build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public DoRollbackResponseProto doRollback(RpcController controller, DoRollbackRequestProto request)
+      throws ServiceException {
+    try {
+      impl.doRollback(convert(request.getJid()));
+      return DoRollbackResponseProto.getDefaultInstance();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GetJournalCTimeResponseProto getJournalCTime(RpcController controller,
+      GetJournalCTimeRequestProto request) throws ServiceException {
+    try {
+      Long resultCTime = impl.getJournalCTime(convert(request.getJid()));
+      return GetJournalCTimeResponseProto.newBuilder()
+          .setResultCTime(resultCTime)
+          .build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java Mon Mar 24 21:18:52 2014
@@ -28,11 +28,19 @@ import org.apache.hadoop.hdfs.protocolPB
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto;
@@ -49,6 +57,7 @@ import org.apache.hadoop.hdfs.qjournal.p
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.ipc.ProtobufHelper;
@@ -280,6 +289,87 @@ public class QJournalProtocolTranslatorP
   }
 
   @Override
+  public void doPreUpgrade(String jid) throws IOException {
+    try {
+      rpcProxy.doPreUpgrade(NULL_CONTROLLER,
+          DoPreUpgradeRequestProto.newBuilder()
+            .setJid(convertJournalId(jid))
+            .build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException {
+    try {
+      rpcProxy.doUpgrade(NULL_CONTROLLER,
+          DoUpgradeRequestProto.newBuilder()
+            .setJid(convertJournalId(journalId))
+            .setSInfo(PBHelper.convert(sInfo))
+            .build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void doFinalize(String jid) throws IOException {
+    try {
+      rpcProxy.doFinalize(NULL_CONTROLLER,
+          DoFinalizeRequestProto.newBuilder()
+            .setJid(convertJournalId(jid))
+            .build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public Boolean canRollBack(String journalId, StorageInfo storage,
+      StorageInfo prevStorage, int targetLayoutVersion) throws IOException {
+    try {
+      CanRollBackResponseProto response = rpcProxy.canRollBack(
+          NULL_CONTROLLER,
+          CanRollBackRequestProto.newBuilder()
+            .setJid(convertJournalId(journalId))
+            .setStorage(PBHelper.convert(storage))
+            .setPrevStorage(PBHelper.convert(prevStorage))
+            .setTargetLayoutVersion(targetLayoutVersion)
+            .build());
+      return response.getCanRollBack();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void doRollback(String journalId) throws IOException {
+    try {
+      rpcProxy.doRollback(NULL_CONTROLLER,
+          DoRollbackRequestProto.newBuilder()
+            .setJid(convertJournalId(journalId))
+            .build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public Long getJournalCTime(String journalId) throws IOException {
+    try {
+      GetJournalCTimeResponseProto response = rpcProxy.getJournalCTime(
+          NULL_CONTROLLER,
+          GetJournalCTimeRequestProto.newBuilder()
+            .setJid(convertJournalId(journalId))
+            .build());
+      return response.getResultCTime();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
   public void discardSegments(String journalId, long startTxId)
       throws IOException {
     try {
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java Mon Mar 24 21:18:52 2014
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.ImageServlet;
@@ -139,20 +140,26 @@ public class GetJournalEditServlet exten
   private boolean checkStorageInfoOrSendError(JNStorage storage,
       HttpServletRequest request, HttpServletResponse response)
       throws IOException {
-    String myStorageInfoString = storage.toColonSeparatedString();
+    int myNsId = storage.getNamespaceID();
+    String myClusterId = storage.getClusterID();
+
     String theirStorageInfoString = StringEscapeUtils.escapeHtml(
         request.getParameter(STORAGEINFO_PARAM));
-    if (theirStorageInfoString != null
-        && !myStorageInfoString.equals(theirStorageInfoString)) {
-      String msg = "This node has storage info '" + myStorageInfoString
-          + "' but the requesting node expected '"
-          + theirStorageInfoString + "'";
-
-      response.sendError(HttpServletResponse.SC_FORBIDDEN, msg);
-      LOG.warn("Received an invalid request file transfer request from " +
-          request.getRemoteAddr() + ": " + msg);
-      return false;
+
+    if (theirStorageInfoString != null) {
+      int theirNsId = StorageInfo.getNsIdFromColonSeparatedString(
+          theirStorageInfoString);
+      String theirClusterId = StorageInfo.getClusterIdFromColonSeparatedString(
+          theirStorageInfoString);
+      if (myNsId != theirNsId || !myClusterId.equals(theirClusterId)) {
+        String msg = "This node has namespaceId '" + myNsId + " and clusterId '"
+            + myClusterId + "' but the requesting node expected '" + theirNsId
+            + "' and '" + theirClusterId + "'";
+        response.sendError(HttpServletResponse.SC_FORBIDDEN, msg);
+        LOG.warn("Received an invalid request file transfer request from " +
+            request.getRemoteAddr() + ": " + msg);
+        return false;
+      }
     }
     return true;
   }
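The servlet change above compares only the namespaceId and clusterId fields of the colon-separated storage string, rather than the whole string, so that nodes at different layout versions (mid-upgrade) can still serve edits to each other. A hedged sketch of that string format and of extracting the two fields; the sample value and the local parsing are assumptions standing in for StorageInfo.toColonSeparatedString() and the new StorageInfo.get*FromColonSeparatedString() helpers:

public class StorageInfoStringSketch {
  public static void main(String[] args) {
    // Assumed field order: layoutVersion:namespaceID:cTime:clusterID
    String storageInfo = "-56:1980905016:0:CID-e649a10a-0000-0000-0000-000000000000";
    String[] fields = storageInfo.split(":");
    int namespaceId = Integer.parseInt(fields[1]); // layoutVersion (fields[0]) is ignored
    String clusterId = fields[3];                  // cTime (fields[2]) is ignored too
    System.out.println("nsId=" + namespaceId + " clusterId=" + clusterId);
  }
}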
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java Mon Mar 24 21:18:52 2014
@@ -130,6 +130,10 @@ class JNStorage extends Storage {
     return new File(sd.getCurrentDir(), "paxos");
   }
 
+  File getRoot() {
+    return sd.getRoot();
+  }
+
   /**
    * Remove any log files and associated paxos files which are older than
    * the given txid.
@@ -182,12 +186,15 @@ class JNStorage extends Storage {
     unlockAll();
     sd.clearDirectory();
     writeProperties(sd);
+    createPaxosDir();
+    analyzeStorage();
+  }
+
+  void createPaxosDir() throws IOException {
     if (!getPaxosDir().mkdirs()) {
       throw new IOException("Could not create paxos dir: " + getPaxosDir());
     }
-    analyzeStorage();
   }
-
   void analyzeStorage() throws IOException {
     this.state = sd.analyzeStorage(StartupOption.REGULAR, this);
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java Mon Mar 24 21:18:52 2014
@@ -37,12 +37,14 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException;
 import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
 import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
@@ -73,7 +75,7 @@ import com.google.protobuf.TextFormat;
  * Each such journal is entirely independent despite being hosted by
  * the same JVM.
  */
-class Journal implements Closeable {
+public class Journal implements Closeable {
   static final Log LOG = LogFactory.getLog(Journal.class);
@@ -122,8 +124,8 @@ class Journal implements Closeable {
    */
   private BestEffortLongFile committedTxnId;
 
-  private static final String LAST_PROMISED_FILENAME = "last-promised-epoch";
-  private static final String LAST_WRITER_EPOCH = "last-writer-epoch";
+  public static final String LAST_PROMISED_FILENAME = "last-promised-epoch";
+  public static final String LAST_WRITER_EPOCH = "last-writer-epoch";
   private static final String COMMITTED_TXID_FILENAME = "committed-txid";
 
   private final FileJournalManager fjm;
@@ -627,7 +629,7 @@ class Journal implements Closeable {
   }
 
   /**
-   * @see QJournalProtocol#getEditLogManifest(String, long)
+   * @see QJournalProtocol#getEditLogManifest(String, long, boolean)
    */
   public RemoteEditLogManifest getEditLogManifest(long sinceTxId,
       boolean inProgressOk) throws IOException {
@@ -729,7 +731,7 @@ class Journal implements Closeable {
   }
 
   /**
-   * @see QJournalProtocol#acceptRecovery(RequestInfo, SegmentStateProto, URL)
+   * @see QJournalProtocol#acceptRecovery(RequestInfo, QJournalProtocolProtos.SegmentStateProto, URL)
   */
  public synchronized void acceptRecovery(RequestInfo reqInfo,
      SegmentStateProto segment, URL fromUrl)
@@ -987,4 +989,62 @@ class Journal implements Closeable {
     // we delete all the segments after the startTxId. let's reset committedTxnId
     committedTxnId.set(startTxId - 1);
   }
+
+  public synchronized void doPreUpgrade() throws IOException {
+    storage.getJournalManager().doPreUpgrade();
+  }
+
+  public synchronized void doUpgrade(StorageInfo sInfo) throws IOException {
+    long oldCTime = storage.getCTime();
+    storage.cTime = sInfo.cTime;
+    int oldLV = storage.getLayoutVersion();
+    storage.layoutVersion = sInfo.layoutVersion;
+    LOG.info("Starting upgrade of edits directory: "
+        + ".\n   old LV = " + oldLV
+        + "; old CTime = " + oldCTime
+        + ".\n   new LV = " + storage.getLayoutVersion()
+        + "; new CTime = " + storage.getCTime());
+    storage.getJournalManager().doUpgrade(storage);
+    storage.createPaxosDir();
+
+    // Copy over the contents of the epoch data files to the new dir.
+    File currentDir = storage.getSingularStorageDir().getCurrentDir();
+    File previousDir = storage.getSingularStorageDir().getPreviousDir();
+
+    PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile(
+        new File(previousDir, LAST_PROMISED_FILENAME), 0);
+    PersistentLongFile prevLastWriterEpoch = new PersistentLongFile(
+        new File(previousDir, LAST_WRITER_EPOCH), 0);
+
+    lastPromisedEpoch = new PersistentLongFile(
+        new File(currentDir, LAST_PROMISED_FILENAME), 0);
+    lastWriterEpoch = new PersistentLongFile(
+        new File(currentDir, LAST_WRITER_EPOCH), 0);
+
+    lastPromisedEpoch.set(prevLastPromisedEpoch.get());
+    lastWriterEpoch.set(prevLastWriterEpoch.get());
+  }
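The epoch-file copy at the end of doUpgrade() above is what keeps the journal's Paxos promises intact across the upgrade: after the storage directory has been rotated (current/ moved to previous/ and a fresh current/ created), the two epoch counters are read back from previous/ and re-persisted into the new current/. A hedged standalone sketch of just that step, using the same PersistentLongFile helper as the patch; the directory paths here are hypothetical:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.util.PersistentLongFile;

public class EpochCopySketch {
  public static void main(String[] args) throws IOException {
    // Placeholder paths; in the patch these come from the storage directory.
    File previousDir = new File("/tmp/jn/ns1/previous");
    File currentDir = new File("/tmp/jn/ns1/current");

    for (String name : new String[] {"last-promised-epoch", "last-writer-epoch"}) {
      // 0 is the default if the file does not exist yet.
      PersistentLongFile prev = new PersistentLongFile(new File(previousDir, name), 0);
      PersistentLongFile curr = new PersistentLongFile(new File(currentDir, name), 0);
      curr.set(prev.get()); // set() persists the value to disk
    }
  }
}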
"" : + "\n cur LV = " + storage.getLayoutVersion() + + "; cur CTime = " + storage.getCTime())); + storage.getJournalManager().doFinalize(); + } + + public Boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, + int targetLayoutVersion) throws IOException { + return this.storage.getJournalManager().canRollBack(storage, prevStorage, + targetLayoutVersion); + } + + public void doRollback() throws IOException { + storage.getJournalManager().doRollback(); + } + + public Long getJournalCTime() throws IOException { + return storage.getJournalManager().getJournalCTime(); + } } Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java?rev=1581068&r1=1581067&r2=1581068&view=diff ============================================================================== --- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java (original) +++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java Mon Mar 24 21:18:52 2014 @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSConfigK import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager; import org.apache.hadoop.hdfs.server.common.StorageErrorReporter; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; @@ -290,4 +291,31 @@ public class JournalNode implements Tool throws IOException { getOrCreateJournal(journalId).discardSegments(startTxId); } + + public void doPreUpgrade(String journalId) throws IOException { + getOrCreateJournal(journalId).doPreUpgrade(); + } + + public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException { + getOrCreateJournal(journalId).doUpgrade(sInfo); + } + + public void doFinalize(String journalId) throws IOException { + getOrCreateJournal(journalId).doFinalize(); + } + + public Boolean canRollBack(String journalId, StorageInfo storage, + StorageInfo prevStorage, int targetLayoutVersion) throws IOException { + return getOrCreateJournal(journalId).canRollBack(storage, prevStorage, + targetLayoutVersion); + } + + public void doRollback(String journalId) throws IOException { + getOrCreateJournal(journalId).doRollback(); + } + + public Long getJournalCTime(String journalId) throws IOException { + return getOrCreateJournal(journalId).getJournalCTime(); + } + } Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java?rev=1581068&r1=1581067&r2=1581068&view=diff ============================================================================== --- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java (original) +++ 
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java Mon Mar 24 21:18:52 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.qjournal.p
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
 import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolPB;
 import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -206,6 +207,38 @@ class JournalNodeRpcServer implements QJ
   }
 
   @Override
+  public void doPreUpgrade(String journalId) throws IOException {
+    jn.doPreUpgrade(journalId);
+  }
+
+  @Override
+  public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException {
+    jn.doUpgrade(journalId, sInfo);
+  }
+
+  @Override
+  public void doFinalize(String journalId) throws IOException {
+    jn.doFinalize(journalId);
+  }
+
+  @Override
+  public Boolean canRollBack(String journalId, StorageInfo storage,
+      StorageInfo prevStorage, int targetLayoutVersion)
+      throws IOException {
+    return jn.canRollBack(journalId, storage, prevStorage, targetLayoutVersion);
+  }
+
+  @Override
+  public void doRollback(String journalId) throws IOException {
+    jn.doRollback(journalId);
+  }
+
+  @Override
+  public Long getJournalCTime(String journalId) throws IOException {
+    return jn.getJournalCTime(journalId);
+  }
+
+  @Override
   public void discardSegments(String journalId, long startTxId)
       throws IOException {
     jn.discardSegments(journalId, startTxId);
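On the wire these are QJournalProtocol methods, so the client side has to fan each call out to every JournalNode. A hedged sketch of that fan-out; the real client (QuorumJournalManager/AsyncLoggerSet, modified elsewhere in this commit) issues the calls asynchronously and waits for a quorum, which the synchronous loop below ignores:

    import java.io.IOException;
    import java.util.List;

    class QuorumFanOutSketch {
      interface UpgradeRpc { void doPreUpgrade(String journalId) throws IOException; }

      static void preUpgradeAll(List<UpgradeRpc> loggers, String journalId)
          throws IOException {
        // Simplification: call each logger in turn instead of in parallel.
        for (UpgradeRpc logger : loggers) {
          logger.doPreUpgrade(journalId);
        }
      }
    }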
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java Mon Mar 24 21:18:52 2014
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.common;
 
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
@@ -78,7 +77,6 @@ public abstract class Storage extends St
   public static final int[] LAYOUT_VERSIONS_203 = {-19, -31};
 
   public static final String STORAGE_FILE_LOCK     = "in_use.lock";
-  protected static final String STORAGE_FILE_VERSION = "VERSION";
   public static final String STORAGE_DIR_CURRENT   = "current";
   public static final String STORAGE_DIR_PREVIOUS  = "previous";
   public static final String STORAGE_TMP_REMOVED   = "removed.tmp";
@@ -121,22 +119,24 @@ public abstract class Storage extends St
   private class DirIterator implements Iterator<StorageDirectory> {
     StorageDirType dirType;
+    boolean includeShared;
     int prevIndex; // for remove()
     int nextIndex; // for next()
 
-    DirIterator(StorageDirType dirType) {
+    DirIterator(StorageDirType dirType, boolean includeShared) {
       this.dirType = dirType;
       this.nextIndex = 0;
       this.prevIndex = 0;
+      this.includeShared = includeShared;
     }
 
     @Override
    public boolean hasNext() {
       if (storageDirs.isEmpty() || nextIndex >= storageDirs.size())
         return false;
-      if (dirType != null) {
+      if (dirType != null || !includeShared) {
         while (nextIndex < storageDirs.size()) {
-          if (getStorageDir(nextIndex).getStorageDirType().isOfType(dirType))
+          if (shouldReturnNextDir())
             break;
           nextIndex++;
         }
@@ -151,9 +151,9 @@ public abstract class Storage extends St
       StorageDirectory sd = getStorageDir(nextIndex);
       prevIndex = nextIndex;
       nextIndex++;
-      if (dirType != null) {
+      if (dirType != null || !includeShared) {
         while (nextIndex < storageDirs.size()) {
-          if (getStorageDir(nextIndex).getStorageDirType().isOfType(dirType))
+          if (shouldReturnNextDir())
             break;
           nextIndex++;
         }
@@ -167,6 +167,12 @@ public abstract class Storage extends St
       storageDirs.remove(prevIndex); // remove last returned element
       hasNext(); // reset nextIndex to correct place
     }
+
+    private boolean shouldReturnNextDir() {
+      StorageDirectory sd = getStorageDir(nextIndex);
+      return (dirType == null || sd.getStorageDirType().isOfType(dirType)) &&
+          (includeShared || !sd.isShared());
+    }
   }
 
   /**
@@ -198,7 +204,27 @@ public abstract class Storage extends St
    * them via the Iterator
    */
   public Iterator<StorageDirectory> dirIterator(StorageDirType dirType) {
-    return new DirIterator(dirType);
+    return dirIterator(dirType, true);
+  }
+
+  /**
+   * Return all entries in storageDirs, potentially excluding shared dirs.
+   * @param includeShared whether or not to include shared dirs.
+   * @return an iterator over the configured storage dirs.
+   */
+  public Iterator<StorageDirectory> dirIterator(boolean includeShared) {
+    return dirIterator(null, includeShared);
+  }
+
+  /**
+   * @param dirType all entries will be of this type of dir
+   * @param includeShared true to include any shared directories,
+   *        false otherwise
+   * @return an iterator over the configured storage dirs.
+   */
+  public Iterator<StorageDirectory> dirIterator(StorageDirType dirType,
+      boolean includeShared) {
+    return new DirIterator(dirType, includeShared);
   }
 
   public Iterable<StorageDirectory> dirIterable(final StorageDirType dirType) {
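The new overloads let callers exclude shared directories, which matters during upgrade: a shared edits dir is upgraded through its journal manager rather than as a local directory. A hypothetical usage helper, not from the patch:

    import java.util.Iterator;
    import org.apache.hadoop.hdfs.server.common.Storage;
    import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

    class LocalDirsSketch {
      static void printLocalDirs(Storage storage) {
        // includeShared == false: visit only local, non-shared directories
        Iterator<StorageDirectory> it = storage.dirIterator(false);
        while (it.hasNext()) {
          System.out.println("local dir: " + it.next().getRoot());
        }
      }
    }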
true + * disables locking on the storage directory, false enables locking */ - public StorageDirectory(File dir, StorageDirType dirType, boolean useLock) { + public StorageDirectory(File dir, StorageDirType dirType, boolean isShared) { this.root = dir; this.lock = null; this.dirType = dirType; - this.useLock = useLock; + this.isShared = isShared; } /** @@ -616,6 +644,10 @@ public abstract class Storage extends St return true; } + + public boolean isShared() { + return isShared; + } /** @@ -630,7 +662,7 @@ public abstract class Storage extends St * @throws IOException if locking fails */ public void lock() throws IOException { - if (!useLock) { + if (isShared()) { LOG.info("Locking is disabled"); return; } @@ -901,32 +933,20 @@ public abstract class Storage extends St } /** - * Read properties from the VERSION file in the given storage directory. - */ - public void readProperties(StorageDirectory sd) throws IOException { - Properties props = readPropertiesFile(sd.getVersionFile()); - setFieldsFromProperties(props, sd); - } - - /** - * Read properties from the the previous/VERSION file in the given storage directory. - */ - public void readPreviousVersionProperties(StorageDirectory sd) - throws IOException { - Properties props = readPropertiesFile(sd.getPreviousVersionFile()); - setFieldsFromProperties(props, sd); - } - - /** * Write properties to the VERSION file in the given storage directory. */ public void writeProperties(StorageDirectory sd) throws IOException { writeProperties(sd.getVersionFile(), sd); } - + public void writeProperties(File to, StorageDirectory sd) throws IOException { Properties props = new Properties(); setPropertiesFromFields(props, sd); + writeProperties(to, sd, props); + } + + public static void writeProperties(File to, StorageDirectory sd, + Properties props) throws IOException { RandomAccessFile file = new RandomAccessFile(to, "rws"); FileOutputStream out = null; try { @@ -953,23 +973,6 @@ public abstract class Storage extends St file.close(); } } - - public static Properties readPropertiesFile(File from) throws IOException { - RandomAccessFile file = new RandomAccessFile(from, "rws"); - FileInputStream in = null; - Properties props = new Properties(); - try { - in = new FileInputStream(file.getFD()); - file.seek(0); - props.load(in); - } finally { - if (in != null) { - in.close(); - } - file.close(); - } - return props; - } public static void rename(File from, File to) throws IOException { if (!from.renameTo(to)) Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java?rev=1581068&r1=1581067&r2=1581068&view=diff ============================================================================== --- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java (original) +++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java Mon Mar 24 21:18:52 2014 @@ -17,7 +17,10 @@ */ package org.apache.hadoop.hdfs.server.common; +import java.io.File; +import java.io.FileInputStream; import java.io.IOException; +import java.io.RandomAccessFile; import java.util.Map; import java.util.Properties; import java.util.SortedSet; @@ -208,4 +211,46 @@ public class StorageInfo { } return 
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java Mon Mar 24 21:18:52 2014
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.common;
 
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.RandomAccessFile;
 import java.util.Map;
 import java.util.Properties;
 import java.util.SortedSet;
@@ -208,4 +211,46 @@ public class StorageInfo {
     }
     return property;
   }
+
+  public static int getNsIdFromColonSeparatedString(String in) {
+    return Integer.parseInt(in.split(":")[1]);
+  }
+
+  public static String getClusterIdFromColonSeparatedString(String in) {
+    return in.split(":")[3];
+  }
+
+  /**
+   * Read properties from the VERSION file in the given storage directory.
+   */
+  public void readProperties(StorageDirectory sd) throws IOException {
+    Properties props = readPropertiesFile(sd.getVersionFile());
+    setFieldsFromProperties(props, sd);
+  }
+
+  /**
+   * Read properties from the previous/VERSION file in the given storage directory.
+   */
+  public void readPreviousVersionProperties(StorageDirectory sd)
+      throws IOException {
+    Properties props = readPropertiesFile(sd.getPreviousVersionFile());
+    setFieldsFromProperties(props, sd);
+  }
+
+  public static Properties readPropertiesFile(File from) throws IOException {
+    RandomAccessFile file = new RandomAccessFile(from, "rws");
+    FileInputStream in = null;
+    Properties props = new Properties();
+    try {
+      in = new FileInputStream(file.getFD());
+      file.seek(0);
+      props.load(in);
+    } finally {
+      if (in != null) {
+        in.close();
+      }
+      file.close();
+    }
+    return props;
+  }
 }

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java Mon Mar 24 21:18:52 2014
@@ -111,7 +111,7 @@ public class BlockPoolSliceStorage exten
         dataDirs.size());
     for (Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
       File dataDir = it.next();
-      StorageDirectory sd = new StorageDirectory(dataDir, null, false);
+      StorageDirectory sd = new StorageDirectory(dataDir, null, true);
       StorageState curState;
       try {
         curState = sd.analyzeStorage(startOpt, this);
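The two parsing helpers assume the layoutVersion:namespaceID:cTime:clusterID ordering of StorageInfo's colon-separated form, so field 1 is the namespace ID and field 3 is the cluster ID. A worked example with made-up values:

    import org.apache.hadoop.hdfs.server.common.StorageInfo;

    class ColonStringSketch {
      public static void main(String[] args) {
        // layoutVersion : namespaceID : cTime : clusterID (values invented)
        String s = "-56:463031076:0:CID-d63a1640-9f4f-4ab6-8bb4-9e91b7c7d689";
        int nsId = StorageInfo.getNsIdFromColonSeparatedString(s);        // 463031076
        String cid = StorageInfo.getClusterIdFromColonSeparatedString(s); // "CID-..."
        System.out.println(nsId + " / " + cid);
      }
    }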
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java Mon Mar 24 21:18:52 2014
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.IOException;
 import java.util.Collection;
 
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
@@ -103,4 +105,35 @@ class BackupJournalManager implements Jo
   public void discardSegments(long startTxId) throws IOException {
     throw new UnsupportedOperationException();
   }
+
+  @Override
+  public void doPreUpgrade() throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void doUpgrade(Storage storage) throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void doFinalize() throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage,
+      int targetLayoutVersion) throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void doRollback() throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public long getJournalCTime() throws IOException {
+    throw new UnsupportedOperationException();
+  }
 }

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Mon Mar 24 21:18:52 2014
@@ -415,7 +415,8 @@ public class BackupNode extends NameNode
     return DFSUtil.getBackupNameServiceId(conf);
   }
 
-  protected HAState createHAState() {
+  @Override
+  protected HAState createHAState(StartupOption startOpt) {
     return new BackupState();
   }
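BackupJournalManager opts out of the entire new upgrade surface by throwing UnsupportedOperationException from every method, so a caller that may encounter such a manager has to treat it as non-upgradable. A purely illustrative sketch of one possible guard, not from the patch:

    import java.io.IOException;

    class UpgradeSupportSketch {
      interface Ctime { long getJournalCTime() throws IOException; }

      static boolean isUpgradable(Ctime jm) {
        try {
          jm.getJournalCTime(); // read-only probe of the upgrade surface
          return true;
        } catch (UnsupportedOperationException e) {
          return false;
        } catch (IOException e) {
          return true; // the surface exists; the failure is an I/O problem
        }
      }
    }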
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1581068&r1=1581067&r2=1581068&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon Mar 24 21:18:52 2014
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddBlockOp;
@@ -72,6 +73,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
@@ -83,7 +85,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -256,10 +257,12 @@ public class FSEditLog implements LogsPu
       if (u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
         StorageDirectory sd = storage.getStorageDirectory(u);
         if (sd != null) {
-          journalSet.add(new FileJournalManager(conf, sd, storage), required);
+          journalSet.add(new FileJournalManager(conf, sd, storage),
+              required, sharedEditsDirs.contains(u));
         }
       } else {
-        journalSet.add(createJournal(u), required);
+        journalSet.add(createJournal(u), required,
+            sharedEditsDirs.contains(u));
       }
     }
@@ -1330,7 +1333,59 @@ public class FSEditLog implements LogsPu
       // TODO: are we sure this is OK?
     }
   }
-  
+
+  public long getSharedLogCTime() throws IOException {
+    for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
+      if (jas.isShared()) {
+        return jas.getManager().getJournalCTime();
+      }
+    }
+    throw new IOException("No shared log found.");
+  }
+
+  public synchronized void doPreUpgradeOfSharedLog() throws IOException {
+    for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
+      if (jas.isShared()) {
+        jas.getManager().doPreUpgrade();
+      }
+    }
+  }
+
+  public synchronized void doUpgradeOfSharedLog() throws IOException {
+    for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
+      if (jas.isShared()) {
+        jas.getManager().doUpgrade(storage);
+      }
+    }
+  }
+
+  public synchronized void doFinalizeOfSharedLog() throws IOException {
+    for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
+      if (jas.isShared()) {
+        jas.getManager().doFinalize();
+      }
+    }
+  }
+
+  public synchronized boolean canRollBackSharedLog(Storage prevStorage,
+      int targetLayoutVersion) throws IOException {
+    for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
+      if (jas.isShared()) {
+        return jas.getManager().canRollBack(storage, prevStorage,
+            targetLayoutVersion);
+      }
+    }
+    throw new IOException("No shared log found.");
+  }
+
+  public synchronized void doRollback() throws IOException {
+    for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
+      if (jas.isShared()) {
+        jas.getManager().doRollback();
+      }
+    }
+  }
+
   public synchronized void discardSegments(long markerTxid)
       throws IOException {
     for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
@@ -1469,4 +1524,5 @@ public class FSEditLog implements LogsPu
           + uri, e);
     }
   }
+
 }
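These FSEditLog methods locate the shared journal (now tagged when registered via JournalSet.add(..., shared)) and forward the upgrade operations to its journal manager. A hedged sketch of how the hooks appear intended to compose during an HA upgrade; the driver is hypothetical and omits error handling and coordination with the local storage directories:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.common.Storage;
    import org.apache.hadoop.hdfs.server.namenode.FSEditLog;

    class SharedEditsUpgradeSketch {
      static void upgrade(FSEditLog editLog, Storage prevStorage, int targetLV)
          throws IOException {
        editLog.doPreUpgradeOfSharedLog(); // stage previous state on the JNs
        editLog.doUpgradeOfSharedLog();    // write the upgraded VERSION
        // ... upgrade local directories, then either commit:
        editLog.doFinalizeOfSharedLog();
        // ... or verify and roll back instead:
        // if (editLog.canRollBackSharedLog(prevStorage, targetLV)) {
        //   editLog.doRollback();
        // }
      }
    }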