Subject: svn commit: r1325052 [2/3] - in /hadoop/common/branches/HDFS-3092/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/src/contrib/ hadoop-hdfs/src/contrib/fuse-dfs/ hadoop-hdfs/src/contrib/fuse-dfs/src/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java...
Date: Wed, 11 Apr 2012 22:51:15 -0000
To: hdfs-commits@hadoop.apache.org
From: szetszwo@apache.org
Message-Id: <20120411225118.7F5122388B3A@eris.apache.org>

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Wed Apr 11 22:51:10 2012
@@ -33,10 +33,14 @@ import org.apache.hadoop.HadoopIllegalAr
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceStatus;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -47,8 +51,10 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
@@ -65,7 +71,7 @@ import com.google.common.collect.Sets;
  */
 @InterfaceAudience.Private
 public class BootstrapStandby implements Tool, Configurable {
-  private static final Log LOG = LogFactory.getLog(BootstrapStandby.class);
+  private static final Log LOG = LogFactory.getLog(BootstrapStandby.class);
   private String nsId;
   private String nnId;
   private String otherNNId;
@@ -79,7 +85,13 @@ public class BootstrapStandby implements
   private boolean force = false;
   private boolean interactive = true;
-  
+
+  // Exit/return codes.
+  static final int ERR_CODE_FAILED_CONNECT = 2;
+  static final int ERR_CODE_INVALID_VERSION = 3;
+  static final int ERR_CODE_OTHER_NN_NOT_ACTIVE = 4;
+  static final int ERR_CODE_ALREADY_FORMATTED = 5;
+  static final int ERR_CODE_LOGS_UNAVAILABLE = 6;

   public int run(String[] args) throws Exception {
     SecurityUtil.initKrb5CipherSuites();
@@ -121,24 +133,43 @@ public class BootstrapStandby implements
     System.err.println("Usage: " + this.getClass().getSimpleName() +
         "[-force] [-nonInteractive]");
   }
+
+  private NamenodeProtocol createNNProtocolProxy()
+      throws IOException {
+    return NameNodeProxies.createNonHAProxy(getConf(),
+        otherIpcAddr, NamenodeProtocol.class,
+        UserGroupInformation.getLoginUser(), true)
+        .getProxy();
+  }
+
+  private HAServiceProtocol createHAProtocolProxy()
+      throws IOException {
+    return new NNHAServiceTarget(new HdfsConfiguration(conf),
+        nsId, otherNNId).getProxy(conf, 15000);
+  }

   private int doRun() throws IOException {
-    ProxyAndInfo<NamenodeProtocol> proxyAndInfo = NameNodeProxies.createNonHAProxy(getConf(),
-        otherIpcAddr, NamenodeProtocol.class,
-        UserGroupInformation.getLoginUser(), true);
-    NamenodeProtocol proxy = proxyAndInfo.getProxy();
+
+    NamenodeProtocol proxy = createNNProtocolProxy();
     NamespaceInfo nsInfo;
     try {
       nsInfo = proxy.versionRequest();
-      checkLayoutVersion(nsInfo);
     } catch (IOException ioe) {
       LOG.fatal("Unable to fetch namespace information from active NN at " +
           otherIpcAddr + ": " + ioe.getMessage());
       if (LOG.isDebugEnabled()) {
         LOG.debug("Full exception trace", ioe);
       }
-      return 1;
+      return ERR_CODE_FAILED_CONNECT;
     }
+
+    if (!checkLayoutVersion(nsInfo)) {
+      LOG.fatal("Layout version on remote node (" +
+          nsInfo.getLayoutVersion() + ") does not match " +
+          "this node's layout version (" + HdfsConstants.LAYOUT_VERSION + ")");
+      return ERR_CODE_INVALID_VERSION;
+    }
+
     System.out.println(
         "=====================================================\n" +
@@ -153,12 +184,35 @@ public class BootstrapStandby implements
         "  Layout version: " + nsInfo.getLayoutVersion() + "\n" +
         "=====================================================");
+
+    // Ensure the other NN is active - we can't force it to roll edit logs
+    // below if it's not active.
+    if (!isOtherNNActive()) {
+      String err = "NameNode " + nsId + "." +
+          nnId + " at " + otherIpcAddr +
+          " is not currently in ACTIVE state.";
+      if (!interactive) {
+        LOG.fatal(err + " Please transition it to " +
+            "active before attempting to bootstrap a standby node.");
+        return ERR_CODE_OTHER_NN_NOT_ACTIVE;
+      }
+
+      System.err.println(err);
+      if (ToolRunner.confirmPrompt(
+          "Do you want to automatically transition it to active now?")) {
+        transitionOtherNNActive();
+      } else {
+        LOG.fatal("User aborted. Exiting without bootstrapping standby.");
+        return ERR_CODE_OTHER_NN_NOT_ACTIVE;
+      }
+    }
+
+
     // Check with the user before blowing away data.
     if (!NameNode.confirmFormat(
             Sets.union(Sets.newHashSet(dirsToFormat),
                 Sets.newHashSet(editUrisToFormat)),
             force, interactive)) {
-      return 1;
+      return ERR_CODE_ALREADY_FORMATTED;
     }

     // Force the active to roll its log
@@ -180,7 +234,7 @@ public class BootstrapStandby implements
     // Ensure that we have enough edits already in the shared directory to
     // start up from the last checkpoint on the active.
     if (!checkLogsAvailableForRead(image, imageTxId, rollTxId)) {
-      return 1;
+      return ERR_CODE_LOGS_UNAVAILABLE;
     }

     image.getStorage().writeTransactionIdFileToStorage(rollTxId);
@@ -193,6 +247,14 @@ public class BootstrapStandby implements
     return 0;
   }
+
+  private void transitionOtherNNActive()
+      throws AccessControlException, ServiceFailedException, IOException {
+    LOG.info("Transitioning the running namenode to active...");
+    createHAProtocolProxy().transitionToActive();
+    LOG.info("Successful");
+  }
+
   private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
       long rollTxId) {
@@ -225,12 +287,14 @@ public class BootstrapStandby implements
     }
   }

-  private void checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
-    if (nsInfo.getLayoutVersion() != HdfsConstants.LAYOUT_VERSION) {
-      throw new IOException("Layout version on remote node (" +
-          nsInfo.getLayoutVersion() + ") does not match " +
-          "this node's layout version (" + HdfsConstants.LAYOUT_VERSION + ")");
-    }
+  private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
+    return (nsInfo.getLayoutVersion() == HdfsConstants.LAYOUT_VERSION);
+  }
+
+  private boolean isOtherNNActive()
+      throws AccessControlException, IOException {
+    HAServiceStatus status = createHAProtocolProxy().getServiceStatus();
+    return status.getState() == HAServiceState.ACTIVE;
   }

   private void parseConfAndFindOtherNN() throws IOException {
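The hunks above replace BootstrapStandby's blanket "return 1" with distinct ERR_CODE_* values, so a caller can tell a connection failure from a layout mismatch or an aborted format. Below is a minimal sketch of a wrapper that maps the codes back to messages; the wrapper class is hypothetical and not part of this commit, and the literal values simply mirror the constants in the diff (they are package-private, so external code cannot reference them directly):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
    import org.apache.hadoop.util.ToolRunner;

    // Hypothetical wrapper: BootstrapStandby implements Tool, so it can be
    // driven through ToolRunner like any other Hadoop command.
    public class BootstrapStandbyRunner {
      public static void main(String[] args) throws Exception {
        int rc = ToolRunner.run(new Configuration(), new BootstrapStandby(), args);
        switch (rc) {
          case 0:  System.out.println("Standby bootstrapped."); break;
          case 2:  System.err.println("Could not connect to the active NN (ERR_CODE_FAILED_CONNECT)."); break;
          case 3:  System.err.println("Layout version mismatch (ERR_CODE_INVALID_VERSION)."); break;
          case 4:  System.err.println("Other NN is not ACTIVE (ERR_CODE_OTHER_NN_NOT_ACTIVE)."); break;
          case 5:  System.err.println("Name dirs already formatted (ERR_CODE_ALREADY_FORMATTED)."); break;
          case 6:  System.err.println("Required edit logs unavailable (ERR_CODE_LOGS_UNAVAILABLE)."); break;
          default: System.err.println("Unexpected exit code " + rc);
        }
        System.exit(rc);
      }
    }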
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java Wed Apr 11 22:51:10 2012
@@ -25,14 +25,6 @@ package org.apache.hadoop.hdfs.server.pr
  * each datanode.
  */
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-
 /**
  * Balancer bandwidth command instructs each datanode to change its value for
  * the max amount of network bandwidth it may use during the block balancing
@@ -71,35 +63,4 @@ public class BalancerBandwidthCommand ex
   public long getBalancerBandwidthValue() {
     return this.bandwidth;
   }
-
-  // ///////////////////////////////////////////////
-  // Writable
-  // ///////////////////////////////////////////////
-  static { // register a ctor
-    WritableFactories.setFactory(BalancerBandwidthCommand.class, new WritableFactory() {
-      public Writable newInstance() {
-        return new BalancerBandwidthCommand();
-      }
-    });
-  }
-
-  /**
-   * Writes the bandwidth payload to the Balancer Bandwidth Command packet.
-   * @param out DataOutput stream used for writing commands to the datanode.
-   * @throws IOException
-   */
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeLong(this.bandwidth);
-  }
-
-  /**
-   * Reads the bandwidth payload from the Balancer Bandwidth Command packet.
-   * @param in DataInput stream used for reading commands to the datanode.
-   * @throws IOException
-   */
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.bandwidth = in.readLong();
-  }
 }
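This and the following hunks strip the hand-written Writable plumbing (the WritableFactories registration plus write/readFields) from the datanode command classes; they become plain value objects, with wire serialization handled elsewhere on this branch. A minimal sketch of the surviving API, assuming BalancerBandwidthCommand's long-valued constructor from the unmodified part of the file:

    import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;

    public class BandwidthCommandExample {
      public static void main(String[] args) {
        // Cap the balancer at 1 MB/s on each datanode.
        BalancerBandwidthCommand cmd = new BalancerBandwidthCommand(1024L * 1024L);
        System.out.println("bandwidth = " + cmd.getBalancerBandwidthValue());
      }
    }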
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java Wed Apr 11 22:51:10 2012
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;

-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.List;

 import org.apache.hadoop.classification.InterfaceAudience;
@@ -27,11 +24,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-
 /****************************************************
  * A BlockCommand is an instruction to a datanode
@@ -58,8 +50,6 @@ public class BlockCommand extends Datano
   Block blocks[];
   DatanodeInfo targets[][];

-  public BlockCommand() {}
-
   /**
    * Create BlockCommand for transferring blocks to another datanode
    * @param blocktargetlist    blocks to be transferred
@@ -110,50 +100,4 @@ public class BlockCommand extends Datano
   public DatanodeInfo[][] getTargets() {
     return targets;
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {   // register a ctor
-    WritableFactories.setFactory
-      (BlockCommand.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new BlockCommand(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    Text.writeString(out, poolId);
-    out.writeInt(blocks.length);
-    for (int i = 0; i < blocks.length; i++) {
-      blocks[i].write(out);
-    }
-    out.writeInt(targets.length);
-    for (int i = 0; i < targets.length; i++) {
-      out.writeInt(targets[i].length);
-      for (int j = 0; j < targets[i].length; j++) {
-        targets[i][j].write(out);
-      }
-    }
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    this.poolId = Text.readString(in);
-    this.blocks = new Block[in.readInt()];
-    for (int i = 0; i < blocks.length; i++) {
-      blocks[i] = new Block();
-      blocks[i].readFields(in);
-    }
-
-    this.targets = new DatanodeInfo[in.readInt()][];
-    for (int i = 0; i < targets.length; i++) {
-      this.targets[i] = new DatanodeInfo[in.readInt()];
-      for (int j = 0; j < targets[i].length; j++) {
-        targets[i][j] = new DatanodeInfo();
-        targets[i][j].readFields(in);
-      }
-    }
-  }
 }
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java Wed Apr 11 22:51:10 2012
@@ -17,9 +17,6 @@
 */
 package org.apache.hadoop.hdfs.server.protocol;

-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.Collection;
 import java.util.ArrayList;

@@ -28,9 +25,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;

 import com.google.common.base.Joiner;

@@ -62,14 +56,6 @@ public class BlockRecoveryCommand extend
     private long newGenerationStamp;

     /**
-     * Create empty RecoveringBlock.
-     */
-    public RecoveringBlock() {
-      super();
-      newGenerationStamp = -1L;
-    }
-
-    /**
      * Create RecoveringBlock.
      */
     public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS) {
@@ -84,27 +70,6 @@ public class BlockRecoveryCommand extend
     public long getNewGenerationStamp() {
       return newGenerationStamp;
     }
-
-    ///////////////////////////////////////////
-    // Writable
-    ///////////////////////////////////////////
-    static {   // register a ctor
-      WritableFactories.setFactory
-        (RecoveringBlock.class,
-         new WritableFactory() {
-           public Writable newInstance() { return new RecoveringBlock(); }
-         });
-    }
-
-    public void write(DataOutput out) throws IOException {
-      super.write(out);
-      out.writeLong(newGenerationStamp);
-    }
-
-    public void readFields(DataInput in) throws IOException {
-      super.readFields(in);
-      newGenerationStamp = in.readLong();
-    }
   }

   /**
@@ -149,34 +114,4 @@ public class BlockRecoveryCommand extend
     sb.append("\n)");
     return sb.toString();
   }
-
-  ///////////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////////
-  static {   // register a ctor
-    WritableFactories.setFactory
-      (BlockRecoveryCommand.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new BlockRecoveryCommand(); }
-       });
-  }
-
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    out.writeInt(recoveringBlocks.size());
-    for(RecoveringBlock block : recoveringBlocks) {
-      block.write(out);
-    }
-  }
-
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    int numBlocks = in.readInt();
-    recoveringBlocks = new ArrayList<RecoveringBlock>(numBlocks);
-    for(int i = 0; i < numBlocks; i++) {
-      RecoveringBlock b = new RecoveringBlock();
-      b.readFields(in);
-      add(b);
-    }
-  }
 }
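RecoveringBlock similarly loses the no-arg constructor that existed only so Writable could instantiate it before readFields(). A short sketch of building a recovery command through what remains; the no-arg BlockRecoveryCommand constructor and the add()/getRecoveringBlocks() accessors are assumed from the unmodified parts of the file, and the ExtendedBlock values are illustrative:

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
    import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

    public class RecoveryCommandExample {
      public static void main(String[] args) {
        ExtendedBlock blk = new ExtendedBlock("BP-1", 1L, 1024L, 100L);
        DatanodeInfo[] locs = new DatanodeInfo[0];

        BlockRecoveryCommand cmd = new BlockRecoveryCommand();
        // Only the three-arg RecoveringBlock constructor remains.
        cmd.add(new RecoveringBlock(blk, locs, 101L));

        for (RecoveringBlock rb : cmd.getRecoveringBlocks()) {
          System.out.println("recover with new generation stamp " + rb.getNewGenerationStamp());
        }
      }
    }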
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java Wed Apr 11 22:51:10 2012
@@ -17,16 +17,9 @@
 */
 package org.apache.hadoop.hdfs.server.protocol;

-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;

 /** A class to implement an array of BlockLocations
  *  It provide efficient customized serialization/deserialization methods
@@ -34,23 +27,17 @@ import org.apache.hadoop.io.WritableUtil
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class BlocksWithLocations implements Writable {
+public class BlocksWithLocations {

   /**
    * A class to keep track of a block and its locations
    */
   @InterfaceAudience.Private
   @InterfaceStability.Evolving
-  public static class BlockWithLocations implements Writable {
+  public static class BlockWithLocations {
     Block block;
     String datanodeIDs[];

-    /** default constructor */
-    public BlockWithLocations() {
-      block = new Block();
-      datanodeIDs = null;
-    }
-
     /** constructor */
     public BlockWithLocations(Block b, String[] datanodes) {
       block = b;
@@ -66,33 +53,10 @@ public class BlocksWithLocations impleme
     public String[] getDatanodes() {
       return datanodeIDs;
     }
-
-    /** deserialization method */
-    public void readFields(DataInput in) throws IOException {
-      block.readFields(in);
-      int len = WritableUtils.readVInt(in); // variable length integer
-      datanodeIDs = new String[len];
-      for(int i=0; i<len; i++) {
[...]
 ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
 ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
 int headIndex;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java Wed Apr 11 22:51:10 2012
@@ -28,6 +28,7 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;

@@ -80,8 +81,8 @@ public class TestCorruptReplicaInfo exte
       block_ids.add((long)i);
     }

-    DatanodeDescriptor dn1 = new DatanodeDescriptor();
-    DatanodeDescriptor dn2 = new DatanodeDescriptor();
+    DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
+    DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();

     crm.addToCorruptReplicasMap(getBlock(0), dn1, "TEST");
     assertEquals("Number of corrupt blocks not returning correctly",
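The test diffs here and below all make the same substitution: with the Writable-era no-arg DatanodeDescriptor constructor gone, tests obtain descriptors through a DFSTestUtil factory helper instead. A sketch of the idiom, assuming only the helper shown in the hunks:

    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

    public class DescriptorExample {
      public static void main(String[] args) {
        // Replaces: DatanodeDescriptor dn = new DatanodeDescriptor();
        DatanodeDescriptor dn = DFSTestUtil.getLocalDatanodeDescriptor();
        System.out.println("local descriptor: " + dn);
      }
    }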
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java Wed Apr 11 22:51:10 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.bl
 import java.util.ArrayList;

+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;

@@ -36,7 +37,7 @@ public class TestDatanodeDescriptor exte
     final int REMAINING_BLOCKS = 2;
     final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;

-    DatanodeDescriptor dd = new DatanodeDescriptor();
+    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
     for (int i=0; i<MAX_BLOCKS; i++) {
[...]

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1325052&r1=1325051&r2=1325052&view=diff
==============================================================================
[...]
     Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
@@ -59,33 +66,41 @@ public class TestClusterId {
     LOG.info("successfully formated : sd="+sd.getCurrentDir() + ";cid="+cid);
     return cid;
   }
-  
+
   @Before
   public void setUp() throws IOException {
+    System.setSecurityManager(new NoExitSecurityManager());
+
     String baseDir = System.getProperty("test.build.data", "build/test/data");

-    hdfsDir = new File(baseDir, "dfs");
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
-      throw new IOException("Could not delete test directory '" +
-          hdfsDir + "'");
+    hdfsDir = new File(baseDir, "dfs/name");
+    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+      throw new IOException("Could not delete test directory '" + hdfsDir + "'");
     }
     LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
+
+    // as some tests might change these values we reset them to defaults before
+    // every test
+    StartupOption.FORMAT.setForceFormat(false);
+    StartupOption.FORMAT.setInteractiveFormat(true);
+
+    config = new Configuration();
+    config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
   }
-  
+
   @After
   public void tearDown() throws IOException {
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
-      throw new IOException("Could not tearDown test directory '" +
-          hdfsDir + "'");
+    System.setSecurityManager(null);
+
+    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+      throw new IOException("Could not tearDown test directory '" + hdfsDir
+          + "'");
     }
   }
-  
+
   @Test
   public void testFormatClusterIdOption() throws IOException {
-    Configuration config = new Configuration();
-    config.set(DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
-
     // 1. should format without cluster id
     //StartupOption.FORMAT.setClusterId("");
     NameNode.format(config);
@@ -107,4 +122,356 @@ public class TestClusterId {
     String newCid = getClusterId(config);
     assertFalse("ClusterId should not be the same", newCid.equals(cid));
   }
-}
+
+  /**
+   * Test namenode format with -format option. Format should succeed.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testFormat() throws IOException {
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when an empty name directory
+   * exists. Format should succeed.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithEmptyDir() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -force options when name directory
+   * exists. Format should succeed.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithForce() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format", "-force" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -force -clusterid option when name
+   * directory exists. Format should succeed.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithForceAndClusterId() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String myId = "testFormatWithForceAndClusterId";
+    String[] argv = { "-format", "-force", "-clusterid", myId };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cId = getClusterId(config);
+    assertEquals("ClusterIds do not match", myId, cId);
+  }
+
+  /**
+   * Test namenode format with -clusterid -force option. Format command should
+   * fail as no cluster id was provided.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithInvalidClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid", "-force" };
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -clusterid options. Format should fail
+   * as no clusterid was sent.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNoClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid" };
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -clusterid and empty clusterid. Format
+   * should fail as no valid id was provided.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithEmptyClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid", "" };
+
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive options when a non empty
+   * name directory exists. Format should not succeed.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractive() throws IOException {
+
+    // we check for a non empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    String[] argv = { "-format", "-nonInteractive" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have been aborted with exit code 1", 1,
+          e.status);
+    }
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive options when name
+   * directory does not exist. Format should succeed.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractiveNameDirDoesNotExit()
+      throws IOException {
+
+    String[] argv = { "-format", "-nonInteractive" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive -force options. Format
+   * should succeed.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractiveAndForce() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format", "-nonInteractive", "-force" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when a non empty name directory
+   * exists. Enter Y when prompted and the format should succeed.
+   *
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testFormatWithoutForceEnterYes() throws IOException,
+      InterruptedException {
+
+    // we check for a non empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    // capture the input stream
+    InputStream origIn = System.in;
+    ByteArrayInputStream bins = new ByteArrayInputStream("Y\n".getBytes());
+    System.setIn(bins);
+
+    String[] argv = { "-format" };
+
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    System.setIn(origIn);
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when a non empty name directory
+   * exists. Enter N when prompted and format should be aborted.
+   *
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testFormatWithoutForceEnterNo() throws IOException,
+      InterruptedException {
+
+    // we check for a non empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    // capture the input stream
+    InputStream origIn = System.in;
+    ByteArrayInputStream bins = new ByteArrayInputStream("N\n".getBytes());
+    System.setIn(bins);
+
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should not have succeeded", 1, e.status);
+    }
+
+    System.setIn(origIn);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  private static class ExitException extends SecurityException {
+    private static final long serialVersionUID = 1L;
+    public final int status;
+
+    public ExitException(int status) {
+      super("There is no escape!");
+      this.status = status;
+    }
+  }
+
+  private static class NoExitSecurityManager extends SecurityManager {
+    @Override
+    public void checkPermission(Permission perm) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkPermission(Permission perm, Object context) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkExit(int status) {
+      super.checkExit(status);
+      throw new ExitException(status);
+    }
+  }
+}
\ No newline at end of file
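The new TestClusterId tests lean on the NoExitSecurityManager/ExitException pair defined at the bottom of the file: NameNode.createNameNode() ends in System.exit(), so each test installs a SecurityManager that converts the exit into a catchable exception carrying the status. A self-contained sketch of the same idiom (JDK 7-era API; SecurityManager is deprecated for removal in recent JDKs):

    import java.security.Permission;

    public class NoExitDemo {
      static class ExitException extends SecurityException {
        final int status;
        ExitException(int status) {
          super("intercepted System.exit");
          this.status = status;
        }
      }

      static class NoExitSecurityManager extends SecurityManager {
        @Override public void checkPermission(Permission perm) { /* allow all */ }
        @Override public void checkPermission(Permission perm, Object ctx) { /* allow all */ }
        @Override public void checkExit(int status) {
          super.checkExit(status);          // run the normal permission check first
          throw new ExitException(status);  // then veto the exit
        }
      }

      public static void main(String[] args) {
        System.setSecurityManager(new NoExitSecurityManager());
        try {
          System.exit(42);
        } catch (ExitException e) {
          System.out.println("intercepted exit status: " + e.status);
        } finally {
          System.setSecurityManager(null);
        }
      }
    }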