From: todd@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r1308260 [4/4] - in /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/sbin/ hadoop-hdfs/ hadoop-hdfs/src/main/bin/ hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/ hadoop-hdfs/src/main/java/ hadoop...
Date: Mon, 02 Apr 2012 07:28:49 -0000
Message-Id: <20120402072854.B32392388BBD@eris.apache.org>

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java Mon Apr  2 07:28:42 2012
@@ -183,7 +183,7 @@ public class TestDeleteBlockPool {
     Assert.assertEquals(1, dn1.getAllBpOs().length);

     DFSAdmin admin = new DFSAdmin(nn1Conf);
-    String dn1Address = dn1.getSelfAddr().getHostName()+":"+dn1.getIpcPort();
+    String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
     String[] args = { "-deleteBlockPool", dn1Address, bpid2 };

     int ret = admin.run(args);
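The one-line change above is part of a branch-wide split of the old combined
accessors: getSelfAddr() and the "name" string give way to purpose-specific
methods. A hedged sketch of the mapping as it recurs through the hunks in this
mail (the method names come from the diffs themselves; the DataNode instance
is assumed to come from a running MiniDFSCluster, as in the tests):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;

    class AccessorMigrationSketch {
      // -deleteBlockPool expects ip:ipcPort; the new code uses the datanode's
      // registered IP directly instead of a reverse hostname lookup.
      static String adminTarget(DataNode dn) {
        return dn.getDatanodeId().getIpAddr() + ":" + dn.getIpcPort();
      }

      // Block-transfer socket address; previously dn.getSelfAddr(), now an
      // explicitly named accessor (see the TestDiskError hunk below).
      static InetSocketAddress xferTarget(DataNode dn) {
        return dn.getXferAddress();
      }
    }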
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Mon Apr  2 07:28:42 2012
@@ -136,7 +136,7 @@ public class TestDiskError {
     DataNode datanode = cluster.getDataNodes().get(sndNode);

     // replicate the block to the second datanode
-    InetSocketAddress target = datanode.getSelfAddr();
+    InetSocketAddress target = datanode.getXferAddress();
     Socket s = new Socket(target.getAddress(), target.getPort());
     // write the header.
     DataOutputStream out = new DataOutputStream(s.getOutputStream());

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Mon Apr  2 07:28:42 2012
@@ -348,7 +348,7 @@ public class TestInterDatanodeProtocol {
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
     DatanodeID fakeDnId = new DatanodeID(
-        "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort());
+        "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
     DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);

     InterDatanodeProtocol proxy = null;
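The six-argument DatanodeID constructor above replaces the old
("host:port", storageID, infoPort, ipcPort) form, but the diff shows only
positional values. The annotated sketch below spells out the apparent
parameter meanings; the names are my inference from the values the test
passes, not quoted from the branch:

    // fakeDnId for a server listening on addr, as in the test above:
    DatanodeID fakeDnId = new DatanodeID(
        "localhost",      // IP address (was the host part of "host:port")
        "localhost",      // hostname, now carried separately from the IP
        "fake-storage",   // storage ID
        addr.getPort(),   // transfer (data streaming) port
        0,                // info (HTTP) port
        addr.getPort());  // IPC port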
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Mon Apr  2 07:28:42 2012
@@ -766,28 +766,33 @@ public class NNThroughputBenchmark {
     long[] blockReportList;

     /**
-     * Get data-node in the form
-     * <hostname>:<port>
-     * where port is a 6 digit integer.
+     * Return a 6 digit integer port.
      * This is necessary in order to provide lexicographic ordering.
      * Host names are all the same, the ordering goes by port numbers.
      */
-    private static String getNodeName(int port) throws IOException {
-      String machineName = DNS.getDefaultHost("default", "default");
-      String sPort = String.valueOf(100000 + port);
-      if(sPort.length() > 6)
-        throw new IOException("Too many data-nodes.");
-      return machineName + ":" + sPort;
+    private static int getNodePort(int num) throws IOException {
+      int port = 100000 + num;
+      if (String.valueOf(port).length() > 6) {
+        throw new IOException("Too many data-nodes");
+      }
+      return port;
     }

     TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
-      dnRegistration = new DatanodeRegistration(getNodeName(dnIdx));
+      String hostName = DNS.getDefaultHost("default", "default");
+      dnRegistration = new DatanodeRegistration(hostName);
+      dnRegistration.setXferPort(getNodePort(dnIdx));
+      dnRegistration.setHostName(hostName);
       this.blocks = new ArrayList<Block>(blockCapacity);
       this.nrBlocks = 0;
     }

-    String getName() {
-      return dnRegistration.getName();
+    public String toString() {
+      return dnRegistration.toString();
+    }
+
+    String getXferAddr() {
+      return dnRegistration.getXferAddr();
     }

     void register() throws IOException {
@@ -850,8 +855,8 @@ public class NNThroughputBenchmark {
       return blockReportList;
     }

-    public int compareTo(String name) {
-      return getName().compareTo(name);
+    public int compareTo(String xferAddr) {
+      return getXferAddr().compareTo(xferAddr);
     }

   /**
@@ -889,10 +894,12 @@ public class NNThroughputBenchmark {
       for(int t = 0; t < blockTargets.length; t++) {
         DatanodeInfo dnInfo = blockTargets[t];
         DatanodeRegistration receivedDNReg;
-        receivedDNReg = new DatanodeRegistration(dnInfo.getName());
+        receivedDNReg = new DatanodeRegistration(dnInfo.getIpAddr());
         receivedDNReg.setStorageInfo(
             new DataStorage(nsInfo, dnInfo.getStorageID()));
+        receivedDNReg.setXferPort(dnInfo.getXferPort());
         receivedDNReg.setInfoPort(dnInfo.getInfoPort());
+        receivedDNReg.setIpcPort(dnInfo.getIpcPort());
         ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
           blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
@@ -977,10 +984,10 @@ public class NNThroughputBenchmark {
       for(int idx=0; idx < nrDatanodes; idx++) {
         datanodes[idx] = new TinyDatanode(idx, blocksPerReport);
         datanodes[idx].register();
-        assert datanodes[idx].getName().compareTo(prevDNName) > 0
+        assert datanodes[idx].getXferAddr().compareTo(prevDNName) > 0
           : "Data-nodes must be sorted lexicographically.";
         datanodes[idx].sendHeartbeat();
-        prevDNName = datanodes[idx].getName();
+        prevDNName = datanodes[idx].getXferAddr();
       }

       // create files
@@ -1010,7 +1017,7 @@ public class NNThroughputBenchmark {
         LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName, prevBlock, null);
         prevBlock = loc.getBlock();
         for(DatanodeInfo dnInfo : loc.getLocations()) {
-          int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
+          int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
           datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
           ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
                 loc.getBlock().getLocalBlock(),
@@ -1165,9 +1172,9 @@ public class NNThroughputBenchmark {
       for(int i=0; i < nodesToDecommission; i++) {
         TinyDatanode dn = blockReportObject.datanodes[nrDatanodes-1-i];
         numDecommissionedBlocks += dn.nrBlocks;
-        excludeFile.write(dn.getName().getBytes());
+        excludeFile.write(dn.getXferAddr().getBytes());
         excludeFile.write('\n');
-        LOG.info("Datanode " + dn.getName() + " is decommissioned.");
+        LOG.info("Datanode " + dn + " is decommissioned.");
       }
       excludeFile.close();
       nameNodeProto.refreshNodes();
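The getNodePort scheme above is worth unpacking: every simulated datanode
reports the same hostname, so ordering falls entirely to the port, and adding
100000 pins each port to exactly six digits so that string (lexicographic)
order agrees with numeric order -- the property the benchmark's
Arrays.binarySearch over transfer addresses depends on. A self-contained
demonstration (my illustration, not code from this commit):

    public class PortOrderingDemo {
      public static void main(String[] args) {
        String host = "datanode.example.com";    // hypothetical shared hostname
        String a = host + ":" + (100000 + 9);    // node 9  -> "...:100009"
        String b = host + ":" + (100000 + 10);   // node 10 -> "...:100010"
        // With fixed-width ports, string order matches numeric order:
        System.out.println(a.compareTo(b) < 0);  // prints: true
        // Without the offset, "host:9" sorts after "host:10":
        System.out.println((host + ":9").compareTo(host + ":10") < 0);  // false
      }
    }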
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java Mon Apr  2 07:28:42 2012
@@ -156,7 +156,7 @@ public class TestDecommissioningStatus {
       throws IOException {
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-    String nodename = info[nodeIndex].getName();
+    String nodename = info[nodeIndex].getXferAddr();
     System.out.println("Decommissioning node: " + nodename);

     // write nodename into the exclude file.

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Mon Apr  2 07:28:42 2012
@@ -93,6 +93,15 @@ public class TestNameNodeMXBean {
       // get attribute alivenodeinfo
       String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName,
           "LiveNodes"));
+      Map<String, Map<String, Object>> liveNodes =
+          (Map<String, Map<String, Object>>) JSON.parse(alivenodeinfo);
+      assertTrue(liveNodes.size() > 0);
+      for (Map<String, Object> liveNode : liveNodes.values()) {
+        assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
+        assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0);
+        assertTrue(liveNode.containsKey("capacity"));
+        assertTrue(((Long)liveNode.get("capacity")) > 0);
+      }
       Assert.assertEquals(fsn.getLiveNodes(), alivenodeinfo);
       // get attribute deadnodeinfo
       String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
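For context on what the new assertions exercise: the NameNode publishes
LiveNodes as a JSON string attribute on its NameNodeInfo MBean, and the test
parses it back into nested maps. A hedged sketch of that round trip -- the
ObjectName and the Jetty JSON helper match what tests of this era commonly
use, but both are assumptions here rather than quotes from this commit:

    import java.lang.management.ManagementFactory;
    import java.util.Map;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;
    import org.mortbay.util.ajax.JSON;

    public class LiveNodesSketch {
      // Assumes a NameNode running in-process (e.g. via MiniDFSCluster).
      public static void printLiveNodes() throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name =
            new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        String json = (String) mbs.getAttribute(name, "LiveNodes");
        @SuppressWarnings("unchecked")
        Map<String, Map<String, Object>> liveNodes =
            (Map<String, Map<String, Object>>) JSON.parse(json);
        // Each entry maps a datanode to stats such as capacity and
        // nonDfsUsedSpace -- the keys the test asserts on.
        for (Map.Entry<String, Map<String, Object>> e : liveNodes.entrySet()) {
          System.out.println(e.getKey() + " -> " + e.getValue().get("capacity"));
        }
      }
    }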
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java Mon Apr  2 07:28:42 2012
@@ -167,7 +167,7 @@ public class TestStandbyIsHot {

       // Stop the DN.
       DataNode dn = cluster.getDataNodes().get(0);
-      String dnName = dn.getDatanodeId().getName();
+      String dnName = dn.getDatanodeId().getXferAddr();
       DataNodeProperties dnProps = cluster.stopDataNode(0);

       // Make sure both NNs register it as dead.

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java Mon Apr  2 07:28:42 2012
@@ -65,12 +65,11 @@ public class TestOfflineEditsViewer {
    *
    * These are the opcodes that are not used anymore, some
    * are marked deprecated, we need to include them here to make
-   * sure we exclude them when checking for completness of testing,
+   * sure we exclude them when checking for completeness of testing,
    * that's why the "deprecation" warnings are suppressed.
    */
   @SuppressWarnings("deprecation")
   private static void initializeObsoleteOpCodes() {
-    // these are obsolete
     obsoleteOpCodes.put(FSEditLogOpCodes.OP_DATANODE_ADD, true);
     obsoleteOpCodes.put(FSEditLogOpCodes.OP_DATANODE_REMOVE, true);
     obsoleteOpCodes.put(FSEditLogOpCodes.OP_SET_NS_QUOTA, true);
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java Mon Apr  2 07:28:42 2012
@@ -30,16 +30,16 @@ import org.apache.hadoop.hdfs.server.blo
 public class TestNetworkTopology extends TestCase {
   private final static NetworkTopology cluster = new NetworkTopology();
   private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
-    new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h4:5020"), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h5:5020"), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h6:5020"), "/d2/r3"),
-    new DatanodeDescriptor(new DatanodeID("h7:5020"), "/d2/r3")
+    new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3"),
+    new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r3")
   };
   private final static DatanodeDescriptor NODE =
-    new DatanodeDescriptor(new DatanodeID("h8:5020"), "/d2/r4");
+    new DatanodeDescriptor(new DatanodeID("h8", 5020), "/d2/r4");

   static {
     for(int i=0; i<dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
     }
   }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Mon Apr  2 07:28:42 2012
          <comparator>
            <type>SubstringComparator</type>
-           <expected-output>setSpaceQuota: java.io.FileNotFoundException: Directory does not exist: /test1</expected-output>
+           <expected-output>setSpaceQuota: Directory does not exist: /test1</expected-output>
          </comparator>
@@ -15486,7 +15486,7 @@
          <comparator>
            <type>SubstringComparator</type>
-           <expected-output>clrQuota: java.io.FileNotFoundException: Directory does not exist: /test1</expected-output>
+           <expected-output>clrQuota: Directory does not exist: /test1</expected-output>
          </comparator>
@@ -15506,7 +15506,7 @@
          <comparator>
            <type>RegexpComparator</type>
-           <expected-output>put: org.apache.hadoop.hdfs.protocol.DSQuotaExceededException: The DiskSpace quota of /dir1 is exceeded: quota=1.0k diskspace consumed=[0-9.]+[kmg]*</expected-output>
+           <expected-output>put: The DiskSpace quota of /dir1 is exceeded: quota=1.0k diskspace consumed=[0-9.]+[kmg]*</expected-output>
          </comparator>
@@ -15526,7 +15526,7 @@
          <comparator>
            <type>SubstringComparator</type>
-           <expected-output>mkdir: org.apache.hadoop.hdfs.protocol.NSQuotaExceededException: The NameSpace quota (directories and files) of directory /dir1 is exceeded: quota=1 file count=2</expected-output>
+           <expected-output>mkdir: The NameSpace quota (directories and files) of directory /dir1 is exceeded: quota=1 file count=2</expected-output>
          </comparator>
@@ -15741,6 +15741,10 @@
          <comparator>
            <type>RegexpComparator</type>
+           <expected-output>Hostname: [-.a-zA-z0-9\.]+</expected-output>
+         </comparator>
+         <comparator>
+           <type>RegexpComparator</type>
            <expected-output>Decommission Status : [a-zA-Z]+</expected-output>
          </comparator>
@@ -15792,7 +15796,7 @@
          <comparator>
            <type>TokenComparator</type>
-           <expected-output>saveNamespace: java.io.IOException: Safe mode should be turned ON in order to create namespace image.</expected-output>
+           <expected-output>saveNamespace: Safe mode should be turned ON in order to create namespace image.</expected-output>
          </comparator>
@@ -15838,6 +15842,10 @@
          <comparator>
            <type>RegexpComparator</type>
+           <expected-output>Hostname: [-.a-zA-z0-9\.]+</expected-output>
+         </comparator>
+         <comparator>
+           <type>RegexpComparator</type>
            <expected-output>Decommission Status : [a-zA-Z]+</expected-output>
          </comparator>
@@ -16168,7 +16176,7 @@
          <comparator>
            <type>SubstringComparator</type>
-           <expected-output>setQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+           <expected-output>setQuota: Cannot set quota on /test. Name node is in safe mode.</expected-output>
          </comparator>
@@ -16187,7 +16195,7 @@
          <comparator>
            <type>SubstringComparator</type>
-           <expected-output>clrQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+           <expected-output>clrQuota: Cannot set quota on /test. Name node is in safe mode.</expected-output>
          </comparator>
@@ -16207,7 +16215,7 @@
          <comparator>
            <type>SubstringComparator</type>
-           <expected-output>setSpaceQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+           <expected-output>setSpaceQuota: Cannot set quota on /test. Name node is in safe mode.</expected-output>
          </comparator>
@@ -16226,7 +16234,7 @@
          <comparator>
            <type>SubstringComparator</type>
-           <expected-output>clrSpaceQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+           <expected-output>clrSpaceQuota: Cannot set quota on /test. Name node is in safe mode.</expected-output>
          </comparator>
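The testHDFSConf.xml edits above drop exception class names from the expected
outputs, matching the cleaner messages the shell commands now print. The CLI
test framework compares each command's output against these entries; roughly,
a SubstringComparator checks containment and a RegexpComparator matches a
pattern. A hedged Java sketch of those semantics (the real comparators live in
org.apache.hadoop.cli.util; this is my approximation of their contract, not
their source):

    import java.util.regex.Pattern;

    public class ComparatorSketch {
      static boolean substringMatches(String actual, String expected) {
        return actual.contains(expected);                         // SubstringComparator
      }
      static boolean regexpMatches(String actual, String expected) {
        return Pattern.compile(expected).matcher(actual).find();  // RegexpComparator
      }
      public static void main(String[] args) {
        String output = "setSpaceQuota: Directory does not exist: /test1";
        System.out.println(substringMatches(output,
            "setSpaceQuota: Directory does not exist: /test1"));  // true
        System.out.println(regexpMatches("Hostname: dn1.example.com",
            "Hostname: [-.a-zA-z0-9\\.]+"));                      // true
      }
    }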