From: todd@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r1387449 [2/2] - in /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ hadoop-hdfs-httpfs/src/site/apt/ hadoop-hdfs-httpfs/src/test/java/org/apache/hado...
Date: Wed, 19 Sep 2012 04:35:01 -0000
Message-Id: <20120919043504.55E0A2388A4A@eris.apache.org>

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java Wed Sep 19 04:34:55 2012 @@ -25,6 +25,7 @@ import static org.junit.Assert.fail; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Random; +import java.util.concurrent.TimeoutException; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -100,7 +101,7 @@ public class TestFileStatus { } private void checkFile(FileSystem fileSys, Path name, int repl) - throws IOException { + throws IOException, InterruptedException, TimeoutException { DFSTestUtil.waitReplication(fileSys, name, (short) repl); } @@ -129,7 +130,7 @@ public class TestFileStatus { /** Test the FileStatus obtained calling getFileStatus on a file */ @Test - public void testGetFileStatusOnFile() throws IOException { + public void testGetFileStatusOnFile() throws Exception { checkFile(fs, file1, 1); // test getFileStatus on a file FileStatus status = fs.getFileStatus(file1); Modified:
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Wed Sep 19 04:34:55 2012 @@ -17,8 +17,7 @@ */ package org.apache.hadoop.hdfs; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.*; import java.io.IOException; import java.net.InetSocketAddress; @@ -28,48 +27,178 @@ import java.util.Map; import java.util.Random; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.GenerationStamp; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.util.Time; import org.junit.Test; + /** - * This class tests if block replacement request to data nodes work correctly. + * This class tests if getblocks request works correctly. */ public class TestGetBlocks { + private static final int blockSize = 8192; + private static final String racks[] = new String[] { "/d1/r1", "/d1/r1", + "/d1/r2", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3" }; + private static final int numDatanodes = racks.length; + + /** + * Stop the heartbeat of a datanode in the MiniDFSCluster + * + * @param cluster + * The MiniDFSCluster + * @param hostName + * The hostName of the datanode to be stopped + * @return The DataNode whose heartbeat has been stopped + */ + private DataNode stopDataNodeHeartbeat(MiniDFSCluster cluster, String hostName) { + for (DataNode dn : cluster.getDataNodes()) { + if (dn.getDatanodeId().getHostName().equals(hostName)) { + DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true); + return dn; + } + } + return null; + } + + /** + * Test if the datanodes returned by + * {@link ClientProtocol#getBlockLocations(String, long, long)} is correct + * when stale nodes checking is enabled. 
Also test the scenario in which 1) + stale node checking is enabled, 2) a write is in progress, and 3) a + datanode becomes stale, all at the same time + * + * @throws Exception + */ + @Test + public void testReadSelectNonStaleDatanode() throws Exception { + HdfsConfiguration conf = new HdfsConfiguration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true); + long staleInterval = 30 * 1000 * 60; + conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, + staleInterval); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(numDatanodes).racks(racks).build(); + + cluster.waitActive(); + InetSocketAddress addr = new InetSocketAddress("localhost", + cluster.getNameNodePort()); + DFSClient client = new DFSClient(addr, conf); + List<DatanodeDescriptor> nodeInfoList = cluster.getNameNode() + .getNamesystem().getBlockManager().getDatanodeManager() + .getDatanodeListForReport(DatanodeReportType.LIVE); + assertEquals("Unexpected number of datanodes", numDatanodes, + nodeInfoList.size()); + FileSystem fileSys = cluster.getFileSystem(); + FSDataOutputStream stm = null; + try { + // do the writing but do not close the FSDataOutputStream + // in order to mimic the ongoing writing + final Path fileName = new Path("/file1"); + stm = fileSys.create( + fileName, + true, + fileSys.getConf().getInt( + CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short) 3, blockSize); + stm.write(new byte[(blockSize * 3) / 2]); + // We do not close the stream so that + // the writing seems to be still ongoing + stm.hflush(); + + LocatedBlocks blocks = client.getNamenode().getBlockLocations( + fileName.toString(), 0, blockSize); + DatanodeInfo[] nodes = blocks.get(0).getLocations(); + assertEquals(nodes.length, 3); + DataNode staleNode = null; + DatanodeDescriptor staleNodeInfo = null; + // stop the heartbeat of the first node + staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName()); + assertNotNull(staleNode); + // set the first node as stale + staleNodeInfo = cluster.getNameNode().getNamesystem().getBlockManager() + .getDatanodeManager() + .getDatanode(staleNode.getDatanodeId()); + staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1); + + LocatedBlocks blocksAfterStale = client.getNamenode().getBlockLocations( + fileName.toString(), 0, blockSize); + DatanodeInfo[] nodesAfterStale = blocksAfterStale.get(0).getLocations(); + assertEquals(nodesAfterStale.length, 3); + assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName()); + + // restart the staleNode's heartbeat + DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode, false); + // reset the first node as non-stale, so as to avoid two stale nodes + staleNodeInfo.setLastUpdate(Time.now()); + + LocatedBlock lastBlock = client.getLocatedBlocks(fileName.toString(), 0, + Long.MAX_VALUE).getLastLocatedBlock(); + nodes = lastBlock.getLocations(); + assertEquals(nodes.length, 3); + // stop the heartbeat of the first node for the last block + staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName()); + assertNotNull(staleNode); + // set the node as stale + cluster.getNameNode().getNamesystem().getBlockManager() + .getDatanodeManager() + .getDatanode(staleNode.getDatanodeId()) + .setLastUpdate(Time.now() - staleInterval - 1); + + LocatedBlock lastBlockAfterStale = client.getLocatedBlocks( + fileName.toString(), 0, Long.MAX_VALUE).getLastLocatedBlock(); + nodesAfterStale = lastBlockAfterStale.getLocations(); + assertEquals(nodesAfterStale.length, 3); +
assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName()); + } finally { + if (stm != null) { + stm.close(); + } + cluster.shutdown(); + } + } + /** test getBlocks */ @Test public void testGetBlocks() throws Exception { final Configuration CONF = new HdfsConfiguration(); - final short REPLICATION_FACTOR = (short)2; + final short REPLICATION_FACTOR = (short) 2; final int DEFAULT_BLOCK_SIZE = 1024; final Random r = new Random(); - + CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF) - .numDataNodes(REPLICATION_FACTOR) - .build(); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes( + REPLICATION_FACTOR).build(); try { cluster.waitActive(); - + // create a file with two blocks FileSystem fs = cluster.getFileSystem(); FSDataOutputStream out = fs.create(new Path("/tmp.txt"), REPLICATION_FACTOR); - byte [] data = new byte[1024]; - long fileLen = 2*DEFAULT_BLOCK_SIZE; + byte[] data = new byte[1024]; + long fileLen = 2 * DEFAULT_BLOCK_SIZE; long bytesToWrite = fileLen; - while( bytesToWrite > 0 ) { + while (bytesToWrite > 0) { r.nextBytes(data); - int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite; + int bytesToWriteNext = (1024 < bytesToWrite) ? 1024 + : (int) bytesToWrite; out.write(data, 0, bytesToWriteNext); bytesToWrite -= bytesToWriteNext; } out.close(); // get blocks & data nodes List<LocatedBlock> locatedBlocks; - DatanodeInfo[] dataNodes=null; + DatanodeInfo[] dataNodes = null; boolean notWritten; do { - final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF); - locatedBlocks = dfsclient.getNamenode(). - getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks(); + final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), + CONF); + locatedBlocks = dfsclient.getNamenode() + .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks(); assertEquals(2, locatedBlocks.size()); notWritten = false; - for(int i=0; i<2; i++) { + for (int i = 0; i < 2; i++) { dataNodes = locatedBlocks.get(i).getLocations(); - if(dataNodes.length != REPLICATION_FACTOR) { + if (dataNodes.length != REPLICATION_FACTOR) { notWritten = true; try { Thread.sleep(10); - } catch(InterruptedException e) { + } catch (InterruptedException e) { } break; } } - } while(notWritten); - + } while (notWritten); + // get RPC client to namenode InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort()); @@ -122,7 +252,7 @@ public class TestGetBlocks { assertEquals(locs[0].getStorageIDs().length, 2); // get blocks of size 0 from dataNodes[0] - getBlocksWithException(namenode, dataNodes[0], 0); + getBlocksWithException(namenode, dataNodes[0], 0); // get blocks of size -1 from dataNodes[0] getBlocksWithException(namenode, dataNodes[0], -1); @@ -136,46 +266,39 @@ } private void getBlocksWithException(NamenodeProtocol namenode, - DatanodeInfo datanode, - long size) throws IOException { + DatanodeInfo datanode, long size) throws IOException { boolean getException = false; try { - namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2); - } catch(RemoteException e) { + namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2); + } catch (RemoteException e) { getException = true; assertTrue(e.getClassName().contains("HadoopIllegalArgumentException")); } assertTrue(getException); } - + @Test public void testBlockKey() { Map<Block, Long> map = new HashMap<Block, Long>(); final Random RAN = new Random(); final long seed = RAN.nextLong(); - System.out.println("seed=" + seed); + System.out.println("seed=" + seed); RAN.setSeed(seed); - long[] blkids = new long[10]; - for(int i = 0; i < blkids.length; i++) { + long[] blkids = new long[10]; + for (int i = 0; i < blkids.length; i++) { blkids[i] = 1000L + RAN.nextInt(100000); map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]); } System.out.println("map=" + map.toString().replace(",", "\n ")); - - for(int i = 0; i < blkids.length; i++) { - Block b = new Block(blkids[i], 0, GenerationStamp.GRANDFATHER_GENERATION_STAMP); + + for (int i = 0; i < blkids.length; i++) { + Block b = new Block(blkids[i], 0, + GenerationStamp.GRANDFATHER_GENERATION_STAMP); Long v = map.get(b); System.out.println(b + " => " + v); assertEquals(blkids[i], v.longValue()); } } - /** - * @param args - */ - public static void main(String[] args) throws Exception { - (new TestGetBlocks()).testGetBlocks(); - } - }
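For context on what testReadSelectNonStaleDatanode above exercises: with stale-node checking enabled, the NameNode orders each replica list it returns so that datanodes whose last heartbeat is older than the stale interval sort to the tail, which is why the test expects the formerly-first node to reappear at index 2. A minimal standalone sketch of that ordering rule, assuming nothing beyond the JDK; StaleNodeSorter and Replica are illustrative names, not Hadoop's actual classes:

    import java.util.Arrays;
    import java.util.Comparator;

    class StaleNodeSorter {
      // Mirrors the 30-minute stale interval configured in the test above.
      static final long STALE_INTERVAL_MS = 30 * 60 * 1000L;

      static class Replica {
        final String host;
        final long lastHeartbeatMs;
        Replica(String host, long lastHeartbeatMs) {
          this.host = host;
          this.lastHeartbeatMs = lastHeartbeatMs;
        }
        boolean isStale(long nowMs) {
          return nowMs - lastHeartbeatMs > STALE_INTERVAL_MS;
        }
      }

      // Stable sort: fresh replicas keep their relative order, stale ones move last.
      static void sortByStaleness(Replica[] replicas, final long nowMs) {
        Arrays.sort(replicas, new Comparator<Replica>() {
          @Override
          public int compare(Replica a, Replica b) {
            return (a.isStale(nowMs) ? 1 : 0) - (b.isStale(nowMs) ? 1 : 0);
          }
        });
      }
    }

Because Arrays.sort is stable for objects, marking nodes[0] stale pushes it to the end while the two fresh replicas keep their relative order, matching the assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName()) assertions in the test.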
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java Wed Sep 19 04:34:55 2012 @@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.protocol.C import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB; @@ -41,6 +40,7 @@ import org.apache.hadoop.security.Refres import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; import org.apache.hadoop.tools.GetUserMappingsProtocol; +import org.apache.hadoop.tools.impl.pb.client.GetUserMappingsProtocolPBClientImpl; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -141,8 +141,8 @@ public class TestIsMethodSupported { @Test public void testGetUserMappingsProtocol() throws IOException { - GetUserMappingsProtocolClientSideTranslatorPB translator = - (GetUserMappingsProtocolClientSideTranslatorPB) + GetUserMappingsProtocolPBClientImpl translator = + (GetUserMappingsProtocolPBClientImpl) NameNodeProxies.createNonHAProxy(conf, nnAddress, GetUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java Wed Sep 19 04:34:55 2012
@@ -27,6 +27,7 @@ import java.io.RandomAccessFile; import java.net.InetSocketAddress; import java.util.Iterator; import java.util.Random; +import java.util.concurrent.TimeoutException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -420,8 +421,8 @@ public class TestReplication { } } - private void changeBlockLen(MiniDFSCluster cluster, - int lenDelta) throws IOException, InterruptedException { + private void changeBlockLen(MiniDFSCluster cluster, int lenDelta) + throws IOException, InterruptedException, TimeoutException { final Path fileName = new Path("/file1"); final short REPLICATION_FACTOR = (short)1; final FileSystem fs = cluster.getFileSystem(); Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Wed Sep 19 04:34:55 2012 @@ -224,7 +224,8 @@ public class TestShortCircuitLocalRead { @Test public void testGetBlockLocalPathInfo() throws IOException, InterruptedException { final Configuration conf = new Configuration(); - conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, "alloweduser"); + conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, + "alloweduser1,alloweduser2"); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) .format(true).build(); cluster.waitActive(); @@ -232,8 +233,10 @@ public class TestShortCircuitLocalRead { FileSystem fs = cluster.getFileSystem(); try { DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23); - UserGroupInformation aUgi = UserGroupInformation - .createRemoteUser("alloweduser"); + UserGroupInformation aUgi1 = + UserGroupInformation.createRemoteUser("alloweduser1"); + UserGroupInformation aUgi2 = + UserGroupInformation.createRemoteUser("alloweduser2"); LocatedBlocks lb = cluster.getNameNode().getRpcServer() .getBlockLocations("/tmp/x", 0, 16); // Create a new block object, because the block inside LocatedBlock at @@ -241,7 +244,7 @@ ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock()); Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken(); final DatanodeInfo dnInfo = lb.get(0).getLocations()[0]; - ClientDatanodeProtocol proxy = aUgi + ClientDatanodeProtocol proxy = aUgi1 .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() { @Override public ClientDatanodeProtocol run() throws Exception { @@ -250,13 +253,29 @@ } }); - //This should succeed + // This should succeed BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token); Assert.assertEquals( DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(), blpi.getBlockPath()); - // Now try with a not allowed user.
+ // Try with the other allowed user + proxy = aUgi2 + .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() { + @Override + public ClientDatanodeProtocol run() throws Exception { + return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, + 60000, false); + } + }); + + // This should succeed as well + blpi = proxy.getBlockLocalPathInfo(blk, token); + Assert.assertEquals( + DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(), + blpi.getBlockPath()); + + // Now try with a disallowed user UserGroupInformation bUgi = UserGroupInformation .createRemoteUser("notalloweduser"); proxy = bUgi
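The TestShortCircuitLocalRead change above relies on the standard UserGroupInformation.doAs idiom for running code as a specific remote user. A self-contained sketch of that idiom using only the public UGI API; the UgiDoAsSketch class and whoAmIAs helper are invented names for illustration:

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    class UgiDoAsSketch {
      // Runs a block of code with the given remote user as the current user.
      static String whoAmIAs(String user) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
        return ugi.doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            // Anything here, such as creating the ClientDatanodeProtocol proxy
            // in the test above, executes as the impersonated user.
            return UserGroupInformation.getCurrentUser().getUserName();
          }
        });
      }
    }

The test applies this pattern twice with the two allowed users, expecting getBlockLocalPathInfo to succeed, and once with a disallowed user, expecting the call to be rejected.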
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Wed Sep 19 04:34:55 2012 @@ -88,7 +88,7 @@ public class TestBalancer { /* create a file with a length of fileLen */ static void createFile(MiniDFSCluster cluster, Path filePath, long fileLen, short replicationFactor, int nnIndex) - throws IOException { + throws IOException, InterruptedException, TimeoutException { FileSystem fs = cluster.getFileSystem(nnIndex); DFSTestUtil.createFile(fs, filePath, fileLen, replicationFactor, r.nextLong()); @@ -100,7 +100,7 @@ public class TestBalancer { * whose used space to be size */ private ExtendedBlock[] generateBlocks(Configuration conf, long size, - short numNodes) throws IOException { + short numNodes) throws IOException, InterruptedException, TimeoutException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build(); try { cluster.waitActive(); Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java Wed Sep 19 04:34:55 2012 @@ -23,6 +23,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Random; +import java.util.concurrent.TimeoutException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -96,7 +97,7 @@ public class TestBalancerWithMultipleNam /* create a file with a length of fileLen */ private static void createFile(Suite s, int index, long len - ) throws IOException { + ) throws IOException, InterruptedException, TimeoutException { final FileSystem fs = s.cluster.getFileSystem(index); DFSTestUtil.createFile(fs, FILE_PATH, len, s.replication, RANDOM.nextLong()); DFSTestUtil.waitReplication(fs, FILE_PATH, s.replication); @@ -106,7 +107,7 @@ public class TestBalancerWithMultipleNam * whose used space to be size */ private static ExtendedBlock[][] generateBlocks(Suite s, long size - ) throws IOException { + ) throws IOException, InterruptedException, TimeoutException { final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][]; for(int n = 0; n < s.clients.length; n++) { final long fileLen = size/s.replication; Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Wed Sep 19 04:34:55 2012 @@ -53,7 +53,7 @@ public class TestOverReplicatedBlocks { * corrupt ones. */ @Test - public void testProcesOverReplicateBlock() throws IOException { + public void testProcesOverReplicateBlock() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); conf.set( @@ -141,7 +141,7 @@ public class TestOverReplicatedBlocks { * send heartbeats.
*/ @Test - public void testChooseReplicaToDelete() throws IOException { + public void testChooseReplicaToDelete() throws Exception { MiniDFSCluster cluster = null; FileSystem fs = null; try { Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Wed Sep 19 04:34:55 2012 @@ -114,6 +114,12 @@ public class DataNodeTestUtils { dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname); } + public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) { + DataBlockScanner scanner = dn.getBlockScanner(); + BlockPoolSliceScanner bpScanner = scanner.getBPScanner(b.getBlockPoolId()); + bpScanner.verifyBlock(b); + } + public static void shutdownBlockScanner(DataNode dn) { if (dn.blockScanner != null) { dn.blockScanner.shutdown(); Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Wed Sep 19 04:34:55 2012 @@ -89,7 +89,7 @@ public class TestBlockReplacement { } @Test - public void testBlockReplacement() throws IOException, TimeoutException { + public void testBlockReplacement() throws Exception { final Configuration CONF = new HdfsConfiguration(); final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"}; final String[] NEW_RACKS = {"/RACK2"}; Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Wed Sep 19 04:34:55 2012 @@ -27,6 +27,9 @@ import java.util.ArrayList; import java.util.List; import java.util.Random; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeoutException; + +import 
junit.framework.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -65,7 +68,7 @@ import org.mockito.invocation.Invocation /** * This test simulates a variety of situations when blocks are being - * intentionally orrupted, unexpectedly modified, and so on before a block + * intentionally corrupted, unexpectedly modified, and so on before a block * report is happening */ public class TestBlockReport { @@ -316,7 +319,7 @@ public class TestBlockReport { * @throws IOException in case of an error */ @Test - public void blockReport_06() throws IOException { + public void blockReport_06() throws Exception { final String METHOD_NAME = GenericTestUtils.getMethodName(); Path filePath = new Path("/" + METHOD_NAME + ".dat"); final int DN_N1 = DN_N0 + 1; @@ -353,7 +356,7 @@ public class TestBlockReport { @Test // Currently this test is failing as expected 'cause the correct behavior is // not yet implemented (9/15/09) - public void blockReport_07() throws IOException { + public void blockReport_07() throws Exception { final String METHOD_NAME = GenericTestUtils.getMethodName(); Path filePath = new Path("/" + METHOD_NAME + ".dat"); final int DN_N1 = DN_N0 + 1; @@ -670,21 +673,24 @@ } private void startDNandWait(Path filePath, boolean waitReplicas) - throws IOException { - if(LOG.isDebugEnabled()) { + throws IOException, InterruptedException, TimeoutException { + if (LOG.isDebugEnabled()) { LOG.debug("Before next DN start: " + cluster.getDataNodes().size()); } cluster.startDataNodes(conf, 1, true, null, null); + cluster.waitClusterUp(); ArrayList<DataNode> datanodes = cluster.getDataNodes(); assertEquals(datanodes.size(), 2); - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { int lastDn = datanodes.size() - 1; LOG.debug("New datanode " + cluster.getDataNodes().get(lastDn).getDisplayName() + " has been started"); } - if (waitReplicas) DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR); + if (waitReplicas) { + DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR); + } } private ArrayList<Block> prepareForRide(final Path filePath, @@ -836,8 +842,9 @@ public void run() { try { startDNandWait(filePath, true); - } catch (IOException e) { - LOG.warn("Shouldn't happen", e); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail("Failed to start BlockChecker: " + e); } } }
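A pattern running through this commit is that helpers such as startDNandWait and DFSTestUtil.waitReplication now declare InterruptedException and TimeoutException instead of looping forever on a slow cluster. A generic sketch of the poll-until-true idiom behind such helpers; the Condition interface and waitFor name are illustrative, not the DFSTestUtil API:

    import java.util.concurrent.TimeoutException;

    class WaitSketch {
      interface Condition {
        boolean isMet() throws Exception;
      }

      // Polls the condition until it holds or the deadline passes.
      static void waitFor(Condition c, long pollMs, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!c.isMet()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException("condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(pollMs); // propagates InterruptedException to the caller
        }
      }
    }

Declaring the checked exceptions at the helper boundary is what forces the cascade of throws-clause widenings visible throughout this diff.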
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Wed Sep 19 04:34:55 2012 @@ -105,7 +105,7 @@ public class TestDataNodeVolumeFailure { * failure if the configuration parameter allows this. */ @Test - public void testVolumeFailure() throws IOException { + public void testVolumeFailure() throws Exception { FileSystem fs = cluster.getFileSystem(); dataDir = new File(cluster.getDataDirectory()); System.out.println("Data dir: is " + dataDir.getPath()); Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java Wed Sep 19 04:34:55 2012 @@ -137,7 +137,7 @@ public class TestDatanodeRestart { } // test recovering unlinked tmp replicas - @Test public void testRecoverReplicas() throws IOException { + @Test public void testRecoverReplicas() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L); conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512); Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Sep 19 04:34:55 2012 @@ -31,9 +31,7 @@ import java.io.File; import java.io.FilenameFilter; import java.io.IOException; import java.io.InputStream; -import java.io.PrintWriter; import java.io.RandomAccessFile; -import java.io.StringWriter; import java.net.URI; import java.util.ArrayList; import java.util.Arrays; @@ -1238,10 +1236,8 @@ public class TestEditLog { } } catch (IOException e) { } catch (Throwable t) { - StringWriter sw = new StringWriter(); - t.printStackTrace(new PrintWriter(sw)); - fail("caught non-IOException throwable with message " + - t.getMessage() + "\nstack trace\n" + sw.toString()); + fail("Caught non-IOException throwable " + + StringUtils.stringifyException(t)); } } finally { if ((elfos != null) && (elfos.isOpen())) Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1387449&r1=1387448&r2=1387449&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Wed Sep 19 04:34:55 2012 @@ -116,7 +116,7 @@ public class TestFSEditLogLoader { * automatically bumped up to the new minimum upon restart. */ @Test - public void testReplicationAdjusted() throws IOException { + public void testReplicationAdjusted() throws Exception { // start a cluster Configuration conf = new HdfsConfiguration(); // Replicate and heartbeat fast to shave a few seconds off test Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java Wed Sep 19 04:34:55 2012 @@ -53,7 +53,7 @@ public class TestProcessCorruptBlocks { * replicas (2) is equal to replication factor (2)) */ @Test - public void testWhenDecreasingReplication() throws IOException { + public void testWhenDecreasingReplication() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2)); @@ -108,7 +108,7 @@ public class TestProcessCorruptBlocks { * */ @Test - public void testByAddingAnExtraDataNode() throws IOException { + public void testByAddingAnExtraDataNode() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2)); @@ -159,7 +159,7 @@ public class TestProcessCorruptBlocks { * replicas (1) is equal to replication factor (1)) */ @Test - public void testWithReplicationFactorAsOne() throws IOException { + public void testWithReplicationFactorAsOne() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2)); @@ -208,7 +208,7 @@ public class TestProcessCorruptBlocks { * Verify that all replicas are corrupt and 3 replicas are present. 
*/ @Test - public void testWithAllCorruptReplicas() throws IOException { + public void testWithAllCorruptReplicas() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2)); Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java?rev=1387449&r1=1387448&r2=1387449&view=diff ============================================================================== --- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java (original) +++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java Wed Sep 19 04:34:55 2012 @@ -80,7 +80,7 @@ public class TestWebHDFS { } } - @Test + @Test(timeout=300000) public void testLargeFile() throws Exception { largeFileTest(200L << 20); //200MB file length } @@ -202,7 +202,7 @@ public class TestWebHDFS { } /** Test client retry with namenode restarting. */ - @Test + @Test(timeout=300000) public void testNamenodeRestart() throws Exception { ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL); final Configuration conf = WebHdfsTestUtil.createConf();
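The TestWebHDFS hunks above attach explicit timeouts to long-running tests. In JUnit 4, @Test(timeout = ...) fails a test that exceeds the given number of milliseconds instead of letting it hang the build. A minimal sketch; doLargeTransfer is a hypothetical stand-in for work like largeFileTest:

    import static org.junit.Assert.assertTrue;
    import org.junit.Test;

    public class TimeoutSketch {
      // Aborts and fails after five minutes instead of hanging the test run.
      @Test(timeout = 300000)
      public void finishesWithinFiveMinutes() throws Exception {
        assertTrue(doLargeTransfer());
      }

      private boolean doLargeTransfer() {
        return true; // placeholder for a long-running operation
      }
    }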