Subject: svn commit: r1335791 [2/2] - in /hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/main/java/org/apache/hadoop/hdfs...
Date: Tue, 08 May 2012 21:58:05 -0000
From: szetszwo@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Message-Id: <20120508215807.A02332388B9B@eris.apache.org>

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1335791&r1=1335790&r2=1335791&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java Tue May  8 21:57:58 2012
@@ -153,8 +153,7 @@ public class DFSck extends Configured im
       url.append("&startblockafter=").append(String.valueOf(cookie));
     }
     URL path = new URL(url.toString());
-    SecurityUtil.fetchServiceTicket(path);
-    URLConnection connection = path.openConnection();
+    URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
     InputStream stream = connection.getInputStream();
     BufferedReader input = new BufferedReader(new InputStreamReader(
         stream, "UTF-8"));
@@ -222,16 +221,11 @@ public class DFSck extends Configured im
       return null;
     }
 
-    return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, true);
+    return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, false);
   }
 
   private int doWork(final String[] args) throws IOException {
-    String proto = "http://";
-    if (UserGroupInformation.isSecurityEnabled()) {
-      SecurityUtil.initKrb5CipherSuites();
-      proto = "https://";
-    }
-    final StringBuilder url = new StringBuilder(proto);
+    final StringBuilder url = new StringBuilder("http://");
     String namenodeAddress = getCurrentNamenodeAddress();
     if (namenodeAddress == null) {
@@ -279,8 +273,7 @@ public class DFSck extends Configured im
       return listCorruptFileBlocks(dir, url.toString());
     }
     URL path = new URL(url.toString());
-    SecurityUtil.fetchServiceTicket(path);
-    URLConnection connection = path.openConnection();
+    URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
     InputStream stream = connection.getInputStream();
     BufferedReader input = new BufferedReader(new InputStreamReader(
         stream, "UTF-8"));

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1335791&r1=1335790&r2=1335791&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Tue May  8 21:57:58 2012
@@ -72,11 +72,6 @@ public class DelegationTokenFetcher {
   private static final String RENEW = "renew";
   private static final String PRINT = "print";
 
-  static {
-    // Enable Kerberos sockets
-    System.setProperty("https.cipherSuites", "TLS_KRB5_WITH_3DES_EDE_CBC_SHA");
-  }
-
   private static void printUsage(PrintStream err) throws IOException {
     err.println("fetchdt retrieves delegation tokens from the NameNode");
     err.println();
@@ -106,7 +101,7 @@ public class DelegationTokenFetcher {
     final Configuration conf = new HdfsConfiguration();
     Options fetcherOptions = new Options();
     fetcherOptions.addOption(WEBSERVICE, true,
-        "HTTPS url to reach the NameNode at");
+        "HTTP url to reach the NameNode at");
     fetcherOptions.addOption(RENEWER, true,
         "Name of the delegation token renewer");
     fetcherOptions.addOption(CANCEL, false, "cancel the token");
@@ -224,8 +219,7 @@ public class DelegationTokenFetcher {
       }
       URL remoteURL = new URL(url.toString());
-      SecurityUtil.fetchServiceTicket(remoteURL);
-      URLConnection connection = URLUtils.openConnection(remoteURL);
+      URLConnection connection = SecurityUtil.openSecureHttpConnection(remoteURL);
       InputStream in = connection.getInputStream();
       Credentials ts = new Credentials();
       dis = new DataInputStream(in);
@@ -264,7 +258,7 @@ public class DelegationTokenFetcher {
     try {
       URL url = new URL(buf.toString());
-      SecurityUtil.fetchServiceTicket(url);
+      connection = (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url);
       connection = (HttpURLConnection)URLUtils.openConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error renewing token: " +
@@ -358,8 +352,7 @@ public class DelegationTokenFetcher {
     HttpURLConnection connection=null;
     try {
       URL url = new URL(buf.toString());
-      SecurityUtil.fetchServiceTicket(url);
-      connection = (HttpURLConnection)URLUtils.openConnection(url);
+      connection = (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error cancelling token: " +
             connection.getResponseMessage());
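Every hunk above makes the same substitution: the old Kerberized-SSL pair of SecurityUtil.fetchServiceTicket() followed by a manual openConnection() becomes a single SecurityUtil.openSecureHttpConnection() call over plain HTTP, with SPNEGO handling the authentication. A minimal client-side sketch of the new call pattern follows; the NameNode address is a hypothetical placeholder, and the SPNEGO-negotiation behavior is inferred from the purpose of this commit rather than spelled out in the diff:

    import java.io.InputStream;
    import java.net.URL;
    import java.net.URLConnection;

    import org.apache.hadoop.io.IOUtils;
    import org.apache.hadoop.security.SecurityUtil;

    public class SpnegoFetchSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical NameNode web address; the fsck and delegation-token
        // servlets touched by this commit are reached the same way.
        URL path = new URL("http://nn.example.com:50070/fsck?path=%2F");

        // Replaces the former two-step sequence
        //   SecurityUtil.fetchServiceTicket(path);
        //   URLConnection connection = path.openConnection();
        // authenticating via SPNEGO when security is enabled.
        URLConnection connection = SecurityUtil.openSecureHttpConnection(path);

        InputStream stream = connection.getInputStream();
        try {
          // consume the servlet response here
        } finally {
          IOUtils.closeStream(stream);
        }
      }
    }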
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1334158-1335790

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1335791&r1=1335790&r2=1335791&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Tue May  8 21:57:58 2012
@@ -858,4 +858,15 @@
 
+<property>
+  <name>dfs.namenode.kerberos.internal.spnego.principal</name>
+  <value>${dfs.web.authentication.kerberos.principal}</value>
+</property>
+
+<property>
+  <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
+  <value>${dfs.web.authentication.kerberos.principal}</value>
+</property>
+
 </configuration>
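These new defaults make the NameNode's and SecondaryNameNode's internal SPNEGO principals fall back to dfs.web.authentication.kerberos.principal. A secure site that needs a distinct principal could presumably override the default in hdfs-site.xml; the host and realm in this sketch are hypothetical placeholders:

    <property>
      <name>dfs.namenode.kerberos.internal.spnego.principal</name>
      <!-- HTTP/<host>@<REALM> is the conventional SPNEGO principal form;
           this value is a hypothetical placeholder. -->
      <value>HTTP/nn.example.com@EXAMPLE.COM</value>
    </property>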
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1334158-1335790

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1334158-1335790

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1334158-1335790

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1334158-1335790

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java?rev=1335791&r1=1335790&r2=1335791&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java Tue May  8 21:57:58 2012
@@ -105,17 +105,17 @@ public class TestViewFileSystemHdfs exte
   // additional mount.
   @Override
   int getExpectedDirPaths() {
-    return 7;
+    return 8;
   }
 
   @Override
   int getExpectedMountPoints() {
-    return 8;
+    return 9;
   }
 
   @Override
   int getExpectedDelegationTokenCount() {
-    return 8;
+    return 9;
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java?rev=1335791&r1=1335790&r2=1335791&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java Tue May  8 21:57:58 2012
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs.viewfs;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
-import java.util.List;
 
 import javax.security.auth.login.LoginException;
 
@@ -30,20 +29,13 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Test;
-
 
 public class TestViewFsHdfs extends ViewFsBaseTest {
 
   private static MiniDFSCluster cluster;
-  private static Path defaultWorkingDirectory;
   private static HdfsConfiguration CONF = new HdfsConfiguration();
   private static FileContext fc;
@@ -57,7 +49,7 @@ public class TestViewFsHdfs extends View
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     cluster.waitClusterUp();
     fc = FileContext.getFileContext(cluster.getURI(0), CONF);
-    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
+    Path defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
         UserGroupInformation.getCurrentUser().getShortUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
@@ -73,25 +65,15 @@ public class TestViewFsHdfs extends View
     // create the test root on local_fs
     fcTarget = fc;
     super.setUp();
   }
-
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  /*
-   * This overides the default implementation since hdfs does have delegation
+  /**
+   * This overrides the default implementation since hdfs does have delegation
    * tokens.
    */
   @Override
-  @Test
-  public void testGetDelegationTokens() throws IOException {
-    List<Token<?>> delTokens =
-        fcView.getDelegationTokens(new Path("/"), "sanjay");
-    Assert.assertEquals(7, delTokens.size());
+  int getExpectedDelegationTokenCount() {
+    return 8;
   }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java?rev=1335791&r1=1335790&r2=1335791&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java Tue May  8 21:57:58 2012
@@ -24,6 +24,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Test;
@@ -230,6 +232,33 @@ public class TestConnCache {
     in.close();
   }
+
+  /**
+   * Test that the socket cache can be disabled by setting the capacity to
+   * 0. Regression test for HDFS-3365.
+   */
+  @Test
+  public void testDisableCache() throws IOException {
+    LOG.info("Starting testDisableCache()");
+
+    // Reading with the normally configured filesystem should
+    // cache a socket.
+    DFSTestUtil.readFile(fs, testFile);
+    assertEquals(1, ((DistributedFileSystem)fs).dfs.socketCache.size());
+
+    // Configure a new instance with no caching, ensure that it doesn't
+    // cache anything
+    Configuration confWithoutCache = new Configuration(fs.getConf());
+    confWithoutCache.setInt(
+        DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
+    FileSystem fsWithoutCache = FileSystem.newInstance(confWithoutCache);
+    try {
+      DFSTestUtil.readFile(fsWithoutCache, testFile);
+      assertEquals(0, ((DistributedFileSystem)fsWithoutCache).dfs.socketCache.size());
+    } finally {
+      fsWithoutCache.close();
+    }
+  }
 
   @AfterClass
   public static void teardownCluster() throws Exception {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java?rev=1335791&r1=1335790&r2=1335791&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java Tue May  8 21:57:58 2012
@@ -17,10 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
 import static org.junit.Assert.*;
 
+import java.io.InputStream;
 import java.io.PrintWriter;
 import java.net.InetSocketAddress;
 import java.net.Socket;
@@ -40,6 +42,8 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.io.NullOutputStream;
+
 public class TestDataTransferKeepalive {
   Configuration conf = new HdfsConfiguration();
   private MiniDFSCluster cluster;
@@ -56,6 +60,8 @@ public class TestDataTransferKeepalive {
   public void setup() throws Exception {
     conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
         KEEPALIVE_TIMEOUT);
+    conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
+        0);
 
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(1).build();
@@ -143,6 +149,40 @@ public class TestDataTransferKeepalive {
       IOUtils.closeStream(stm);
     }
   }
+
+  @Test(timeout=30000)
+  public void testManyClosedSocketsInCache() throws Exception {
+    // Make a small file
+    DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
+
+    // Insert a bunch of dead sockets in the cache, by opening
+    // many streams concurrently, reading all of the data,
+    // and then closing them.
+    InputStream[] stms = new InputStream[5];
+    try {
+      for (int i = 0; i < stms.length; i++) {
+        stms[i] = fs.open(TEST_FILE);
+      }
+      for (InputStream stm : stms) {
+        IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
+      }
+    } finally {
+      IOUtils.cleanup(null, stms);
+    }
+
+    DFSClient client = ((DistributedFileSystem)fs).dfs;
+    assertEquals(5, client.socketCache.size());
+
+    // Let all the xceivers timeout
+    Thread.sleep(1500);
+    assertXceiverCount(0);
+
+    // Client side still has the sockets cached
+    assertEquals(5, client.socketCache.size());
+
+    // Reading should not throw an exception.
+    DFSTestUtil.readFile(fs, TEST_FILE);
+  }
 
   private void assertXceiverCount(int expected) {
     // Subtract 1, since the DataXceiverServer

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1335791&r1=1335790&r2=1335791&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Tue May  8 21:57:58 2012
@@ -136,6 +136,11 @@ public class DataNodeTestUtils {
       ) throws IOException {
     return FsDatasetTestUtil.getBlockFile(dn.getFSDataset(), bpid, b);
   }
+
+  public static File getMetaFile(DataNode dn, String bpid, Block b)
+      throws IOException {
+    return FsDatasetTestUtil.getMetaFile(dn.getFSDataset(), bpid, b);
+  }
 
   public static boolean unlinkBlock(DataNode dn, ExtendedBlock bk, int numLinks
       ) throws IOException {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java?rev=1335791&r1=1335790&r2=1335791&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java Tue May  8 21:57:58 2012
@@ -36,6 +36,12 @@ public class FsDatasetTestUtil {
       ) throws IOException {
     return ((FsDatasetImpl)fsd).getBlockFile(bpid, b);
   }
+
+  public static File getMetaFile(FsDatasetSpi<?> fsd, String bpid, Block b)
+      throws IOException {
+    return FsDatasetUtil.getMetaFile(getBlockFile(fsd, bpid, b), b
+        .getGenerationStamp());
+  }
 
   public static boolean unlinkBlock(FsDatasetSpi<?> fsd, ExtendedBlock block,
       int numLinks) throws IOException {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java?rev=1335791&r1=1335790&r2=1335791&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java Tue May  8 21:57:58 2012
@@ -46,9 +46,9 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -585,7 +585,7 @@ public class TestDNFencing {
     }
 
     @Override
-    public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
+    public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
         Block block, short replicationFactor,
         Collection<DatanodeDescriptor> first,
        Collection<DatanodeDescriptor> second) {