From: szetszwo@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Date: Fri, 28 Mar 2014 18:29:31 -0000
Message-Id: <20140328182931.C6128238883D@eris.apache.org>
Subject: svn commit: r1582856 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/hadoop/hdfs/security/ src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/ src/test/java/org/apache/ha...

Author: szetszwo
Date: Fri Mar 28 18:29:31 2014
New Revision: 1582856

URL: http://svn.apache.org/r1582856
Log:
HDFS-6168. Remove a deprecated constructor and the deprecated methods
reportChecksumFailure, getDelegationToken(Text), renewDelegationToken and
cancelDelegationToken from DistributedFileSystem.

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
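Because this entry lands under INCOMPATIBLE CHANGES, client code that still uses the removed members needs a small migration to the replacements the deprecation notes already point at: FileSystem.get instead of the removed constructor, getDelegationToken(String) instead of the Text overload, and Token.renew/Token.cancel instead of renewDelegationToken/cancelDelegationToken. A rough sketch only; the URI, renewer name and class name below are placeholders, not part of this commit:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.token.Token;

public class Hdfs6168MigrationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Instead of the removed DistributedFileSystem(InetSocketAddress, Configuration)
    // constructor, obtain the file system from the FileSystem factory
    // (the URI here is a placeholder).
    FileSystem fs = FileSystem.get(URI.create("hdfs://nn.example.com:8020"), conf);

    // Instead of the removed getDelegationToken(Text) overload, pass the renewer
    // principal as a String, as the deprecation note suggested.
    Token<?> token = fs.getDelegationToken("renewerUser");

    // Instead of the removed renewDelegationToken(Token)/cancelDelegationToken(Token),
    // renew and cancel directly on the token (Token.renew / Token.cancel).
    long newExpiry = token.renew(conf);
    System.out.println("Token valid until " + newExpiry);
    token.cancel(conf);

    fs.close();
  }
}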
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1582856&r1=1582855&r2=1582856&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Mar 28 18:29:31 2014
@@ -249,6 +249,10 @@ Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
+    HDFS-6168. Remove a deprecated constructor and the deprecated methods reportChecksumFailure,
+    getDelegationToken(Text), renewDelegationToken and cancelDelegationToken from
+    DistributedFileSystem. (szetszwo)
+
   NEW FEATURES
 
   IMPROVEMENTS

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1582856&r1=1582855&r2=1582856&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Fri Mar 28 18:29:31 2014
@@ -124,12 +124,6 @@ public class DistributedFileSystem exten
     return HdfsConstants.HDFS_URI_SCHEME;
   }
 
-  @Deprecated
-  public DistributedFileSystem(InetSocketAddress namenode,
-      Configuration conf) throws IOException {
-    initialize(NameNode.getUri(namenode), conf);
-  }
-
   @Override
   public URI getUri() { return uri; }
 
@@ -1017,55 +1011,6 @@ public class DistributedFileSystem exten
   }
 
   /**
-   * We need to find the blocks that didn't match. Likely only one
-   * is corrupt but we will report both to the namenode. In the future,
-   * we can consider figuring out exactly which block is corrupt.
-   */
-  // We do not see a need for user to report block checksum errors and do not
-  // want to rely on user to report block corruptions.
-  @Deprecated
-  public boolean reportChecksumFailure(Path f,
-    FSDataInputStream in, long inPos,
-    FSDataInputStream sums, long sumsPos) {
-
-    if(!(in instanceof HdfsDataInputStream && sums instanceof HdfsDataInputStream))
-      throw new IllegalArgumentException(
-          "Input streams must be types of HdfsDataInputStream");
-
-    LocatedBlock lblocks[] = new LocatedBlock[2];
-
-    // Find block in data stream.
-    HdfsDataInputStream dfsIn = (HdfsDataInputStream) in;
-    ExtendedBlock dataBlock = dfsIn.getCurrentBlock();
-    if (dataBlock == null) {
-      LOG.error("Error: Current block in data stream is null! ");
-      return false;
-    }
-    DatanodeInfo[] dataNode = {dfsIn.getCurrentDatanode()};
-    lblocks[0] = new LocatedBlock(dataBlock, dataNode);
-    LOG.info("Found checksum error in data stream at "
-        + dataBlock + " on datanode="
-        + dataNode[0]);
-
-    // Find block in checksum stream
-    HdfsDataInputStream dfsSums = (HdfsDataInputStream) sums;
-    ExtendedBlock sumsBlock = dfsSums.getCurrentBlock();
-    if (sumsBlock == null) {
-      LOG.error("Error: Current block in checksum stream is null! ");
-      return false;
-    }
-    DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()};
-    lblocks[1] = new LocatedBlock(sumsBlock, sumsNode);
-    LOG.info("Found checksum error in checksum stream at "
-        + sumsBlock + " on datanode=" + sumsNode[0]);
-
-    // Ask client to delete blocks.
-    dfs.reportChecksumFailure(f.toString(), lblocks);
-
-    return true;
-  }
-
-  /**
    * Returns the stat information about the file.
    * @throws FileNotFoundException if the file does not exist.
    */
@@ -1282,66 +1227,13 @@ public class DistributedFileSystem exten
   }
 
   @Override
-  public
-  Token getDelegationToken(String renewer
-                                                      ) throws IOException {
+  public Token getDelegationToken(String renewer)
+      throws IOException {
     Token result =
       dfs.getDelegationToken(renewer == null ? null : new Text(renewer));
     return result;
   }
 
-  /*
-   * Delegation Token Operations
-   * These are DFS only operations.
-   */
-
-  /**
-   * Get a valid Delegation Token.
-   *
-   * @param renewer Name of the designated renewer for the token
-   * @return Token
-   * @throws IOException
-   * @deprecated use {@link #getDelegationToken(String)}
-   */
-  @Deprecated
-  public Token getDelegationToken(Text renewer)
-      throws IOException {
-    return getDelegationToken(renewer.toString());
-  }
-
-  /**
-   * Renew an existing delegation token.
-   *
-   * @param token delegation token obtained earlier
-   * @return the new expiration time
-   * @throws IOException
-   * @deprecated Use Token.renew instead.
-   */
-  public long renewDelegationToken(Token token)
-      throws InvalidToken, IOException {
-    try {
-      return token.renew(getConf());
-    } catch (InterruptedException ie) {
-      throw new RuntimeException("Caught interrupted", ie);
-    }
-  }
-
-  /**
-   * Cancel an existing delegation token.
-   *
-   * @param token delegation token
-   * @throws IOException
-   * @deprecated Use Token.cancel instead.
-   */
-  public void cancelDelegationToken(Token token)
-      throws IOException {
-    try {
-      token.cancel(getConf());
-    } catch (InterruptedException ie) {
-      throw new RuntimeException("Caught interrupted", ie);
-    }
-  }
-
   /**
    * Requests the namenode to tell all datanodes to use a new, non-persistent
    * bandwidth value for dfs.balance.bandwidthPerSec.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1582856&r1=1582855&r2=1582856&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java Fri Mar 28 18:29:31 2014
@@ -195,7 +195,6 @@ public class TestDelegationToken {
     }
   }
 
-  @SuppressWarnings("deprecation")
   @Test
   public void testDelegationTokenWithDoAs() throws Exception {
     final DistributedFileSystem dfs = cluster.getFileSystem();
@@ -212,11 +211,9 @@ public class TestDelegationToken {
     longUgi.doAs(new PrivilegedExceptionAction() {
       @Override
       public Object run() throws IOException {
-        final DistributedFileSystem dfs = cluster.getFileSystem();
        try {
-          //try renew with long name
-          dfs.renewDelegationToken(token);
-        } catch (IOException e) {
+          token.renew(config);
+        } catch (Exception e) {
          Assert.fail("Could not renew delegation token for user "+longUgi);
        }
        return null;
@@ -224,20 +221,17 @@ public class TestDelegationToken {
     });
     shortUgi.doAs(new PrivilegedExceptionAction() {
       @Override
-      public Object run() throws IOException {
-        final DistributedFileSystem dfs = cluster.getFileSystem();
-        dfs.renewDelegationToken(token);
+      public Object run() throws Exception {
+        token.renew(config);
         return null;
       }
     });
     longUgi.doAs(new PrivilegedExceptionAction() {
       @Override
       public Object run() throws IOException {
-        final DistributedFileSystem dfs = cluster.getFileSystem();
        try {
-          //try cancel with long name
-          dfs.cancelDelegationToken(token);
-        } catch (IOException e) {
+          token.cancel(config);
+        } catch (Exception e) {
          Assert.fail("Could not cancel delegation token for user "+longUgi);
        }
        return null;
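The updated test above renews and cancels through Token.renew(config)/Token.cancel(config) inside a doAs block rather than through DistributedFileSystem. For application code outside a MiniDFSCluster the same pattern looks roughly like the sketch below; the class, method and user names are illustrative, not from this commit:

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class DoAsTokenRenewalSketch {
  // Renew and then cancel a delegation token while running as another principal.
  public static void renewAndCancelAs(String user, final Token<?> token,
      final Configuration conf) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        long expiry = token.renew(conf);  // was dfs.renewDelegationToken(token)
        System.out.println("Renewed until " + expiry);
        token.cancel(conf);               // was dfs.cancelDelegationToken(token)
        return null;
      }
    });
  }
}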
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1582856&r1=1582855&r2=1582856&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Fri Mar 28 18:29:31 2014
@@ -99,6 +99,9 @@ public class TestDelegationTokensWithHA
         .build();
     cluster.waitActive();
 
+    String logicalName = HATestUtil.getLogicalHostname(cluster);
+    HATestUtil.setFailoverConfigurations(cluster, conf, logicalName, 0);
+
     nn0 = cluster.getNameNode(0);
     nn1 = cluster.getNameNode(1);
     fs = HATestUtil.configureFailoverFs(cluster, conf);
@@ -246,8 +249,7 @@ public class TestDelegationTokensWithHA
     doRenewOrCancel(token, clientConf, TokenTestAction.RENEW);
     doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
   }
-
-  @SuppressWarnings("deprecation")
+
   @Test
   public void testDelegationTokenWithDoAs() throws Exception {
     final Token token =
@@ -259,29 +261,22 @@ public class TestDelegationTokensWithHA
     longUgi.doAs(new PrivilegedExceptionAction() {
       @Override
       public Void run() throws Exception {
-        DistributedFileSystem dfs = (DistributedFileSystem)
-            HATestUtil.configureFailoverFs(cluster, conf);
         // try renew with long name
-        dfs.renewDelegationToken(token);
+        token.renew(conf);
         return null;
       }
     });
     shortUgi.doAs(new PrivilegedExceptionAction() {
       @Override
       public Void run() throws Exception {
-        DistributedFileSystem dfs = (DistributedFileSystem)
-            HATestUtil.configureFailoverFs(cluster, conf);
-        dfs.renewDelegationToken(token);
+        token.renew(conf);
         return null;
       }
     });
     longUgi.doAs(new PrivilegedExceptionAction() {
       @Override
       public Void run() throws Exception {
-        DistributedFileSystem dfs = (DistributedFileSystem)
-            HATestUtil.configureFailoverFs(cluster, conf);
-        // try cancel with long name
-        dfs.cancelDelegationToken(token);
+        token.cancel(conf);;
         return null;
       }
     });
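The two lines added to the HA test setup matter because Token.renew(conf) and Token.cancel(conf) now resolve the token's logical nameservice from the Configuration instead of going through a DistributedFileSystem that was already wired for failover. Outside a MiniDFSCluster, that resolution depends on the usual client-side HA settings; a minimal sketch, with an illustrative nameservice, namenode IDs and hostnames:

import org.apache.hadoop.conf.Configuration;

public class HaClientConfSketch {
  // Builds a client configuration for a logical nameservice "mycluster" so that
  // Token.renew(conf)/Token.cancel(conf) can reach the active NameNode.
  public static Configuration haClientConf() {
    Configuration conf = new Configuration();
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "nn2.example.com:8020");
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
    return conf;
  }
}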
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java?rev=1582856&r1=1582855&r2=1582856&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java Fri Mar 28 18:29:31 2014
@@ -62,7 +62,6 @@ public class TestDelegationTokenFetcher
    * Verify that when the DelegationTokenFetcher runs, it talks to the Namenode,
    * pulls out the correct user's token and successfully serializes it to disk.
    */
-  @SuppressWarnings("deprecation")
   @Test
   public void expectedTokenIsRetrievedFromDFS() throws Exception {
     final byte[] ident = new DelegationTokenIdentifier(new Text("owner"),
@@ -83,7 +82,6 @@ public class TestDelegationTokenFetcher
         return new Token[]{t};
       }
     });
-    when(dfs.renewDelegationToken(eq(t))).thenReturn(1000L);
     when(dfs.getUri()).thenReturn(uri);
     FakeRenewer.reset();