Subject: svn commit: r1179030 - in /hadoop/common/branches/branch-0.20-security: ./ src/hdfs/org/apache/hadoop/hdfs/ src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/ src/hdfs/org/apache/hadoop/hdfs/server/namenode/web/resources/ src/hdfs/org/apache/ha...
Date: Wed, 05 Oct 2011 00:45:31 -0000
To: common-commits@hadoop.apache.org
From: szetszwo@apache.org

Author: szetszwo
Date: Wed Oct 5 00:45:30 2011
New Revision: 1179030

URL: http://svn.apache.org/viewvc?rev=1179030&view=rev
Log:
svn merge -c -1178887 . for reverting HDFS-2385 due to HDFS-2402.
Removed:
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenRenewer.java
Modified:
    hadoop/common/branches/branch-0.20-security/CHANGES.txt
    hadoop/common/branches/branch-0.20-security/build.xml
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
    hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestHftpFileSystem.java

Modified: hadoop/common/branches/branch-0.20-security/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/CHANGES.txt?rev=1179030&r1=1179029&r2=1179030&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.20-security/CHANGES.txt Wed Oct 5 00:45:30 2011
@@ -86,9 +86,6 @@ Release 0.20.205.0 - unreleased
     HDFS-2348. Support getContentSummary and getFileChecksum in webhdfs.
     (szetszwo)
 
-    HDFS-2385. Support renew and cancel delegation tokens in webhdfs.
-    (szetszwo)
-
   BUG FIXES
 
     MAPREDUCE-3112. Calling hadoop cli inside mapreduce job leads to errors.
Modified: hadoop/common/branches/branch-0.20-security/build.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/build.xml?rev=1179030&r1=1179029&r2=1179030&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/build.xml (original)
+++ hadoop/common/branches/branch-0.20-security/build.xml Wed Oct 5 00:45:30 2011
@@ -711,7 +711,6 @@
-

Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1179030&r1=1179029&r2=1179030&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java Wed Oct 5 00:45:30 2011
@@ -46,10 +46,8 @@ public class DFSConfigKeys extends Commo
   public static final String DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.backup.dnrpc-address";
   public static final String DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY = "dfs.datanode.balance.bandwidthPerSec";
   public static final long   DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
-  public static final String DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port";
-  public static final int    DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
   public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
-  public static final String DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTP_PORT_DEFAULT;
+  public static final String DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50070";
   public static final String DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address";
   public static final String DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
   public static final long   DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;
@@ -107,10 +105,8 @@ public class DFSConfigKeys extends Commo
 
   //Following keys have no defaults
   public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";
-  public static final String DFS_NAMENODE_HTTPS_PORT_KEY = "dfs.https.port";
-  public static final int    DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470;
   public static final String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
-  public static final String DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTPS_PORT_DEFAULT;
+  public static final String DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50470";
   public static final String DFS_NAMENODE_NAME_DIR_KEY = "dfs.namenode.name.dir";
   public static final String DFS_NAMENODE_EDITS_DIR_KEY = "dfs.namenode.edits.dir";
   public static final String DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size";
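The hunks above fold the removed port keys back into literal "host:port" address defaults; callers that need a port now parse it out of the address string (Hadoop does this with NetUtils.createSocketAddr, as seen later in this diff). A minimal standalone sketch of that parsing, using only the JDK -- the class and method names here are illustrative, not Hadoop's:

    import java.net.InetSocketAddress;

    public class AddressDefaults {
      // same literal form as DFS_NAMENODE_HTTP_ADDRESS_DEFAULT above
      static final String HTTP_ADDRESS_DEFAULT = "0.0.0.0:50070";

      // split "host:port" into an unresolved socket address
      static InetSocketAddress parse(String hostPort) {
        int colon = hostPort.lastIndexOf(':');
        String host = hostPort.substring(0, colon);
        int port = Integer.parseInt(hostPort.substring(colon + 1));
        return InetSocketAddress.createUnresolved(host, port);
      }

      public static void main(String[] args) {
        InetSocketAddress addr = parse(HTTP_ADDRESS_DEFAULT);
        System.out.println(addr.getHostName() + " -> " + addr.getPort()); // 0.0.0.0 -> 50070
      }
    }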
Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=1179030&r1=1179029&r2=1179030&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java Wed Oct 5 00:45:30 2011
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.lang.ref.WeakReference;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.URI;
@@ -31,6 +32,9 @@ import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.TimeZone;
+import java.util.concurrent.DelayQueue;
+import java.util.concurrent.Delayed;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -45,9 +49,9 @@ import org.apache.hadoop.fs.MD5MD5CRC32F
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.namenode.JspHelper;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
 import org.apache.hadoop.io.Text;
@@ -74,16 +78,13 @@ import org.xml.sax.helpers.XMLReaderFact
  * @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
  * @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet
  */
-public class HftpFileSystem extends FileSystem
-    implements DelegationTokenRenewer.Renewable {
-  private static final DelegationTokenRenewer dtRenewer
-      = new DelegationTokenRenewer(HftpFileSystem.class);
-
+public class HftpFileSystem extends FileSystem {
   static {
     HttpURLConnection.setFollowRedirects(true);
-    dtRenewer.start();
   }
 
+  public static final int DEFAULT_PORT = 50070;
+  public static final int DEFAULT_SECURE_PORT = 50470;
   public static final Text TOKEN_KIND = new Text("HFTP delegation");
 
   protected UserGroupInformation ugi;
@@ -94,12 +95,16 @@ public class HftpFileSystem extends File
   public static final String HFTP_TIMEZONE = "UTC";
   public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
 
-
   private Token delegationToken;
   private Token renewToken;
+  public static final String HFTP_SERVICE_NAME_KEY = "hdfs.service.host_";
+
   private static final HftpDelegationTokenSelector hftpTokenSelector =
       new HftpDelegationTokenSelector();
+  private static final DelegationTokenSelector hdfsTokenSelector =
+      new DelegationTokenSelector();
+
   public static final SimpleDateFormat getDateFormat() {
     final SimpleDateFormat df = new SimpleDateFormat(HFTP_DATE_FORMAT);
     df.setTimeZone(TimeZone.getTimeZone(HFTP_TIMEZONE));
@@ -115,13 +120,11 @@ public class HftpFileSystem extends File
 
   @Override
   protected int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
+    return getConf().getInt("dfs.http.port", DEFAULT_PORT);
   }
 
   protected int getDefaultSecurePort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
+    return getConf().getInt("dfs.https.port", DEFAULT_SECURE_PORT);
   }
 
   protected InetSocketAddress getNamenodeAddr(URI uri) {
@@ -174,7 +177,7 @@ public class HftpFileSystem extends File
     if (token != null) {
       setDelegationToken(token);
       if (createdToken) {
-        dtRenewer.addRenewAction(this);
+        renewer.addTokenToRenew(this);
         LOG.debug("Created new DT for " + token.getService());
       } else {
         LOG.debug("Found existing DT for "
            + token.getService());
@@ -188,8 +191,26 @@ public class HftpFileSystem extends File
   }
 
   protected Token selectHdfsDelegationToken() {
-    return DelegationTokenSelector.selectHdfsDelegationToken(
-        nnAddr, ugi, getConf());
+    // this guesses the remote cluster's rpc service port.
+    // the current token design assumes it's the same as the local cluster's
+    // rpc port unless a config key is set. there should be a way to
+    // automatically and correctly determine the value
+    String key = HftpFileSystem.HFTP_SERVICE_NAME_KEY +
+        SecurityUtil.buildTokenService(nnSecureAddr);
+    String nnServiceName = getConf().get(key);
+    LOG.debug("Trying to find DT for " + getUri() + " using key=" + key +
+        "; conf=" + nnServiceName);
+
+    int nnRpcPort = NameNode.DEFAULT_PORT;
+    if (nnServiceName != null) {
+      nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
+    }
+
+    InetSocketAddress addr =
+        NetUtils.makeSocketAddr(nnAddr.getHostName(), nnRpcPort);
+    Text serviceName = SecurityUtil.buildTokenService(addr);
+
+    return hdfsTokenSelector.selectToken(serviceName, ugi.getTokens());
   }
 
   private static URI createUri(String scheme, InetSocketAddress addr) {
@@ -202,13 +223,7 @@ public class HftpFileSystem extends File
     return uri;
   }
 
-  @Override
-  public Token getRenewToken() {
-    return renewToken;
-  }
-
-  @Override
-  public void setDelegationToken(Token token) {
+  protected void setDelegationToken(Token token) {
     renewToken = token;
     // emulate the 203 usage of the tokens
     // by setting the kind and service as if they were hdfs tokens
@@ -660,6 +675,138 @@ public class HftpFileSystem extends File
     return cs != null? cs: super.getContentSummary(f);
   }
 
+  /**
+   * An action that will renew and replace the hftp file system's delegation
+   * tokens automatically.
+   */
+  private static class RenewAction implements Delayed {
+    // when should the renew happen
+    private long timestamp;
+    // a weak reference to the file system so that it can be garbage collected
+    private final WeakReference<HftpFileSystem> weakFs;
+
+    RenewAction(long timestamp, HftpFileSystem fs) {
+      this.timestamp = timestamp;
+      this.weakFs = new WeakReference<HftpFileSystem>(fs);
+    }
+
+    /**
+     * Get the delay until this event should happen.
+     */
+    @Override
+    public long getDelay(TimeUnit unit) {
+      long millisLeft = timestamp - System.currentTimeMillis();
+      return unit.convert(millisLeft, TimeUnit.MILLISECONDS);
+    }
+
+    /**
+     * Compare two events in the same queue.
+     */
+    @Override
+    public int compareTo(Delayed o) {
+      if (o.getClass() != RenewAction.class) {
+        throw new IllegalArgumentException
+            ("Illegal comparison to non-RenewAction");
+      }
+      RenewAction other = (RenewAction) o;
+      return timestamp < other.timestamp ? -1 :
+          (timestamp == other.timestamp ? 0 : 1);
+    }
+
+    /**
+     * Set a new time for the renewal. Can only be called when the action
+     * is not in the queue.
+     * @param newTime the new time
+     */
+    public void setNewTime(long newTime) {
+      timestamp = newTime;
+    }
+
+    /**
+     * Renew or replace the delegation token for this file system.
+     * @return true if the file system has not been garbage collected
+     * @throws IOException
+     */
+    public boolean renew() throws IOException, InterruptedException {
+      final HftpFileSystem fs = weakFs.get();
+      if (fs != null) {
+        synchronized (fs) {
+          try {
+            fs.renewToken.renew(fs.getConf());
+          } catch (IOException ie) {
+            try {
+              fs.setDelegationToken(fs.getDelegationToken(null));
+            } catch (IOException ie2) {
+              throw new IOException("Can't renew or get new delegation token ",
+                  ie);
+            }
+          }
+        }
+      }
+      return fs != null;
+    }
+
+    public String toString() {
+      StringBuilder result = new StringBuilder();
+      HftpFileSystem fs = weakFs.get();
+      if (fs == null) {
+        return "evaporated token renew";
+      }
+      synchronized (fs) {
+        result.append(fs.delegationToken);
+      }
+      result.append(" renew in ");
+      result.append(getDelay(TimeUnit.SECONDS));
+      result.append(" secs");
+      return result.toString();
+    }
+  }
+
+  /**
+   * A daemon thread that waits for the next file system to renew.
+   */
+  private static class RenewerThread extends Thread {
+    private DelayQueue<RenewAction> queue = new DelayQueue<RenewAction>();
+    // wait for 95% of a day between renewals
+    private final int RENEW_CYCLE = (int) (0.95 * 24 * 60 * 60 * 1000);
+
+    public RenewerThread() {
+      super("HFTP Delegation Token Renewer");
+      setDaemon(true);
+    }
+
+    public void addTokenToRenew(HftpFileSystem fs) {
+      queue.add(new RenewAction(RENEW_CYCLE + System.currentTimeMillis(), fs));
+    }
+
+    public void run() {
+      RenewAction action = null;
+      while (true) {
+        try {
+          action = queue.take();
+          if (action.renew()) {
+            action.setNewTime(RENEW_CYCLE + System.currentTimeMillis());
+            queue.add(action);
+          }
+          action = null;
+        } catch (InterruptedException ie) {
+          return;
+        } catch (Exception ie) {
+          if (action != null) {
+            LOG.warn("Failure to renew token " + action, ie);
+          } else {
+            LOG.warn("Failure in renew queue", ie);
+          }
+        }
+      }
+    }
+  }
+
+  private static RenewerThread renewer = new RenewerThread();
+  static {
+    renewer.start();
+  }
+
   @InterfaceAudience.Private
   public static class TokenManager extends TokenRenewer {
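The renewer restored above is a java.util.concurrent.DelayQueue consumer: each RenewAction reports its remaining delay, take() blocks until the earliest deadline expires, and a successfully renewed action is put back with a fresh deadline. A minimal standalone sketch of the same pattern, with a toy task in place of a real token -- the names here are illustrative, not Hadoop's:

    import java.util.concurrent.DelayQueue;
    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    public class PeriodicRenewer {
      static class Action implements Delayed {
        long deadline;       // absolute time (millis) when the action should fire
        final Runnable work;

        Action(long deadline, Runnable work) {
          this.deadline = deadline;
          this.work = work;
        }

        @Override
        public long getDelay(TimeUnit unit) {
          // remaining time until the deadline, converted to the caller's unit
          return unit.convert(deadline - System.currentTimeMillis(),
              TimeUnit.MILLISECONDS);
        }

        @Override
        public int compareTo(Delayed o) {
          // only Actions are ever queued here, so the cast is safe
          Action other = (Action) o;
          return deadline < other.deadline ? -1 :
              (deadline == other.deadline ? 0 : 1);
        }
      }

      public static void main(String[] args) throws InterruptedException {
        final long cycleMillis = 1000;   // 1s here; ~95% of a day in HFTP
        DelayQueue<Action> queue = new DelayQueue<Action>();
        queue.add(new Action(System.currentTimeMillis() + cycleMillis,
            new Runnable() {
              public void run() { System.out.println("renewed"); }
            }));

        for (int i = 0; i < 3; i++) {
          Action a = queue.take();       // blocks until the deadline passes
          a.work.run();                  // do the "renewal"
          a.deadline = System.currentTimeMillis() + cycleMillis;
          queue.add(a);                  // re-schedule, as RenewerThread does
        }
      }
    }

The weak reference in the real RenewAction is the other half of the design: when the file system is garbage collected, renew() returns false and the action simply drops out of the queue.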
Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java?rev=1179030&r1=1179029&r2=1179030&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java Wed Oct 5 00:45:30 2011
@@ -22,6 +22,7 @@ package org.apache.hadoop.hdfs.security.
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.Iterator;
 
 import org.apache.commons.logging.Log;

Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java?rev=1179030&r1=1179029&r2=1179030&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java Wed Oct 5 00:45:30 2011
@@ -18,15 +18,6 @@ package org.apache.hadoop.hdfs.security.token.delegation;
 
 //import org.apache.hadoop.classification.InterfaceAudience;
-import java.net.InetSocketAddress;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 
 /**
@@ -35,32 +26,8 @@ import org.apache.hadoop.security.token.
  */
 //@InterfaceAudience.Private
 public class DelegationTokenSelector
     extends AbstractDelegationTokenSelector{
-  private static final String SERVICE_NAME_KEY = "hdfs.service.host_";
-
-  private static final DelegationTokenSelector INSTANCE = new DelegationTokenSelector();
-
-  /** Select the delegation token for hdfs from the ugi. */
-  public static Token selectHdfsDelegationToken(
-      final InetSocketAddress nnAddr, final UserGroupInformation ugi,
-      final Configuration conf) {
-    // this guesses the remote cluster's rpc service port.
-    // the current token design assumes it's the same as the local cluster's
-    // rpc port unless a config key is set.
-    // there should be a way to automatically and correctly determine the value
-    final String key = SERVICE_NAME_KEY + SecurityUtil.buildTokenService(nnAddr);
-    final String nnServiceName = conf.get(key);
-
-    int nnRpcPort = NameNode.DEFAULT_PORT;
-    if (nnServiceName != null) {
-      nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
-    }
-
-    final Text serviceName = SecurityUtil.buildTokenService(
-        NetUtils.makeSocketAddr(nnAddr.getHostName(), nnRpcPort));
-    return INSTANCE.selectToken(serviceName, ugi.getTokens());
-  }
-
-  private DelegationTokenSelector() {
+  public DelegationTokenSelector() {
     super(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
   }
 }

Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1179030&r1=1179029&r2=1179030&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Wed Oct 5 00:45:30 2011
@@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -80,6 +79,7 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -137,8 +137,9 @@ public class NamenodeWebHdfsMethods {
     final Credentials c = DelegationTokenSecretManager.createCredentials(
         namenode, ugi, request.getUserPrincipal().getName());
     final Token t = c.getAllTokens().iterator().next();
-    t.setKind(WebHdfsFileSystem.TOKEN_KIND);
-    SecurityUtil.setTokenService(t, namenode.getNameNodeAddress());
+    t.setService(new Text(SecurityUtil.buildDTServiceName(
+        NameNode.getUri(namenode.getNameNodeAddress()),
+        NameNode.DEFAULT_PORT)));
     return t;
   }
 
@@ -266,21 +267,6 @@ public class NamenodeWebHdfsMethods {
       namenode.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue());
       return Response.ok().type(MediaType.APPLICATION_JSON).build();
     }
-    case RENEWDELEGATIONTOKEN:
-    {
-      final Token token = new Token();
-      token.decodeFromUrlString(delegation.getValue());
-      final long expiryTime = namenode.renewDelegationToken(token);
-      final String js = JsonUtil.toJsonString(PutOpParam.Op.RENEWDELEGATIONTOKEN, expiryTime);
-      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    }
-    case CANCELDELEGATIONTOKEN:
-    {
-      final Token token = new Token();
-      token.decodeFromUrlString(delegation.getValue());
-      namenode.cancelDelegationToken(token);
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
-    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
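Delegation tokens are matched to a file system by a service string of the form "ip:port" (built in Hadoop by SecurityUtil.buildTokenService and buildDTServiceName, both used in this diff). The hunk above pins the WebHDFS token's service to the namenode RPC port instead of the HTTP address, which is why HftpFileSystem has to guess the remote RPC port when selecting a token. A standalone sketch of the service-string construction, using only the JDK -- the ports are examples, not configuration values from this commit:

    import java.net.InetAddress;
    import java.net.InetSocketAddress;
    import java.net.UnknownHostException;

    public class TokenService {
      // build an "ip:port" service string, the key tokens are looked up by
      static String buildService(InetSocketAddress addr) throws UnknownHostException {
        InetAddress ip = InetAddress.getByName(addr.getHostName());
        return ip.getHostAddress() + ":" + addr.getPort();
      }

      public static void main(String[] args) throws UnknownHostException {
        // a token issued against the RPC port (e.g. 8020) will not match a
        // lookup keyed on the HTTP port (e.g. 50070)
        System.out.println(buildService(new InetSocketAddress("localhost", 8020)));
        System.out.println(buildService(new InetSocketAddress("localhost", 50070)));
      }
    }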
Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1179030&r1=1179029&r2=1179030&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Wed Oct 5 00:45:30 2011
@@ -24,11 +24,8 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.net.URL;
-import java.security.PrivilegedExceptionAction;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
@@ -37,20 +34,16 @@ import org.apache.hadoop.fs.ContentSumma
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.ByteRangeInputStream;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -73,46 +66,28 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.RenewerParam;
 import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenRenewer;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
 /** A FileSystem for HDFS over the web. */
-public class WebHdfsFileSystem extends FileSystem
-    implements DelegationTokenRenewer.Renewable {
+public class WebHdfsFileSystem extends HftpFileSystem {
   /** File System URI: {SCHEME}://namenode:port/path/to/file */
   public static final String SCHEME = "webhdfs";
   /** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */
   public static final String PATH_PREFIX = SCHEME;
-
   /** SPNEGO authenticator */
   private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
-  /** Delegation token kind */
-  public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
-
-  private static final DelegationTokenRenewer dtRenewer
-      = new DelegationTokenRenewer(WebHdfsFileSystem.class);
-  static {
-    dtRenewer.start();
-  }
 
   private final UserGroupInformation ugi;
-  private InetSocketAddress nnAddr;
-  private Token delegationToken;
-  private Token renewToken;
   private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
-  private Path workingDir;
+  protected Path workingDir;
 
   {
     try {
@@ -128,57 +103,7 @@ public class WebHdfsFileSystem extends F
     super.initialize(uri, conf);
     setConf(conf);
 
-    this.nnAddr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
     this.workingDir = getHomeDirectory();
-
-    if (UserGroupInformation.isSecurityEnabled()) {
-      initDelegationToken();
-    }
-  }
-
-  protected void initDelegationToken() throws IOException {
-    // look for webhdfs token, then try hdfs
-    final Text serviceName = SecurityUtil.buildTokenService(nnAddr);
-    Token token = webhdfspTokenSelector.selectToken(
-        serviceName, ugi.getTokens());
-    if (token == null) {
-      token = DelegationTokenSelector.selectHdfsDelegationToken(
-          nnAddr, ugi, getConf());
-    }
-
-    //since we don't already have a token, go get one
-    boolean createdToken = false;
-    if (token == null) {
-      token = getDelegationToken(null);
-      createdToken = (token != null);
-    }
-
-    // security might be disabled
-    if (token != null) {
-      setDelegationToken(token);
-      if (createdToken) {
-        dtRenewer.addRenewAction(this);
-        LOG.debug("Created new DT for " + token.getService());
-      } else {
-        LOG.debug("Found existing DT for " + token.getService());
-      }
-    }
-  }
-
-  @Override
-  protected int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
-  }
-
-  @Override
-  public URI getUri() {
-    try {
-      return new URI(SCHEME, null, nnAddr.getHostName(), nnAddr.getPort(),
-          null, null, null);
-    } catch (URISyntaxException e) {
-      return null;
-    }
   }
 
   @Override
@@ -243,7 +168,7 @@ public class WebHdfsFileSystem extends F
    * @return namenode URL referring to the given path
    * @throws IOException on error constructing the URL
    */
-  private URL getNamenodeURL(String path, String query) throws IOException {
+  protected URL getNamenodeURL(String path, String query) throws IOException {
     final URL url = new URL("http", nnAddr.getHostName(), nnAddr.getPort(),
         path + '?' + query);
     if (LOG.isTraceEnabled()) {
@@ -252,18 +177,6 @@ public class WebHdfsFileSystem extends F
     return url;
   }
 
-  private String addDt2Query(String query) throws IOException {
-    if (UserGroupInformation.isSecurityEnabled()) {
-      synchronized (this) {
-        if (delegationToken != null) {
-          final String encoded = delegationToken.encodeToUrlString();
-          return query + JspHelper.getDelegationTokenUrlParam(encoded);
-        } // else we are talking to an insecure cluster
-      }
-    }
-    return query;
-  }
-
   URL toUrl(final HttpOpParam.Op op, final Path fspath,
      final Param... parameters) throws IOException {
    //initialize URI path and query
@@ -272,7 +185,7 @@ public class WebHdfsFileSystem extends F
     final String query = op.toQueryString()
         + '&' + new UserParam(ugi)
         + Param.toSortedString("&", parameters);
-    final URL url = getNamenodeURL(path, addDt2Query(query));
+    final URL url = getNamenodeURL(path, updateQuery(query));
     if (LOG.isTraceEnabled()) {
       LOG.trace("url=" + url);
     }
@@ -456,11 +369,6 @@ public class WebHdfsFileSystem extends F
   }
 
   @Override
-  public boolean delete(final Path f) throws IOException {
-    return delete(f, true);
-  }
-
-  @Override
   public boolean delete(Path f, boolean recursive) throws IOException {
     final HttpOpParam.Op op = DeleteOpParam.Op.DELETE;
     final Map json = run(op, f, new RecursiveParam(recursive));
@@ -504,41 +412,6 @@ public class WebHdfsFileSystem extends F
   }
 
   @Override
-  public Token getRenewToken() {
-    return renewToken;
-  }
-
-  @Override
-  public synchronized void setDelegationToken(
-      final Token token) {
-    renewToken = token;
-    // emulate the 203 usage of the tokens
-    // by setting the kind and service as if they were hdfs tokens
-    delegationToken = new Token(token);
-    // NOTE: the remote nn must be configured to use hdfs
-    delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
-    // no need to change service because we aren't exactly sure what it
-    // should be. we can guess, but it might be wrong if the local conf
-    // value is incorrect. the service is a client side field, so the remote
-    // end does not care about the value
-  }
-
-  private synchronized long renewDelegationToken(final Token token
-      ) throws IOException {
-    delegationToken = token;
-    final HttpOpParam.Op op = PutOpParam.Op.RENEWDELEGATIONTOKEN;
-    final Map m = run(op, null);
-    return (Long)m.get(op.toString());
-  }
-
-  private synchronized void cancelDelegationToken(final Token token
-      ) throws IOException {
-    delegationToken = token;
-    final HttpOpParam.Op op = PutOpParam.Op.CANCELDELEGATIONTOKEN;
-    run(op, null);
-  }
-
-  @Override
   public BlockLocation[] getFileBlockLocations(final FileStatus status,
       final long offset, final long length) throws IOException {
     if (status == null) {
@@ -571,65 +444,4 @@ public class WebHdfsFileSystem extends F
     final Map m = run(op, p);
     return JsonUtil.toMD5MD5CRC32FileChecksum(m);
   }
-
-
-  private static final DtSelector webhdfspTokenSelector = new DtSelector();
-
-  private static class DtSelector
-      extends AbstractDelegationTokenSelector {
-    private DtSelector() {
-      super(TOKEN_KIND);
-    }
-  }
-
-  /** Delegation token renewer. */
-  public static class DtRenewer extends TokenRenewer {
-    @Override
-    public boolean handleKind(Text kind) {
-      return kind.equals(TOKEN_KIND);
-    }
-
-    @Override
-    public boolean isManaged(Token token) throws IOException {
-      return true;
-    }
-
-    @Override
-    public long renew(final Token token, final Configuration conf
-        ) throws IOException, InterruptedException {
-      final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
-      // update the kerberos credentials, if they are coming from a keytab
-      ugi.checkTGTAndReloginFromKeytab();
-
-      final String uri = WebHdfsFileSystem.SCHEME + "://"
-          + conf.get("dfs.http.address");
-      final WebHdfsFileSystem webhdfs = ugi.doAs(new PrivilegedExceptionAction() {
-        @Override
-        public WebHdfsFileSystem run() throws Exception {
-          return (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
-        }
-      });
-
-      return webhdfs.renewDelegationToken(token);
-    }
-
-    @Override
-    public void cancel(final Token token, final Configuration conf
-        ) throws IOException, InterruptedException {
-      final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
-      // update the kerberos credentials, if they are coming from a keytab
-      ugi.checkTGTAndReloginFromKeytab();
-
-      final String uri = WebHdfsFileSystem.SCHEME + "://"
-          + conf.get("dfs.http.address");
-      final WebHdfsFileSystem webhdfs = ugi.doAs(new PrivilegedExceptionAction() {
-        @Override
-        public WebHdfsFileSystem run() throws Exception {
-          return (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
-        }
-      });
-
-      webhdfs.cancelDelegationToken(token);
-    }
-  }
 }
\ No newline at end of file

Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/web/resources/PutOpParam.java?rev=1179030&r1=1179029&r2=1179030&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/web/resources/PutOpParam.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/web/resources/PutOpParam.java Wed Oct 5 00:45:30 2011
@@ -33,9 +33,6 @@ public class PutOpParam extends HttpOpPa
     SETPERMISSION(false, HttpURLConnection.HTTP_OK),
     SETTIMES(false, HttpURLConnection.HTTP_OK),
 
-    RENEWDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK),
-    CANCELDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK),
-
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
     final boolean doOutput;
Modified: hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1179030&r1=1179029&r2=1179030&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestHftpFileSystem.java Wed Oct 5 00:45:30 2011
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.*;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
 
 import java.io.IOException;
 import java.net.URI;
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.junit.Before;
@@ -52,15 +53,15 @@ public class TestHftpFileSystem {
     URI uri = URI.create("hftp://localhost");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+    assertEquals(HftpFileSystem.DEFAULT_PORT, fs.getDefaultPort());
+    assertEquals(HftpFileSystem.DEFAULT_SECURE_PORT, fs.getDefaultSecurePort());
 
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fsUri.getPort());
+    assertEquals(HftpFileSystem.DEFAULT_PORT, fsUri.getPort());
 
     assertEquals(
-        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        "127.0.0.1:"+HftpFileSystem.DEFAULT_SECURE_PORT,
         fs.getCanonicalServiceName()
     );
   }
@@ -93,15 +94,15 @@ public class TestHftpFileSystem {
     URI uri = URI.create("hftp://localhost:123");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+    assertEquals(HftpFileSystem.DEFAULT_PORT, fs.getDefaultPort());
+    assertEquals(HftpFileSystem.DEFAULT_SECURE_PORT, fs.getDefaultSecurePort());
 
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
     assertEquals(uri.getPort(), fsUri.getPort());
 
     assertEquals(
-        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        "127.0.0.1:"+HftpFileSystem.DEFAULT_SECURE_PORT,
         fs.getCanonicalServiceName()
     );
   }
@@ -136,15 +137,15 @@ public class TestHftpFileSystem {
     URI uri = URI.create("hsftp://localhost");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+    assertEquals(HsftpFileSystem.DEFAULT_SECURE_PORT, fs.getDefaultPort());
+    assertEquals(HsftpFileSystem.DEFAULT_SECURE_PORT, fs.getDefaultSecurePort());
 
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fsUri.getPort());
+    assertEquals(HsftpFileSystem.DEFAULT_SECURE_PORT, fsUri.getPort());
 
     assertEquals(
-        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        "127.0.0.1:"+HsftpFileSystem.DEFAULT_SECURE_PORT,
         fs.getCanonicalServiceName()
     );
   }
@@ -177,8 +178,8 @@ public class TestHftpFileSystem {
     URI uri = URI.create("hsftp://localhost:123");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+    assertEquals(HsftpFileSystem.DEFAULT_SECURE_PORT, fs.getDefaultPort());
+    assertEquals(HsftpFileSystem.DEFAULT_SECURE_PORT, fs.getDefaultSecurePort());
 
     URI fsUri = fs.getUri();
     assertEquals(uri.getHost(), fsUri.getHost());
@@ -234,13 +235,13 @@ public class TestHftpFileSystem {
 
     @SuppressWarnings("unchecked")
     @Override
-    public void setDelegationToken(Token token) {
+    protected void setDelegationToken(Token token) {
      gotToken = (Token) token;
    }
  }

  static Token makeDummyToken(String kind) {
-    Token token = new Token();
+    Token token = new Token();
    token.setKind(new Text(kind));
    return token;
  }