hadoop-common-commits mailing list archives

From: jiten...@apache.org
Subject: git commit: HDFS-6904. YARN unable to renew delegation token fetched via webhdfs due to incorrect service port.
Date: Fri, 24 Oct 2014 18:43:47 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 13c60bac5 -> b576890f3


HDFS-6904. YARN unable to renew delegation token fetched via webhdfs due to incorrect service port.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b576890f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b576890f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b576890f

Branch: refs/heads/branch-2
Commit: b576890f35ce3f2b09795c6385116b349acf9158
Parents: 13c60ba
Author: Jitendra Pandey <Jitendra@Jitendra-Pandeys-MacBook-Pro-4.local>
Authored: Fri Oct 24 11:35:19 2014 -0700
Committer: Jitendra Pandey <Jitendra@Jitendra-Pandeys-MacBook-Pro-4.local>
Committed: Fri Oct 24 11:36:54 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../web/resources/NamenodeWebHdfsMethods.java   | 73 ++++++++------------
 .../hadoop-hdfs/src/site/apt/WebHDFS.apt.vm     | 47 ++++++++++++-
 .../hadoop/hdfs/web/TestWebHdfsTokens.java      | 70 +++++++++++++++++--
 4 files changed, 142 insertions(+), 51 deletions(-)
----------------------------------------------------------------------
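
For context, the "service" field of a delegation token tells a renewer which
address to contact, which is why a wrong port breaks renewal. A minimal
sketch of the renewal-side lookup (the RenewalTarget class is illustrative
only; SecurityUtil.getTokenServiceAddr is the real Hadoop API):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.token.Token;

    class RenewalTarget {
      // Resolves the host:port a renewer (e.g. the YARN RM) contacts
      // when renewing this token.
      static InetSocketAddress resolve(Token<?> token) {
        // Before this fix, a token fetched via webhdfs could carry the
        // namenode's HTTP port here, so renewal RPCs hit the wrong port.
        return SecurityUtil.getTokenServiceAddr(token);
      }
    }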


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b576890f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f08112c..bbb2df7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -372,6 +372,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7228. Add an SSD policy into the default BlockStoragePolicySuite.
     (jing9)
 
+    HDFS-6904. YARN unable to renew delegation token fetched via webhdfs 
+    due to incorrect service port. (jitendra)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b576890f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index f8c0fc2..e688bb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -75,44 +75,7 @@ import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.ParamFilter;
 import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
-import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;
-import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
-import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
-import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;
-import org.apache.hadoop.hdfs.web.resources.CreateParentParam;
-import org.apache.hadoop.hdfs.web.resources.DelegationParam;
-import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
-import org.apache.hadoop.hdfs.web.resources.DestinationParam;
-import org.apache.hadoop.hdfs.web.resources.DoAsParam;
-import org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam;
-import org.apache.hadoop.hdfs.web.resources.GetOpParam;
-import org.apache.hadoop.hdfs.web.resources.GroupParam;
-import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
-import org.apache.hadoop.hdfs.web.resources.LengthParam;
-import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
-import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
-import org.apache.hadoop.hdfs.web.resources.OffsetParam;
-import org.apache.hadoop.hdfs.web.resources.OldSnapshotNameParam;
-import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
-import org.apache.hadoop.hdfs.web.resources.OwnerParam;
-import org.apache.hadoop.hdfs.web.resources.Param;
-import org.apache.hadoop.hdfs.web.resources.PermissionParam;
-import org.apache.hadoop.hdfs.web.resources.PostOpParam;
-import org.apache.hadoop.hdfs.web.resources.PutOpParam;
-import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
-import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
-import org.apache.hadoop.hdfs.web.resources.RenewerParam;
-import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
-import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;
-import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
-import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
-import org.apache.hadoop.hdfs.web.resources.UserParam;
-import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;
-import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
-import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
-import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
-import org.apache.hadoop.hdfs.web.resources.FsActionParam;
+import org.apache.hadoop.hdfs.web.resources.*;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;
@@ -758,10 +721,15 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
           final ExcludeDatanodesParam excludeDatanodes,
       @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
-          final FsActionParam fsAction
+          final FsActionParam fsAction,
+      @QueryParam(TokenKindParam.NAME) @DefaultValue(TokenKindParam.DEFAULT)
+          final TokenKindParam tokenKind,
+      @QueryParam(TokenServiceParam.NAME) @DefaultValue(TokenServiceParam.DEFAULT)
+          final TokenServiceParam tokenService
       ) throws IOException, InterruptedException {
     return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
-        renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes, fsAction);
+        renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes, fsAction,
+        tokenKind, tokenService);
   }
 
   /** Handle HTTP GET request. */
@@ -794,11 +762,16 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
           final ExcludeDatanodesParam excludeDatanodes,
       @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
-          final FsActionParam fsAction
+          final FsActionParam fsAction,
+      @QueryParam(TokenKindParam.NAME) @DefaultValue(TokenKindParam.DEFAULT)
+          final TokenKindParam tokenKind,
+      @QueryParam(TokenServiceParam.NAME) @DefaultValue(TokenServiceParam.DEFAULT)
+          final TokenServiceParam tokenService
       ) throws IOException, InterruptedException {
 
     init(ugi, delegation, username, doAsUser, path, op, offset, length,
-        renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction);
+        renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction,
+        tokenKind, tokenService);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
@@ -806,7 +779,8 @@ public class NamenodeWebHdfsMethods {
         try {
           return get(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
-              xattrNames, xattrEncoding, excludeDatanodes, fsAction);
+              xattrNames, xattrEncoding, excludeDatanodes, fsAction, tokenKind,
+              tokenService);
         } finally {
           reset();
         }
@@ -828,7 +802,9 @@ public class NamenodeWebHdfsMethods {
       final List<XAttrNameParam> xattrNames,
       final XAttrEncodingParam xattrEncoding,
       final ExcludeDatanodesParam excludeDatanodes,
-      final FsActionParam fsAction
+      final FsActionParam fsAction,
+      final TokenKindParam tokenKind,
+      final TokenServiceParam tokenService
       ) throws IOException, URISyntaxException {
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final NamenodeProtocols np = getRPCServer(namenode);
@@ -885,6 +861,15 @@ public class NamenodeWebHdfsMethods {
       }
       final Token<? extends TokenIdentifier> token = generateDelegationToken(
           namenode, ugi, renewer.getValue());
+
+      final String setServiceName = tokenService.getValue();
+      final String setKind = tokenKind.getValue();
+      if (setServiceName != null) {
+        token.setService(new Text(setServiceName));
+      }
+      if (setKind != null) {
+        token.setKind(new Text(setKind));
+      }
       final String js = JsonUtil.toJsonString(token);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
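
The two new query parameters are backed by TokenKindParam and
TokenServiceParam, whose sources are not quoted in this message. Assuming
they follow the pattern of the existing string parameters such as
RenewerParam, a sketch of the kind parameter would look roughly like this
(illustrative, not the committed source):

    package org.apache.hadoop.hdfs.web.resources;

    public class TokenKindParam extends StringParam {
      /** Parameter name. */
      public static final String NAME = "kind";
      /** Default parameter value. */
      public static final String DEFAULT = NULL;

      private static final Domain DOMAIN = new Domain(NAME, null);

      public TokenKindParam(final String str) {
        super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
      }

      @Override
      public String getName() {
        return NAME;
      }
    }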

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b576890f/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
index c3f6a6b..54cd2ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
@@ -1210,7 +1210,7 @@ Content-Length: 0
   * Submit a HTTP GET request.
 
 +---------------------------------
-curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKEN&renewer=<USER>"
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKEN&renewer=<USER>&service=<SERVICE>&kind=<KIND>"
 +---------------------------------
 
  The client receives a response with a {{{Token JSON Schema}<<<Token>>> JSON object}}:
@@ -1232,7 +1232,10 @@ Transfer-Encoding: chunked
 
   See also:
   {{{Renewer}<<<renewer>>>}},
-   {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getDelegationToken
+   {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getDelegationToken,
+  {{{Token Kind}<<<kind>>>}},
+  {{{Token Service}<<<service>>>}}
+
 
 
 ** {Get Delegation Tokens}
@@ -2518,6 +2521,46 @@ var tokenProperties =
   {{{Cancel Delegation Token}<<<CANCELDELEGATIONTOKEN>>>}}
 
 
+** {Token Kind}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<kind>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | The kind of the delegation token requested |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> (Server sets the default kind for the service) |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | A string that represents token kind, e.g. "HDFS_DELEGATION_TOKEN" or "WEBHDFS delegation" |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | Any string. |
+*----------------+-------------------------------------------------------------------+
+
+  See also:
+  {{{Get Delegation Token}<<<GETDELEGATIONTOKEN>>>}}
+
+
+** {Token Service}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<service>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | The name of the service where the token is supposed to be used, e.g. ip:port of the namenode |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | ip:port in string format or logical name of the service |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | Any string. |
+*----------------+-------------------------------------------------------------------+
+
+  See also:
+  {{{Get Delegation Token}<<<GETDELEGATIONTOKEN>>>}}
+
+
 ** {Username}
 
 *----------------+-------------------------------------------------------------------+
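
Once the server honors the requested kind and service, renewal goes through
the standard token machinery: Token.renew dispatches on the token kind to
locate a TokenRenewer, which then contacts the address stored in the service
field. A brief client-side sketch, assuming the token was decoded from the
JSON response above (the RenewExample wrapper is illustrative; Token.renew
is the real API):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.token.Token;

    class RenewExample {
      static long renew(Token<?> token, Configuration conf) throws Exception {
        // With kind/service set as requested, this reaches the intended
        // RPC endpoint instead of the namenode's HTTP port.
        return token.renew(conf);
      }
    }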

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b576890f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
index 4c8f125..eb16259 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
@@ -28,10 +28,15 @@ import static org.mockito.Mockito.*;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.net.URL;
+import java.net.URLConnection;
 import java.security.PrivilegedExceptionAction;
+import java.util.Map;
 
+import org.apache.commons.httpclient.HttpConnection;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -41,22 +46,21 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
-import org.apache.hadoop.hdfs.web.resources.GetOpParam;
-import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
-import org.apache.hadoop.hdfs.web.resources.PostOpParam;
-import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.hdfs.web.resources.*;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 public class TestWebHdfsTokens {
   private static Configuration conf;
@@ -234,6 +238,62 @@ public class TestWebHdfsTokens {
         }
      }
   }
+
+  @Test
+  public void testSetTokenServiceAndKind() throws Exception {
+    MiniDFSCluster cluster = null;
+
+    try {
+      final Configuration clusterConf = new HdfsConfiguration(conf);
+      SecurityUtil.setAuthenticationMethod(SIMPLE, clusterConf);
+      clusterConf.setBoolean(DFSConfigKeys
+              .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+      // trick the NN into thinking security is enabled w/o it trying
+      // to login from a keytab
+      UserGroupInformation.setConfiguration(clusterConf);
+      cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(0).build();
+      cluster.waitActive();
+      SecurityUtil.setAuthenticationMethod(KERBEROS, clusterConf);
+      final WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem
+              (clusterConf, "webhdfs");
+      Whitebox.setInternalState(fs, "canRefreshDelegationToken", true);
+
+      URLConnectionFactory factory = new URLConnectionFactory(new ConnectionConfigurator() {
+        @Override
+        public HttpURLConnection configure(HttpURLConnection conn)
+                throws IOException {
+          return conn;
+        }
+      }) {
+        @Override
+        public URLConnection openConnection(URL url) throws IOException {
+          return super.openConnection(new URL(url + "&service=foo&kind=bar"));
+        }
+      };
+      Whitebox.setInternalState(fs, "connectionFactory", factory);
+      Token<?> token1 = fs.getDelegationToken();
+      Assert.assertEquals(new Text("bar"), token1.getKind());
+
+      final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
+      Token<DelegationTokenIdentifier> token2 =
+          fs.new FsPathResponseRunner<Token<DelegationTokenIdentifier>>(
+              op, null, new RenewerParam(null)) {
+            @Override
+            Token<DelegationTokenIdentifier> decodeResponse(Map<?, ?> json)
+                throws IOException {
+              return JsonUtil.toDelegationToken(json);
+            }
+          }.run();
+
+      Assert.assertEquals(new Text("bar"), token2.getKind());
+      Assert.assertEquals(new Text("foo"), token2.getService());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
   
   @SuppressWarnings("unchecked")
   private void validateLazyTokenFetch(final Configuration clusterConf) throws Exception{
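
Note on the test technique: rather than plumbing the new parameters through
the client API, the test swaps in a URLConnectionFactory whose
openConnection appends "&service=foo&kind=bar" to every request URL, so the
GETDELEGATIONTOKEN request effectively becomes
http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKEN&renewer=<USER>&service=foo&kind=bar.
The assertions then verify that the returned token carries kind "bar" and
service "foo".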

