Return-Path: Delivered-To: apmail-hadoop-hdfs-commits-archive@minotaur.apache.org Received: (qmail 40140 invoked from network); 3 Jun 2010 17:49:28 -0000 Received: from unknown (HELO mail.apache.org) (140.211.11.3) by 140.211.11.9 with SMTP; 3 Jun 2010 17:49:28 -0000 Received: (qmail 59862 invoked by uid 500); 3 Jun 2010 17:49:27 -0000 Delivered-To: apmail-hadoop-hdfs-commits-archive@hadoop.apache.org Received: (qmail 59821 invoked by uid 500); 3 Jun 2010 17:49:27 -0000 Mailing-List: contact hdfs-commits-help@hadoop.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: hdfs-dev@hadoop.apache.org Delivered-To: mailing list hdfs-commits@hadoop.apache.org Received: (qmail 59813 invoked by uid 99); 3 Jun 2010 17:49:27 -0000 Received: from nike.apache.org (HELO nike.apache.org) (192.87.106.230) by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 03 Jun 2010 17:49:27 +0000 X-ASF-Spam-Status: No, hits=-2000.0 required=10.0 tests=ALL_TRUSTED X-Spam-Check-By: apache.org Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4) by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 03 Jun 2010 17:49:22 +0000 Received: by eris.apache.org (Postfix, from userid 65534) id 59EAA23888EA; Thu, 3 Jun 2010 17:48:59 +0000 (UTC) Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit Subject: svn commit: r951086 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/server/protocol/ src/java/org/apache/hadoop/hdfs... Date: Thu, 03 Jun 2010 17:48:59 -0000 To: hdfs-commits@hadoop.apache.org From: boryas@apache.org X-Mailer: svnmailer-1.0.8 Message-Id: <20100603174859.59EAA23888EA@eris.apache.org> X-Virus-Checked: Checked by ClamAV on apache.org Author: boryas Date: Thu Jun 3 17:48:58 2010 New Revision: 951086 URL: http://svn.apache.org/viewvc?rev=951086&view=rev Log: HDFS-1096. 
allow dfsadmin/mradmin refresh of superuser proxy group mappings Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestRefreshUserMappings.java Removed: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java Modified: hadoop/hdfs/trunk/CHANGES.txt hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Modified: hadoop/hdfs/trunk/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=951086&r1=951085&r2=951086&view=diff ============================================================================== --- hadoop/hdfs/trunk/CHANGES.txt (original) +++ hadoop/hdfs/trunk/CHANGES.txt Thu Jun 3 17:48:58 2010 @@ -9,6 +9,9 @@ Trunk (unreleased changes) IMPROVEMENTS + HDFS-1096. allow dfsadmin/mradmin refresh of superuser proxy group + mappings (boryas) + HDFS-1146. Javadoc for getDelegationTokenSecretManager in FSNamesystem (jnp via boryas) HDFS-1132. 
Refactor TestFileStatus (Eli Collins via cos) Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java?rev=951086&r1=951085&r2=951086&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java (original) +++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java Thu Jun 3 17:48:58 2010 @@ -22,7 +22,7 @@ import org.apache.hadoop.hdfs.protocol.C import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; -import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol; +import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; import org.apache.hadoop.security.authorize.Service; @@ -42,8 +42,8 @@ public class HDFSPolicyProvider extends new Service("security.namenode.protocol.acl", NamenodeProtocol.class), new Service("security.refresh.policy.protocol.acl", RefreshAuthorizationPolicyProtocol.class), - new Service("security.refresh.usertogroups.mappings.protocol.acl", - RefreshUserToGroupMappingsProtocol.class), + new Service("security.refresh.user.mappings.protocol.acl", + RefreshUserMappingsProtocol.class), }; @Override Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=951086&r1=951085&r2=951086&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original) 
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Thu Jun 3 17:48:58 2010 @@ -599,7 +599,7 @@ public interface ClientProtocol extends *
  *   2 get safe mode state.
  * @return
  *   0 if the safe mode is OFF or
  *   1 if the safe mode is ON.
    - ¥ + * * @throws IOException */ public boolean setSafeMode(FSConstants.SafeModeAction action) Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=951086&r1=951085&r2=951086&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original) +++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Jun 3 17:48:58 2010 @@ -31,17 +31,19 @@ import org.apache.commons.logging.LogFac import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; -import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Trash; -import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HDFSPolicyProvider; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.ClientProtocol; @@ -55,6 +57,7 @@ import org.apache.hadoop.hdfs.protocol.L import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import 
org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; @@ -71,9 +74,6 @@ import org.apache.hadoop.hdfs.server.pro import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.NodeRegistration; import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; @@ -84,14 +84,14 @@ import org.apache.hadoop.net.NetworkTopo import org.apache.hadoop.net.Node; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.Groups; -import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol; +import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.SecretManager.InvalidToken; -import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.util.ServicePlugin; import org.apache.hadoop.util.StringUtils; @@ -145,8 +145,8 @@ public class NameNode implements Namenod return NamenodeProtocol.versionID; } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){ return RefreshAuthorizationPolicyProtocol.versionID; - } else if (protocol.equals(RefreshUserToGroupMappingsProtocol.class.getName())){ - return 
RefreshUserToGroupMappingsProtocol.versionID; + } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){ + return RefreshUserMappingsProtocol.versionID; } else { throw new IOException("Unknown protocol to name node: " + protocol); } @@ -1272,6 +1272,13 @@ public class NameNode implements Namenod Groups.getUserToGroupsMappingService(conf).refresh(); } + @Override + public void refreshSuperUserGroupsConfiguration(Configuration conf) { + LOG.info("Refreshing SuperUser proxy group mapping list "); + + ProxyUsers.refreshSuperUserGroupsConfiguration(conf); + } + private static void printUsage() { System.err.println( "Usage: java NameNode [" + Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java?rev=951086&r1=951085&r2=951086&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java (original) +++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java Thu Jun 3 17:48:58 2010 @@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.pr import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; -import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol; +import org.apache.hadoop.security.RefreshUserMappingsProtocol; /** The full set of RPC methods implemented by the Namenode. 
*/ public interface NamenodeProtocols @@ -28,5 +28,5 @@ public interface NamenodeProtocols DatanodeProtocol, NamenodeProtocol, RefreshAuthorizationPolicyProtocol, - RefreshUserToGroupMappingsProtocol { + RefreshUserMappingsProtocol { } Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=951086&r1=951085&r2=951086&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original) +++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Thu Jun 3 17:48:58 2010 @@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.server.nam import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol; +import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; import org.apache.hadoop.util.StringUtils; @@ -475,6 +475,7 @@ public class DFSAdmin extends FsShell { "\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" + "\t[-refreshServiceAcl]\n" + "\t[-refreshUserToGroupsMappings]\n" + + "\t[refreshSuperUserGroupsConfiguration]\n" + "\t[-printTopology]\n" + "\t[-help [cmd]]\n"; @@ -532,6 +533,9 @@ public class DFSAdmin extends FsShell { String refreshUserToGroupsMappings = "-refreshUserToGroupsMappings: Refresh user-to-groups mappings\n"; + String refreshSuperUserGroupsConfiguration = + "-refreshSuperUserGroupsConfiguration: Refresh superuser proxy groups mappings\n"; + String printTopology = "-printTopology: Print a tree of the racks and their\n" + "\t\tnodes as reported by the Namenode\n"; @@ -566,6 +570,8 @@ public class DFSAdmin extends FsShell { 
System.out.println(refreshServiceAcl); } else if ("refreshUserToGroupsMappings".equals(cmd)) { System.out.println(refreshUserToGroupsMappings); + } else if ("refreshSuperUserGroupsConfiguration".equals(cmd)) { + System.out.println(refreshSuperUserGroupsConfiguration); } else if ("printTopology".equals(cmd)) { System.out.println(printTopology); } else if ("help".equals(cmd)) { @@ -585,6 +591,8 @@ public class DFSAdmin extends FsShell { System.out.println(SetSpaceQuotaCommand.DESCRIPTION); System.out.println(ClearSpaceQuotaCommand.DESCRIPTION); System.out.println(refreshServiceAcl); + System.out.println(refreshUserToGroupsMappings); + System.out.println(refreshSuperUserGroupsConfiguration); System.out.println(printTopology); System.out.println(help); System.out.println(); @@ -767,20 +775,51 @@ public class DFSAdmin extends FsShell { conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "")); // Create the client - RefreshUserToGroupMappingsProtocol refreshProtocol = - (RefreshUserToGroupMappingsProtocol) - RPC.getProxy(RefreshUserToGroupMappingsProtocol.class, - RefreshUserToGroupMappingsProtocol.versionID, - NameNode.getAddress(conf), getUGI(), conf, - NetUtils.getSocketFactory(conf, - RefreshUserToGroupMappingsProtocol.class)); - + RefreshUserMappingsProtocol refreshProtocol = + (RefreshUserMappingsProtocol) + RPC.getProxy(RefreshUserMappingsProtocol.class, + RefreshUserMappingsProtocol.versionID, + NameNode.getAddress(conf), getUGI(), conf, + NetUtils.getSocketFactory(conf, + RefreshUserMappingsProtocol.class)); + // Refresh the user-to-groups mappings refreshProtocol.refreshUserToGroupsMappings(conf); return 0; } + + /** + * refreshSuperUserGroupsConfiguration {@link NameNode}. 
+ * @return exitcode 0 on success, non-zero on failure + * @throws IOException + */ + public int refreshSuperUserGroupsConfiguration() throws IOException { + // Get the current configuration + Configuration conf = getConf(); + + // for security authorization + // server principal for this call + // should be NAMENODE's one. + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, + conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "")); + + // Create the client + RefreshUserMappingsProtocol refreshProtocol = + (RefreshUserMappingsProtocol) + RPC.getProxy(RefreshUserMappingsProtocol.class, + RefreshUserMappingsProtocol.versionID, + NameNode.getAddress(conf), getUGI(), conf, + NetUtils.getSocketFactory(conf, + RefreshUserMappingsProtocol.class)); + + // Refresh the user-to-groups mappings + refreshProtocol.refreshSuperUserGroupsConfiguration(conf); + + return 0; + } + /** * Displays format of commands. * @param cmd The command that is being executed. @@ -828,6 +867,9 @@ public class DFSAdmin extends FsShell { } else if ("-refreshUserToGroupsMappings".equals(cmd)) { System.err.println("Usage: java DFSAdmin" + " [-refreshUserToGroupsMappings]"); + } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [-refreshSuperUserGroupsConfiguration]"); } else if ("-printTopology".equals(cmd)) { System.err.println("Usage: java DFSAdmin" + " [-printTopology]"); @@ -843,6 +885,7 @@ public class DFSAdmin extends FsShell { System.err.println(" [-metasave filename]"); System.err.println(" [-refreshServiceAcl]"); System.err.println(" [-refreshUserToGroupsMappings]"); + System.err.println(" [-refreshSuperUserGroupsConfiguration]"); System.err.println(" [-printTopology]"); System.err.println(" ["+SetQuotaCommand.USAGE+"]"); System.err.println(" ["+ClearQuotaCommand.USAGE+"]"); @@ -973,6 +1016,8 @@ public class DFSAdmin extends FsShell { exitCode = refreshServiceAcl(); } else if 
("-refreshUserToGroupsMappings".equals(cmd)) { exitCode = refreshUserToGroupsMappings(); + } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) { + exitCode = refreshSuperUserGroupsConfiguration(); } else if ("-printTopology".equals(cmd)) { exitCode = printTopology(); } else if ("-help".equals(cmd)) { Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestRefreshUserMappings.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestRefreshUserMappings.java?rev=951086&view=auto ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestRefreshUserMappings.java (added) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestRefreshUserMappings.java Thu Jun 3 17:48:58 2010 @@ -0,0 +1,202 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.security; + + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.tools.DFSAdmin; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + + +public class TestRefreshUserMappings { + private MiniDFSCluster cluster; + Configuration config; + private static long groupRefreshTimeoutSec = 1; + + public static class MockUnixGroupsMapping implements GroupMappingServiceProvider { + private int i=0; + + @Override + public List getGroups(String user) throws IOException { + System.out.println("Getting groups in MockUnixGroupsMapping"); + String g1 = user + (10 * i + 1); + String g2 = user + (10 * i + 2); + List l = new ArrayList(2); + l.add(g1); + l.add(g2); + i++; + return l; + } + } + + @Before + public void setUp() throws Exception { + config = new Configuration(); + config.setClass("hadoop.security.group.mapping", + TestRefreshUserMappings.MockUnixGroupsMapping.class, + GroupMappingServiceProvider.class); + config.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec); + Groups.getUserToGroupsMappingService(config); + + FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0"); + cluster = new MiniDFSCluster(0, config, 1, true, true, true, null, null, null, null); + cluster.waitActive(); + } + + @After + public void tearDown() throws Exception { + if(cluster!=null) { + cluster.shutdown(); + } + 
} + + @Test + public void testGroupMappingRefresh() throws Exception { + DFSAdmin admin = new DFSAdmin(config); + String [] args = new String[]{"-refreshUserToGroupsMappings"}; + Groups groups = Groups.getUserToGroupsMappingService(config); + String user = UserGroupInformation.getCurrentUser().getUserName(); + System.out.println("first attempt:"); + List g1 = groups.getGroups(user); + String [] str_groups = new String [g1.size()]; + g1.toArray(str_groups); + System.out.println(Arrays.toString(str_groups)); + + System.out.println("second attempt, should be same:"); + List g2 = groups.getGroups(user); + g2.toArray(str_groups); + System.out.println(Arrays.toString(str_groups)); + for(int i=0; i g3 = groups.getGroups(user); + g3.toArray(str_groups); + System.out.println(Arrays.toString(str_groups)); + for(int i=0; i g4 = groups.getGroups(user); + g4.toArray(str_groups); + System.out.println(Arrays.toString(str_groups)); + for(int i=0; i