Subject: svn commit: r892323 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/tools/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/security/
Date: Fri, 18 Dec 2009 17:48:03 -0000
From: boryas@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Message-Id: <20091218174804.1255F23889D2@eris.apache.org>

Author: boryas
Date: Fri Dec 18 17:48:03 2009
New Revision: 892323

URL: http://svn.apache.org/viewvc?rev=892323&view=rev
Log:
HDFS-685. Use the user-to-groups mapping service in the NameNode. (boryas, acmurthy)

Added:
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=892323&r1=892322&r2=892323&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Dec 18 17:48:03 2009
@@ -41,6 +41,8 @@
     HDFS-840. Change tests to use FileContext test helper introduced in
     HADOOP-6394. (Jitendra Nath Pandey via suresh)
 
+    HDFS-685. Use the user-to-groups mapping service in the NameNode.
+    (boryas, acmurthy)
+
   OPTIMIZATIONS
 
   BUG FIXES
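For readers skimming the diff: the change has four parts. HDFSPolicyProvider gains an ACL entry for the new RefreshUserToGroupMappingsProtocol, NameNode implements that protocol and delegates to the common user-to-groups mapping service, DFSAdmin grows the matching -refreshUserToGroupsMappings command, and the tests get a pluggable fake group mapping so they no longer depend on the groups of whoever runs the build. On a running cluster the refresh would be triggered from the shell roughly as follows (a sketch; the launcher script name depends on how the build is installed):

    bin/hadoop dfsadmin -refreshUserToGroupsMappings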
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java?rev=892323&r1=892322&r2=892323&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java Fri Dec 18 17:48:03 2009
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.authorize.Service;
@@ -41,6 +42,8 @@
     new Service("security.namenode.protocol.acl", NamenodeProtocol.class),
     new Service("security.refresh.policy.protocol.acl",
                 RefreshAuthorizationPolicyProtocol.class),
+    new Service("security.refresh.usertogroups.mappings.protocol.acl",
+                RefreshUserToGroupMappingsProtocol.class),
   };
 
   @Override
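The new key follows the same pattern as the existing security.*.protocol.acl entries: when hadoop.security.authorization is enabled, the service-level authorization layer consults it to decide who may invoke the refresh RPC. A minimal hadoop-policy.xml entry might look like the sketch below; the wide-open "*" value is only an illustration, and a real deployment would restrict it to administrators:

    <property>
      <name>security.refresh.usertogroups.mappings.protocol.acl</name>
      <value>*</value>
    </property>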
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=892323&r1=892322&r2=892323&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Dec 18 17:48:03 2009
@@ -50,6 +50,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
+import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
@@ -123,7 +124,8 @@
  **********************************************************/
 public class NameNode implements ClientProtocol, DatanodeProtocol,
                                  NamenodeProtocol, FSConstants,
-                                 RefreshAuthorizationPolicyProtocol {
+                                 RefreshAuthorizationPolicyProtocol,
+                                 RefreshUserToGroupMappingsProtocol {
   static{
     Configuration.addDefaultResource("hdfs-default.xml");
     Configuration.addDefaultResource("hdfs-site.xml");
@@ -139,6 +141,8 @@
       return NamenodeProtocol.versionID;
     } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
       return RefreshAuthorizationPolicyProtocol.versionID;
+    } else if (protocol.equals(RefreshUserToGroupMappingsProtocol.class.getName())){
+      return RefreshUserToGroupMappingsProtocol.versionID;
     } else {
       throw new IOException("Unknown protocol to name node: " + protocol);
     }
@@ -1160,6 +1164,13 @@
     SecurityUtil.getPolicy().refresh();
   }
 
+  @Override
+  public void refreshUserToGroupsMappings(Configuration conf) throws IOException {
+    LOG.info("Refreshing all user-to-groups mappings. Requested by user: " +
+             UserGroupInformation.getCurrentUGI().getUserName());
+    SecurityUtil.getUserToGroupsMappingService(conf).refresh();
+  }
+
   private static void printUsage() {
     System.err.println(
       "Usage: java NameNode [" +
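Server side, the refresh is one call into the common security layer: SecurityUtil.getUserToGroupsMappingService(conf) hands back the shared Groups instance, and refresh() drops its cached lookups so the next getGroups() goes back to the underlying provider. A minimal sketch of that behavior, assuming the Groups/SecurityUtil API used elsewhere in this patch ("someuser" is a placeholder name, not something this commit defines):

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.Groups;
    import org.apache.hadoop.security.SecurityUtil;

    public class GroupsRefreshSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The same singleton the NameNode refreshes above.
        Groups groups = SecurityUtil.getUserToGroupsMappingService(conf);
        List<String> before = groups.getGroups("someuser"); // may be served from cache
        groups.refresh();                                   // invalidate cached mappings
        List<String> after = groups.getGroups("someuser");  // resolved afresh by the provider
        System.out.println(before + " -> " + after);
      }
    }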
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=892323&r1=892322&r2=892323&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri Dec 18 17:48:03 2009
@@ -44,6 +44,7 @@
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.util.StringUtils;
@@ -473,6 +474,7 @@
     "\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
     "\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
     "\t[-refreshServiceAcl]\n" +
+    "\t[-refreshUserToGroupsMappings]\n" +
     "\t[-printTopology]\n" +
     "\t[-help [cmd]]\n";
 
@@ -527,6 +529,9 @@
     String refreshServiceAcl = "-refreshServiceAcl: Reload the service-level authorization policy file\n" +
       "\t\tNamenode will reload the authorization policy file.\n";
 
+    String refreshUserToGroupsMappings =
+      "-refreshUserToGroupsMappings: Refresh user-to-groups mappings\n";
+
     String printTopology = "-printTopology: Print a tree of the racks and their\n" +
                            "\t\tnodes as reported by the Namenode\n";
 
@@ -559,6 +564,8 @@
       System.out.println(ClearSpaceQuotaCommand.DESCRIPTION);
     } else if ("refreshServiceAcl".equals(cmd)) {
       System.out.println(refreshServiceAcl);
+    } else if ("refreshUserToGroupsMappings".equals(cmd)) {
+      System.out.println(refreshUserToGroupsMappings);
     } else if ("printTopology".equals(cmd)) {
       System.out.println(printTopology);
     } else if ("help".equals(cmd)) {
@@ -746,6 +753,30 @@
   }
 
   /**
+   * Refresh the user-to-groups mappings on the {@link NameNode}.
+   * @return exitcode 0 on success, non-zero on failure
+   * @throws IOException
+   */
+  public int refreshUserToGroupsMappings() throws IOException {
+    // Get the current configuration
+    Configuration conf = getConf();
+
+    // Create the client
+    RefreshUserToGroupMappingsProtocol refreshProtocol =
+      (RefreshUserToGroupMappingsProtocol)
+      RPC.getProxy(RefreshUserToGroupMappingsProtocol.class,
+                   RefreshUserToGroupMappingsProtocol.versionID,
+                   NameNode.getAddress(conf), getUGI(conf), conf,
+                   NetUtils.getSocketFactory(conf,
+                                             RefreshUserToGroupMappingsProtocol.class));
+
+    // Refresh the user-to-groups mappings
+    refreshProtocol.refreshUserToGroupsMappings(conf);
+
+    return 0;
+  }
+
+  /**
    * Displays format of commands.
    * @param cmd The command that is being executed.
    */
@@ -789,6 +820,9 @@
     } else if ("-refreshServiceAcl".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-refreshServiceAcl]");
+    } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-refreshUserToGroupsMappings]");
     } else if ("-printTopology".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-printTopology]");
@@ -803,6 +837,7 @@
       System.err.println("           [-upgradeProgress status | details | force]");
       System.err.println("           [-metasave filename]");
       System.err.println("           [-refreshServiceAcl]");
+      System.err.println("           [-refreshUserToGroupsMappings]");
       System.err.println("           [-printTopology]");
       System.err.println("           ["+SetQuotaCommand.USAGE+"]");
       System.err.println("           ["+ClearQuotaCommand.USAGE+"]");
@@ -879,11 +914,15 @@
         printUsage(cmd);
         return exitCode;
       }
-    else if ("-printTopology".equals(cmd)) {
-      if(argv.length != 1) {
-        printUsage(cmd);
-        return exitCode;
-      }
+    } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
+      if (argv.length != 1) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    } else if ("-printTopology".equals(cmd)) {
+      if(argv.length != 1) {
+        printUsage(cmd);
+        return exitCode;
       }
     }
 
@@ -927,6 +966,8 @@
       exitCode = new SetSpaceQuotaCommand(argv, i, fs).runAll();
     } else if ("-refreshServiceAcl".equals(cmd)) {
       exitCode = refreshServiceAcl();
+    } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
+      exitCode = refreshUserToGroupsMappings();
     } else if ("-printTopology".equals(cmd)) {
      exitCode = printTopology();
    } else if ("-help".equals(cmd)) {
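The command can also be driven programmatically through the Tool interface, which is exactly how the new test below exercises it. A minimal sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;

    public class RefreshViaDFSAdmin {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration(); // picks up hdfs-site.xml
        // Equivalent to: bin/hadoop dfsadmin -refreshUserToGroupsMappings
        int exitCode = new DFSAdmin(conf).run(
            new String[] {"-refreshUserToGroupsMappings"});
        System.exit(exitCode);
      }
    }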
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=892323&r1=892322&r2=892323&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java Fri Dec 18 17:48:03 2009
@@ -25,11 +25,15 @@
 import java.io.IOException;
 import java.net.URL;
 import java.net.URLConnection;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -38,8 +42,8 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.BlockAccessToken;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -294,4 +298,84 @@
         new UnixUserGroupInformation(username, groups));
     return c;
   }
+
+  /**
+   * modify conf to contain fake users with fake group
+   * @param conf to modify
+   * @throws IOException
+   */
+  static public void updateConfigurationWithFakeUsername(Configuration conf) {
+    // fake users
+    String username="fakeUser1";
+    String[] groups = {"fakeGroup1"};
+    // mapping to groups
+    Map<String, String[]> u2g_map = new HashMap<String, String[]>(1);
+    u2g_map.put(username, groups);
+    updateConfWithFakeGroupMapping(conf, u2g_map);
+
+    UnixUserGroupInformation.saveToConf(conf,
+        UnixUserGroupInformation.UGI_PROPERTY_NAME,
+        new UnixUserGroupInformation(username, groups));
+  }
+
+  /**
+   * mock class to get group mapping for fake users
+   */
+  static class MockUnixGroupsMapping extends ShellBasedUnixGroupsMapping {
+    static Map<String, String[]> fakeUser2GroupsMap;
+    private static final List<String> defaultGroups;
+    static {
+      defaultGroups = new ArrayList<String>(1);
+      defaultGroups.add("supergroup");
+      fakeUser2GroupsMap = new HashMap<String, String[]>();
+    }
+
+    @Override
+    public List<String> getGroups(String user) throws IOException {
+      boolean found = false;
+
+      // check to see if this is one of fake users
+      List<String> l = new ArrayList<String>();
+      for(String u : fakeUser2GroupsMap.keySet()) {
+        if(user.equals(u)) {
+          found = true;
+          for(String gr : fakeUser2GroupsMap.get(u)) {
+            l.add(gr);
+          }
+        }
+      }
+
+      // default
+      if(!found) {
+        l = super.getGroups(user);
+        if(l.size() == 0) {
+          System.out.println("failed to get real group for " + user +
+              "; using default");
+          return defaultGroups;
+        }
+      }
+      return l;
+    }
+  }
+
+  /**
+   * update the configuration with fake class for mapping user to groups
+   * @param conf
+   * @param map - user to groups mapping
+   */
+  static public void updateConfWithFakeGroupMapping
+      (Configuration conf, Map<String, String[]> map) {
+    if(map!=null) {
+      MockUnixGroupsMapping.fakeUser2GroupsMap = map;
+    }
+
+    // fake mapping user to groups
+    conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+        DFSTestUtil.MockUnixGroupsMapping.class,
+        ShellBasedUnixGroupsMapping.class);
+
+  }
 }
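Because MockUnixGroupsMapping serves its answers from a static map, a test must install the mapping before anything triggers the first group lookup. The intended setup pattern, mirroring the test changes below (user and group names are arbitrary test values):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class FakeMappingSetupSketch {
      static Configuration newTestConf() {
        Configuration conf = new HdfsConfiguration();
        Map<String, String[]> u2g_map = new HashMap<String, String[]>(1);
        u2g_map.put("fakeUser1", new String[] {"fakeGroup1"}); // arbitrary test identities
        // Must run before the first lookup, or the real shell-based mapping is used.
        DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
        return conf;
      }
    }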
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=892323&r1=892322&r2=892323&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java Fri Dec 18 17:48:03 2009
@@ -19,22 +19,29 @@
 
 import java.io.IOException;
 import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Random;
 
 import javax.security.auth.login.LoginException;
 
-import org.apache.commons.logging.*;
+import junit.framework.AssertionFailedError;
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 
-import junit.framework.AssertionFailedError;
-import junit.framework.TestCase;
-
 /** Unit tests for permission */
 public class TestDFSPermission extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestDFSPermission.class);
@@ -81,6 +88,13 @@
       // explicitly turn on permission checking
       conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
 
+      // create fake mapping for the groups
+      Map<String, String[]> u2g_map = new HashMap<String, String[]>(3);
+      u2g_map.put(USER1_NAME, new String[] {GROUP1_NAME, GROUP2_NAME });
+      u2g_map.put(USER2_NAME, new String[] {GROUP2_NAME, GROUP3_NAME });
+      u2g_map.put(USER3_NAME, new String[] {GROUP3_NAME, GROUP4_NAME });
+      DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
+
       // Initiate all four users
       SUPERUSER = UnixUserGroupInformation.login(conf);
       USER1 = new UnixUserGroupInformation(USER1_NAME, new String[] {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=892323&r1=892322&r2=892323&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Fri Dec 18 17:48:03 2009
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -29,7 +31,6 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
@@ -45,6 +46,9 @@
   static final int FILE_SIZE = 1024*16;
   static final short REPLICATION_NUM = (short)3;
   static byte[] buffer = new byte[FILE_SIZE];
+
+  static private String fakeUsername = "fakeUser1";
+  static private String fakeGroup = "supergroup";
 
   public void testBlockSynchronization() throws Exception {
     final long softLease = 1000;
@@ -56,6 +60,13 @@
     conf.setInt("dfs.heartbeat.interval", 1);
   //  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 16);
 
+    // create fake mapping user to group and set it to the conf
+    // NOTE. this must be done at the beginning, before first call to mapping
+    // functions
+    Map<String, String[]> u2g_map = new HashMap<String, String[]>(1);
+    u2g_map.put(fakeUsername, new String[] {fakeGroup});
+    DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
+
     MiniDFSCluster cluster = null;
     DistributedFileSystem dfs = null;
     byte[] actual = new byte[FILE_SIZE];
@@ -93,10 +104,9 @@
     // should fail but will trigger lease recovery.
     {
       Configuration conf2 = new HdfsConfiguration(conf);
-      String username = UserGroupInformation.getCurrentUGI().getUserName()+"_1";
       UnixUserGroupInformation.saveToConf(conf2,
           UnixUserGroupInformation.UGI_PROPERTY_NAME,
-          new UnixUserGroupInformation(username, new String[]{"supergroup"}));
+          new UnixUserGroupInformation(fakeUsername, new String[]{fakeGroup}));
       FileSystem dfs2 = FileSystem.get(conf2);
 
       boolean done = false;
Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java?rev=892323&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java Fri Dec 18 17:48:03 2009
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.apache.hadoop.security.GroupMappingServiceProvider;
+
+public class TestGroupMappingServiceRefresh {
+  private MiniDFSCluster cluster;
+  Configuration config;
+  private static long groupRefreshTimeoutSec = 1;
+
+  public static class MockUnixGroupsMapping implements GroupMappingServiceProvider {
+    private int i=0;
+
+    @Override
+    public List<String> getGroups(String user) throws IOException {
+      String g1 = user + (10 * i + 1);
+      String g2 = user + (10 * i + 2);
+      List<String> l = new ArrayList<String>(2);
+      l.add(g1);
+      l.add(g2);
+      i++;
+      return l;
+    }
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    config = new HdfsConfiguration();
+    config.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+        TestGroupMappingServiceRefresh.MockUnixGroupsMapping.class,
+        GroupMappingServiceProvider.class);
+    config.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS,
+        groupRefreshTimeoutSec);
+
+    FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
+    cluster = new MiniDFSCluster(0, config, 1, true, true, true, null, null, null, null);
+    cluster.waitActive();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if(cluster!=null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testGroupMappingRefresh() throws Exception {
+    DFSAdmin admin = new DFSAdmin(config);
+    String [] args = new String[]{"-refreshUserToGroupsMappings"};
+    Groups groups = SecurityUtil.getUserToGroupsMappingService(config);
+    String user = UnixUserGroupInformation.getUnixUserName();
+    System.out.println("first attempt:");
+    List<String> g1 = groups.getGroups(user);
+    String [] str_groups = new String [g1.size()];
+    g1.toArray(str_groups);
+    System.out.println(Arrays.toString(str_groups));
+
+    System.out.println("second attempt, should be same:");
+    List<String> g2 = groups.getGroups(user);
+    g2.toArray(str_groups);
+    System.out.println(Arrays.toString(str_groups));
+    for(int i=0; i<g1.size(); i++) {
+      assertEquals("Should be same group ", g1.get(i), g2.get(i));
+    }
+
+    // run the refresh command; the mock returns new groups on re-resolution
+    admin.run(args);
+    System.out.println("third attempt(after refresh command), should be different:");
+    List<String> g3 = groups.getGroups(user);
+    g3.toArray(str_groups);
+    System.out.println(Arrays.toString(str_groups));
+    for(int i=0; i<g1.size(); i++) {
+      assertFalse("Should be different group: " + g1.get(i) + " and " + g3.get(i),
+          g1.get(i).equals(g3.get(i)));
+    }
+
+    // wait past the cache timeout; lookups should be re-resolved again
+    Thread.sleep(groupRefreshTimeoutSec * 1000 * 2);
+    System.out.println("fourth attempt(after timeout), should be different:");
+    List<String> g4 = groups.getGroups(user);
+    g4.toArray(str_groups);
+    System.out.println(Arrays.toString(str_groups));
+    for(int i=0; i<g1.size(); i++) {
+      assertFalse("Should be different group ", g3.get(i).equals(g4.get(i)));
+    }
+  }
+}
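A note on why the assertions in testGroupMappingRefresh hold: MockUnixGroupsMapping increments its counter i on every uncached resolution, so for a user "u" the first lookup yields u1/u2 (i=0), the next u11/u12 (i=1), then u21/u22, and so on. Calls served from the Groups cache return the same list, while a resolution forced either by the -refreshUserToGroupsMappings command or by waiting out HADOOP_SECURITY_GROUPS_CACHE_SECS (set to 1 second here, with a 2-second sleep) is guaranteed to differ from the previous one.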