hadoop-hdfs-commits mailing list archives

From omal...@apache.org
Subject svn commit: r903562 [2/2] - in /hadoop/hdfs/trunk: ./ src/contrib/hdfsproxy/ src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/ha...
Date Wed, 27 Jan 2010 08:21:01 GMT
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java Wed Jan 27 08:20:58 2010
@@ -19,8 +19,6 @@
 
 import java.io.IOException;
 
-import javax.security.auth.login.LoginException;
-
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
@@ -28,25 +26,26 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 public class TestStickyBit extends TestCase {
 
-  static UnixUserGroupInformation user1 = new UnixUserGroupInformation(
-      "theDoctor", new String[] { "tardis" });
-  static UnixUserGroupInformation user2 = new UnixUserGroupInformation("rose",
-      new String[] { "powellestates" });
-
+  static UserGroupInformation user1 = 
+    UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
+  static UserGroupInformation user2 = 
+    UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"});
+  
   /**
    * Ensure that even if a file is in a directory with the sticky bit on,
    * another user can write to that file (assuming correct permissions).
    */
   private void confirmCanAppend(Configuration conf, FileSystem hdfs,
-      Path baseDir) throws IOException {
+      Path baseDir) throws IOException, InterruptedException {
     // Create a tmp directory with wide-open permissions and sticky bit
     Path p = new Path(baseDir, "tmp");
 
@@ -54,13 +53,13 @@
     hdfs.setPermission(p, new FsPermission((short) 01777));
 
     // Write a file to the new tmp directory as a regular user
-    hdfs = logonAs(user1, conf, hdfs);
+    hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
     Path file = new Path(p, "foo");
     writeFile(hdfs, file);
     hdfs.setPermission(file, new FsPermission((short) 0777));
 
     // Log onto cluster as another user and attempt to append to file
-    hdfs = logonAs(user2, conf, hdfs);
+    hdfs = DFSTestUtil.getFileSystemAs(user2, conf);
     Path file2 = new Path(p, "foo");
     FSDataOutputStream h = hdfs.append(file2);
     h.write("Some more data".getBytes());
@@ -72,13 +71,13 @@
    * set.
    */
   private void confirmDeletingFiles(Configuration conf, FileSystem hdfs,
-      Path baseDir) throws IOException {
+      Path baseDir) throws IOException, InterruptedException {
     Path p = new Path(baseDir, "contemporary");
     hdfs.mkdirs(p);
     hdfs.setPermission(p, new FsPermission((short) 01777));
 
     // Write a file to the new temp directory as a regular user
-    hdfs = logonAs(user1, conf, hdfs);
+    hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
     Path file = new Path(p, "foo");
     writeFile(hdfs, file);
 
@@ -86,7 +85,7 @@
     assertEquals(user1.getUserName(), hdfs.getFileStatus(file).getOwner());
 
     // Log onto cluster as another user and attempt to delete the file
-    FileSystem hdfs2 = logonAs(user2, conf, hdfs);
+    FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user2, conf);
 
     try {
       hdfs2.delete(file, false);
@@ -159,7 +158,7 @@
     assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());
   }
 
-  public void testGeneralSBBehavior() throws IOException {
+  public void testGeneralSBBehavior() throws IOException, InterruptedException {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new HdfsConfiguration();
@@ -197,7 +196,7 @@
    * Test that one user can't rename/move another user's file when the sticky
    * bit is set.
    */
-  public void testMovingFiles() throws IOException, LoginException {
+  public void testMovingFiles() throws IOException, InterruptedException {
     MiniDFSCluster cluster = null;
 
     try {
@@ -220,12 +219,12 @@
       // Write a file to the new tmp directory as a regular user
       Path file = new Path(tmpPath, "foo");
 
-      FileSystem hdfs2 = logonAs(user1, conf, hdfs);
+      FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user1, conf);
 
       writeFile(hdfs2, file);
 
       // Log onto cluster as another user and attempt to move the file
-      FileSystem hdfs3 = logonAs(user2, conf, hdfs);
+      FileSystem hdfs3 = DFSTestUtil.getFileSystemAs(user2, conf);
 
       try {
         hdfs3.rename(file, new Path(tmpPath2, "renamed"));
@@ -290,19 +289,6 @@
   }
 
   /***
-   * Create a new configuration for the specified user and return a filesystem
-   * accessed by that user
-   */
-  static private FileSystem logonAs(UnixUserGroupInformation user,
-      Configuration conf, FileSystem hdfs) throws IOException {
-    Configuration conf2 = new HdfsConfiguration(conf);
-    UnixUserGroupInformation.saveToConf(conf2,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, user);
-
-    return FileSystem.get(conf2);
-  }
-
-  /***
    * Write a quick file to the specified file system at specified path
    */
   static private void writeFile(FileSystem hdfs, Path p) throws IOException {
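
The hunks above replace the deleted logonAs helper, which injected a
UnixUserGroupInformation into a copied Configuration, with
UserGroupInformation.createUserForTesting plus DFSTestUtil.getFileSystemAs.
A minimal sketch of the new pattern, assuming conf points at a running test
cluster (the MiniDFSCluster setup is omitted):

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.security.UserGroupInformation;

    // In-memory test principal; no OS account or real login required.
    UserGroupInformation user =
        UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});

    final Configuration conf = new HdfsConfiguration();

    // Everything inside run() executes as "theDoctor", so the returned
    // FileSystem is permission-checked against that user.
    FileSystem fs = user.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(conf);
      }
    });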

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java Wed Jan 27 08:20:58 2010
@@ -29,9 +29,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /** Utilities for append-related tests */ 
 public class AppendTestUtil {
@@ -92,15 +90,15 @@
    * @param conf current Configuration
    * @return FileSystem instance
    * @throws IOException
+   * @throws InterruptedException 
    */
-  public static FileSystem createHdfsWithDifferentUsername(Configuration conf
-      ) throws IOException {
-    Configuration conf2 = new HdfsConfiguration(conf);
-    String username = UserGroupInformation.getCurrentUGI().getUserName()+"_XXX";
-    UnixUserGroupInformation.saveToConf(conf2,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME,
-        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
-    return FileSystem.get(conf2);
+  public static FileSystem createHdfsWithDifferentUsername(final Configuration conf
+      ) throws IOException, InterruptedException {
+    String username = UserGroupInformation.getCurrentUser().getUserName()+"_XXX";
+    UserGroupInformation ugi = 
+      UserGroupInformation.createUserForTesting(username, new String[]{"supergroup"});
+    
+    return DFSTestUtil.getFileSystemAs(ugi, conf);
   }
 
   static void write(OutputStream out, int offset, int length) throws IOException {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Jan 27 08:20:58 2010
@@ -25,6 +25,7 @@
 import java.io.IOException;
 import java.net.URL;
 import java.net.URLConnection;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -44,7 +45,6 @@
 import org.apache.hadoop.hdfs.security.BlockAccessToken;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /** Utilities for HDFS tests */
@@ -286,38 +286,6 @@
     IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
     return out.toString();
   }
-
-  static public Configuration getConfigurationWithDifferentUsername(Configuration conf
-      ) throws IOException {
-    final Configuration c = new HdfsConfiguration(conf);
-    final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
-    final String username = ugi.getUserName()+"_XXX";
-    final String[] groups = {ugi.getGroupNames()[0] + "_XXX"};
-    UnixUserGroupInformation.saveToConf(c,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME,
-        new UnixUserGroupInformation(username, groups));
-    return c;
-  }
-  
-  
-  /**
-   * modify conf to contain fake users with fake group
-   * @param conf to modify
-   * @throws IOException
-   */
-  static public void updateConfigurationWithFakeUsername(Configuration conf) {
-    // fake users
-    String username="fakeUser1";
-    String[] groups = {"fakeGroup1"};
-    // mapping to groups
-    Map<String, String[]> u2g_map = new HashMap<String, String[]>(1);
-    u2g_map.put(username, groups);
-    updateConfWithFakeGroupMapping(conf, u2g_map);
-    
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME,
-        new UnixUserGroupInformation(username, groups));
-  }
   
   /**
    * mock class to get group mapping for fake users
@@ -378,4 +346,17 @@
     
   }
   
+  /**
+   * Get a FileSystem instance as specified user in a doAs block.
+   */
+  static public FileSystem getFileSystemAs(UserGroupInformation ugi, 
+                                   final Configuration conf) throws IOException, 
+                                                        InterruptedException {
+    return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+  }
 }
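
getFileSystemAs above is the shared replacement for the per-test logonAs
helpers removed elsewhere in this commit. UserGroupInformation.doAs declares
InterruptedException, so getFileSystemAs and every test that calls it now
declare it too, which accounts for the many signature changes in this diff.
A hypothetical call site (the user and path names are placeholders):

    UserGroupInformation alice =
        UserGroupInformation.createUserForTesting("alice", new String[] {"users"});

    // All operations on aliceFs are permission-checked as "alice".
    FileSystem aliceFs = DFSTestUtil.getFileSystemAs(alice, conf);
    aliceFs.mkdirs(new Path("/user/alice/scratch"));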

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Jan 27 08:20:58 2010
@@ -28,8 +28,6 @@
 import java.util.Collection;
 import java.util.Random;
 
-import javax.security.auth.login.LoginException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -46,12 +44,9 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
-import org.apache.hadoop.security.UnixUserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -245,13 +240,6 @@
                         String[] racks, String hosts[],
                         long[] simulatedCapacities) throws IOException {
     this.conf = conf;
-    try {
-      UserGroupInformation.setCurrentUser(UnixUserGroupInformation.login(conf));
-    } catch (LoginException e) {
-      IOException ioe = new IOException();
-      ioe.initCause(e);
-      throw ioe;
-    }
     base_dir = new File(getBaseDirectory());
     data_dir = new File(base_dir, "data");
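
The deleted block eagerly logged in via UnixUserGroupInformation.login and
pinned the result as the process-wide current user. Under the new API the
caller's identity is resolved lazily, so the cluster constructor needs no
login step at all. A sketch of the on-demand lookup (getCurrentUser throws
IOException, which callers handle or declare):

    // No eager login: the current principal is resolved when asked for.
    UserGroupInformation current = UserGroupInformation.getCurrentUser();
    System.out.println("tests run as " + current.getUserName());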
     

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java Wed Jan 27 08:20:58 2010
@@ -40,7 +40,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /** Unit tests for permission */
 public class TestDFSPermission extends TestCase {
@@ -55,11 +55,11 @@
   final private static String USER2_NAME = "user2";
   final private static String USER3_NAME = "user3";
 
-  private static UnixUserGroupInformation SUPERUSER;
-  private static UnixUserGroupInformation USER1;
-  private static UnixUserGroupInformation USER2;
-  private static UnixUserGroupInformation USER3;
-  
+  private static UserGroupInformation SUPERUSER;
+  private static UserGroupInformation USER1;
+  private static UserGroupInformation USER2;
+  private static UserGroupInformation USER3;
+
   final private static short MAX_PERMISSION = 511;
   final private static short DEFAULT_UMASK = 022;
   final private static short FILE_MASK = 0666;
@@ -96,14 +96,14 @@
       DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
       
       // Initiate all four users
-      SUPERUSER = UnixUserGroupInformation.login(conf);
-      USER1 = new UnixUserGroupInformation(USER1_NAME, new String[] {
-          GROUP1_NAME, GROUP2_NAME });
-      USER2 = new UnixUserGroupInformation(USER2_NAME, new String[] {
-          GROUP2_NAME, GROUP3_NAME });
-      USER3 = new UnixUserGroupInformation(USER3_NAME, new String[] {
-          GROUP3_NAME, GROUP4_NAME });
-    } catch (LoginException e) {
+      SUPERUSER = UserGroupInformation.getCurrentUser();
+      USER1 = UserGroupInformation.createUserForTesting(USER1_NAME,
+          new String[] { GROUP1_NAME, GROUP2_NAME });
+      USER2 = UserGroupInformation.createUserForTesting(USER2_NAME,
+          new String[] { GROUP2_NAME, GROUP3_NAME });
+      USER3 = UserGroupInformation.createUserForTesting(USER3_NAME,
+          new String[] { GROUP3_NAME, GROUP4_NAME });
+    } catch (IOException e) {
       throw new RuntimeException(e);
     }
   }
@@ -390,7 +390,7 @@
    * for the given user for operations mkdir, open, setReplication, 
    * getFileInfo, isDirectory, exists, getContentLength, list, rename,
    * and delete */
-  private void testPermissionCheckingPerUser(UnixUserGroupInformation ugi,
+  private void testPermissionCheckingPerUser(UserGroupInformation ugi,
       short[] ancestorPermission, short[] parentPermission,
       short[] filePermission, Path[] parentDirs, Path[] files, Path[] dirs)
       throws Exception {
@@ -477,7 +477,7 @@
     final static protected short opAncestorPermission = SEARCH_MASK;
     protected short opParentPermission;
     protected short opPermission;
-    protected UnixUserGroupInformation ugi;
+    protected UserGroupInformation ugi;
 
     /* initialize */
     protected void set(Path path, short ancestorPermission,
@@ -491,7 +491,7 @@
     }
 
     /* Perform an operation and verify if the permission checking is correct */
-    void verifyPermission(UnixUserGroupInformation ugi) throws LoginException,
+    void verifyPermission(UserGroupInformation ugi) throws LoginException,
         IOException {
       if (this.ugi != ugi) {
         setRequiredPermissions(ugi);
@@ -535,7 +535,7 @@
     }
 
     /* Set the permissions required to pass the permission checking */
-    protected void setRequiredPermissions(UnixUserGroupInformation ugi)
+    protected void setRequiredPermissions(UserGroupInformation ugi)
         throws IOException {
       if (SUPERUSER.equals(ugi)) {
         requiredAncestorPermission = SUPER_MASK;
@@ -612,7 +612,7 @@
   private CreatePermissionVerifier createVerifier =
     new CreatePermissionVerifier();
   /* test if the permission checking of create/mkdir is correct */
-  private void testCreateMkdirs(UnixUserGroupInformation ugi, Path path,
+  private void testCreateMkdirs(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission) throws Exception {
     createVerifier.set(path, OpType.MKDIRS, ancestorPermission,
         parentPermission);
@@ -641,7 +641,7 @@
 
   private OpenPermissionVerifier openVerifier = new OpenPermissionVerifier();
   /* test if the permission checking of open is correct */
-  private void testOpen(UnixUserGroupInformation ugi, Path path,
+  private void testOpen(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission, short filePermission)
       throws Exception {
     openVerifier
@@ -667,7 +667,7 @@
   private SetReplicationPermissionVerifier replicatorVerifier =
     new SetReplicationPermissionVerifier();
   /* test if the permission checking of setReplication is correct */
-  private void testSetReplication(UnixUserGroupInformation ugi, Path path,
+  private void testSetReplication(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission, short filePermission)
       throws Exception {
     replicatorVerifier.set(path, ancestorPermission, parentPermission,
@@ -695,7 +695,7 @@
   private SetTimesPermissionVerifier timesVerifier =
     new SetTimesPermissionVerifier();
   /* test if the permission checking of setReplication is correct */
-  private void testSetTimes(UnixUserGroupInformation ugi, Path path,
+  private void testSetTimes(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission, short filePermission)
       throws Exception {
     timesVerifier.set(path, ancestorPermission, parentPermission,
@@ -750,7 +750,7 @@
   private StatsPermissionVerifier statsVerifier = new StatsPermissionVerifier();
   /* test if the permission checking of isDirectory, exist,
    * getFileInfo, getContentSummary is correct */
-  private void testStats(UnixUserGroupInformation ugi, Path path,
+  private void testStats(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission) throws Exception {
     statsVerifier.set(path, OpType.GET_FILEINFO, ancestorPermission,
         parentPermission);
@@ -809,7 +809,7 @@
 
   ListPermissionVerifier listVerifier = new ListPermissionVerifier();
   /* test if the permission checking of list is correct */
-  private void testList(UnixUserGroupInformation ugi, Path file, Path dir,
+  private void testList(UserGroupInformation ugi, Path file, Path dir,
       short ancestorPermission, short parentPermission, short filePermission)
       throws Exception {
     listVerifier.set(file, InodeType.FILE, ancestorPermission,
@@ -864,7 +864,7 @@
 
   RenamePermissionVerifier renameVerifier = new RenamePermissionVerifier();
   /* test if the permission checking of rename is correct */
-  private void testRename(UnixUserGroupInformation ugi, Path src, Path dst,
+  private void testRename(UserGroupInformation ugi, Path src, Path dst,
       short srcAncestorPermission, short srcParentPermission,
       short dstAncestorPermission, short dstParentPermission) throws Exception {
     renameVerifier.set(src, srcAncestorPermission, srcParentPermission, dst,
@@ -928,7 +928,7 @@
     new DeletePermissionVerifier();
 
   /* test if the permission checking of file deletion is correct */
-  private void testDeleteFile(UnixUserGroupInformation ugi, Path file,
+  private void testDeleteFile(UserGroupInformation ugi, Path file,
       short ancestorPermission, short parentPermission) throws Exception {
     fileDeletionVerifier.set(file, ancestorPermission, parentPermission);
     fileDeletionVerifier.verifyPermission(ugi);
@@ -938,7 +938,7 @@
     new DeleteDirPermissionVerifier();
 
   /* test if the permission checking of directory deletion is correct */
-  private void testDeleteDir(UnixUserGroupInformation ugi, Path path,
+  private void testDeleteDir(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission, short permission,
       short[] childPermissions) throws Exception {
     dirDeletionVerifier.set(path, ancestorPermission, parentPermission,
@@ -948,13 +948,13 @@
   }
 
   /* log into dfs as the given user */
-  private void login(UnixUserGroupInformation ugi) throws IOException {
+  private void login(UserGroupInformation ugi) throws IOException,
+      InterruptedException {
     if (fs != null) {
       fs.close();
     }
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
-    fs = FileSystem.get(conf); // login as ugi
+
+    fs = DFSTestUtil.getFileSystemAs(ugi, conf);
   }
 
   /* test non-existent file */

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java Wed Jan 27 08:20:58 2010
@@ -25,6 +25,7 @@
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.security.Permission;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -47,7 +48,6 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
@@ -1121,33 +1121,38 @@
   }
 
   public void testRemoteException() throws Exception {
-    UnixUserGroupInformation tmpUGI = new UnixUserGroupInformation("tmpname",
-        new String[] {
-        "mygroup"});
+    UserGroupInformation tmpUGI = 
+      UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
     MiniDFSCluster dfs = null;
     PrintStream bak = null;
     try {
-      Configuration conf = new HdfsConfiguration();
+      final Configuration conf = new HdfsConfiguration();
       dfs = new MiniDFSCluster(conf, 2, true, null);
       FileSystem fs = dfs.getFileSystem();
       Path p = new Path("/foo");
       fs.mkdirs(p);
       fs.setPermission(p, new FsPermission((short)0700));
-      UnixUserGroupInformation.saveToConf(conf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, tmpUGI);
-      FsShell fshell = new FsShell(conf);
       bak = System.err;
-      ByteArrayOutputStream out = new ByteArrayOutputStream();
-      PrintStream tmp = new PrintStream(out);
-      System.setErr(tmp);
-      String[] args = new String[2];
-      args[0] = "-ls";
-      args[1] = "/foo";
-      int ret = ToolRunner.run(fshell, args);
-      assertTrue("returned should be -1", (ret == -1));
-      String str = out.toString();
-      assertTrue("permission denied printed", str.indexOf("Permission denied") != -1);
-      out.reset();
+      
+      tmpUGI.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          FsShell fshell = new FsShell(conf);
+          ByteArrayOutputStream out = new ByteArrayOutputStream();
+          PrintStream tmp = new PrintStream(out);
+          System.setErr(tmp);
+          String[] args = new String[2];
+          args[0] = "-ls";
+          args[1] = "/foo";
+          int ret = ToolRunner.run(fshell, args);
+          assertEquals("returned should be -1", -1, ret);
+          String str = out.toString();
+          assertTrue("permission denied printed", 
+                     str.indexOf("Permission denied") != -1);
+          out.reset();           
+          return null;
+        }
+      });
     } finally {
       if (bak != null) {
         System.setErr(bak);
@@ -1218,7 +1223,7 @@
   }
 
   public void testLsr() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    final Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
 
@@ -1231,13 +1236,16 @@
       final Path sub = new Path(root, "sub");
       dfs.setPermission(sub, new FsPermission((short)0));
 
-      final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+      final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
       final String tmpusername = ugi.getUserName() + "1";
-      UnixUserGroupInformation tmpUGI = new UnixUserGroupInformation(
+      UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting(
           tmpusername, new String[] {tmpusername});
-      UnixUserGroupInformation.saveToConf(conf,
-            UnixUserGroupInformation.UGI_PROPERTY_NAME, tmpUGI);
-      String results = runLsr(new FsShell(conf), root, -1);
+      String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>() {
+        @Override
+        public String run() throws Exception {
+          return runLsr(new FsShell(conf), root, -1);
+        }
+      });
       assertTrue(results.contains("zzz"));
     } finally {
       cluster.shutdown();
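
The testLsr hunk shows the general shape for getting a result out of a doAs
block: parameterize PrivilegedExceptionAction with the result type and
return it from run(). It also shows why conf became final above: anonymous
inner classes may only reference final locals. A generic sketch reusing the
names from the hunk:

    // The action's type parameter is the result type of the block.
    String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws Exception {
        return runLsr(new FsShell(conf), root, -1);  // runs as tmpUGI
      }
    });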

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java Wed Jan 27 08:20:58 2010
@@ -35,7 +35,6 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -80,7 +79,7 @@
    * @throws IOException an exception might be thrown
    */ 
   public void testSimpleAppend() throws IOException {
-    Configuration conf = new HdfsConfiguration();
+    final Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -153,16 +152,16 @@
         fs.close();
 
         // login as a different user
-        final UserGroupInformation superuser = UserGroupInformation.getCurrentUGI();
+        final UserGroupInformation superuser = 
+          UserGroupInformation.getCurrentUser();
         String username = "testappenduser";
         String group = "testappendgroup";
         assertFalse(superuser.getUserName().equals(username));
         assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
-        UnixUserGroupInformation appenduser = UnixUserGroupInformation.createImmutable(
-            new String[]{username, group});
-        UnixUserGroupInformation.saveToConf(conf,
-            UnixUserGroupInformation.UGI_PROPERTY_NAME, appenduser);
-        fs = FileSystem.get(conf);
+        UserGroupInformation appenduser = 
+          UserGroupInformation.createUserForTesting(username, new String[]{group});
+        
+        fs = DFSTestUtil.getFileSystemAs(appenduser, conf);
 
         // create a file
         Path dir = new Path(root, getClass().getSimpleName());

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Wed Jan 27 08:20:58 2010
@@ -569,7 +569,7 @@
   /**
    * Test that all open files are closed when client dies abnormally.
    */
-  public void testDFSClientDeath() throws IOException {
+  public void testDFSClientDeath() throws IOException, InterruptedException {
     Configuration conf = new HdfsConfiguration();
     System.out.println("Testing adbornal client death.");
     if (simulatedStorage) {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java Wed Jan 27 08:20:58 2010
@@ -35,7 +35,7 @@
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 import junit.framework.TestCase;
 /**
@@ -99,7 +99,7 @@
           cluster.getNameNodePort());
       NamenodeProtocol namenode = (NamenodeProtocol) RPC.getProxy(
           NamenodeProtocol.class, NamenodeProtocol.versionID, addr,
-          UnixUserGroupInformation.login(CONF), CONF,
+          UserGroupInformation.getCurrentUser(), CONF,
           NetUtils.getDefaultSocketFactory(CONF));
 
       // get blocks of size fileLen from dataNodes[0]

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java Wed Jan 27 08:20:58 2010
@@ -20,7 +20,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
   
@@ -33,7 +33,7 @@
     cluster = new MiniDFSCluster(conf, 2, true, null);
     fs = cluster.getFileSystem();
     defaultWorkingDirectory = "/user/" + 
-           UnixUserGroupInformation.login().getUserName();
+           UserGroupInformation.getCurrentUser().getUserName();
   }
   
   @Override

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Wed Jan 27 08:20:58 2010
@@ -31,7 +31,6 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 
@@ -103,11 +102,11 @@
       // try to re-open the file before closing the previous handle. This
       // should fail but will trigger lease recovery.
       {
-        Configuration conf2 = new HdfsConfiguration(conf);
-        UnixUserGroupInformation.saveToConf(conf2,
-            UnixUserGroupInformation.UGI_PROPERTY_NAME,
-            new UnixUserGroupInformation(fakeUsername, new String[]{fakeGroup}));
-        FileSystem dfs2 = FileSystem.get(conf2);
+        UserGroupInformation ugi = 
+          UserGroupInformation.createUserForTesting(fakeUsername, 
+                                                    new String [] { fakeGroup});
+        
+        FileSystem dfs2 = DFSTestUtil.getFileSystemAs(ugi, conf);
   
         boolean done = false;
         for(int i = 0; i < 10 && !done; i++) {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java Wed Jan 27 08:20:58 2010
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
@@ -28,7 +29,7 @@
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 
@@ -242,18 +243,31 @@
                  (Long.MAX_VALUE/1024/1024 + 1024) + "m", args[2]);
       
       // 17:  setQuota by a non-administrator
-      UnixUserGroupInformation.saveToConf(conf, 
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, 
-          new UnixUserGroupInformation(new String[]{"userxx\n", "groupyy\n"}));
-      DFSAdmin userAdmin = new DFSAdmin(conf);
-      args[1] = "100";
-      runCommand(userAdmin, args, true);
-      runCommand(userAdmin, true, "-setSpaceQuota", "1g", args[2]);
-      
-      // 18: clrQuota by a non-administrator
-      args = new String[] {"-clrQuota", parent.toString()};
-      runCommand(userAdmin, args, true);
-      runCommand(userAdmin, true, "-clrSpaceQuota",  args[1]);      
+      final String username = "userxx";
+      UserGroupInformation ugi = 
+        UserGroupInformation.createUserForTesting(username, 
+                                                  new String[]{"groupyy"});
+      
+      final String[] args2 = args.clone(); // need final ref for doAs block
+      ugi.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          assertEquals("Not running as new user", username, 
+              UserGroupInformation.getCurrentUser().getUserName());
+          DFSAdmin userAdmin = new DFSAdmin(conf);
+          
+          args2[1] = "100";
+          runCommand(userAdmin, args2, true);
+          runCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);
+          
+          // 18: clrQuota by a non-administrator
+          String[] args3 = new String[] {"-clrQuota", parent.toString()};
+          runCommand(userAdmin, args3, true);
+          runCommand(userAdmin, true, "-clrSpaceQuota",  args3[1]); 
+          
+          return null;
+        }
+      });
     } finally {
       cluster.shutdown();
     }
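
The args2 clone above exists because the anonymous action can only capture
final locals, and args is not declared final in the surrounding test.
Finality applies to the reference, not the contents, so the block may still
write through the captured array:

    final String[] args2 = args.clone();  // final alias for the doAs block
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        args2[1] = "100";  // allowed: args2 itself is never reassigned
        return null;
      }
    });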

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java Wed Jan 27 08:20:58 2010
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -29,7 +30,6 @@
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.junit.Assert;
@@ -123,16 +123,17 @@
 
   static private int userCount = 0;
   //check the file
-  static void checkFile(Path p, int expectedsize, Configuration conf
-      ) throws IOException {
+  static void checkFile(Path p, int expectedsize, final Configuration conf
+      ) throws IOException, InterruptedException {
     //open the file with another user account
-    final Configuration conf2 = new HdfsConfiguration(conf);
-    final String username = UserGroupInformation.getCurrentUGI().getUserName()
+    final String username = UserGroupInformation.getCurrentUser().getUserName()
         + "_" + ++userCount;
-    UnixUserGroupInformation.saveToConf(conf2,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME,
-        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
-    final FileSystem fs = FileSystem.get(conf2);
+
+    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, 
+                                 new String[] {"supergroup"});
+    
+    final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
+    
     final DFSDataInputStream in = (DFSDataInputStream)fs.open(p);
 
     //Check visible length

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed Jan 27 08:20:58 2010
@@ -49,7 +49,6 @@
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
@@ -86,13 +85,8 @@
   static Configuration config;
   static NameNode nameNode;
 
-  private final UserGroupInformation ugi;
-
   NNThroughputBenchmark(Configuration conf) throws IOException, LoginException {
     config = conf;
-    ugi = UnixUserGroupInformation.login(config);
-    UserGroupInformation.setCurrentUser(ugi);
-
     // We do not need many handlers, since each thread simulates a handler
     // by calling name-node methods directly
     config.setInt("dfs.namenode.handler.count", 1);
@@ -341,7 +335,6 @@
     }
 
     public void run() {
-      UserGroupInformation.setCurrentUser(ugi);
       localNumOpsExecuted = 0;
       localCumulativeTime = 0;
       arg1 = statsOp.getExecutionArgument(daemonId);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Wed Jan 27 08:20:58 2010
@@ -25,6 +25,7 @@
 import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
 import java.nio.channels.FileChannel;
+import java.security.PrivilegedExceptionAction;
 import java.util.Random;
 
 import junit.framework.TestCase;
@@ -42,6 +43,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Level;
 
@@ -131,24 +133,39 @@
 
     MiniDFSCluster cluster = null;
     try {
+      // Create a cluster with the current user, write some files
       cluster = new MiniDFSCluster(conf, 4, true, null);
-
-      final FileSystem fs = cluster.getFileSystem();
+      final MiniDFSCluster c2 = cluster;
       final String dir = "/dfsck";
       final Path dirpath = new Path(dir);
+      final FileSystem fs = c2.getFileSystem();
+
       util.createFiles(fs, dir);
-      util.waitReplication(fs, dir, (short)3);
-      fs.setPermission(dirpath, new FsPermission((short)0700));
+      util.waitReplication(fs, dir, (short) 3);
+      fs.setPermission(dirpath, new FsPermission((short) 0700));
 
-      //run DFSck as another user
-      final Configuration c2 = DFSTestUtil.getConfigurationWithDifferentUsername(conf);
-      System.out.println(runFsck(c2, -1, true, dir));
-
-      //set permission and try DFSck again
-      fs.setPermission(dirpath, new FsPermission((short)0777));
-      final String outStr = runFsck(c2, 0, true, dir);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+      // run DFSck as another user, should fail with permission issue
+      UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
+          "ProbablyNotARealUserName", new String[] { "ShangriLa" });
+      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          System.out.println(runFsck(conf, -1, true, dir));
+          return null;
+        }
+      });
+      
+      // set permission and try DFSck again as the fake user, should succeed
+      fs.setPermission(dirpath, new FsPermission((short) 0777));
+      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final String outStr = runFsck(conf, 0, true, dir);
+          System.out.println(outStr);
+          assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+          return null;
+        }
+      });
 
       util.cleanup(fs, dir);
     } finally {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java Wed Jan 27 08:20:58 2010
@@ -40,7 +40,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -102,7 +102,7 @@
    * @throws IOException
    */
   @Test
-  public void testConcat() throws IOException {
+  public void testConcat() throws IOException, InterruptedException {
     final int numFiles = 10;
     long fileLen = blockSize*3;
     FileStatus fStatus;
@@ -142,9 +142,10 @@
     }
     
     // check permissions -try the operation with the "wrong" user
-    final UnixUserGroupInformation user1 = new UnixUserGroupInformation(
+    final UserGroupInformation user1 = UserGroupInformation.createUserForTesting(
         "theDoctor", new String[] { "tardis" });
-    DistributedFileSystem hdfs = (DistributedFileSystem)logonAs(user1, conf, dfs);
+    DistributedFileSystem hdfs = 
+      (DistributedFileSystem)DFSTestUtil.getFileSystemAs(user1, conf);
     try {
       hdfs.concat(trgPath, files);
       fail("Permission exception expected");
@@ -239,19 +240,6 @@
     assertFalse("File content of concatenated file is different", mismatch);
   }
 
-  /***
-   * Create a new configuration for the specified user and return a filesystem
-   * accessed by that user
-   */
-  static private FileSystem logonAs(UnixUserGroupInformation user,
-      Configuration conf, FileSystem hdfs) throws IOException {
-    Configuration conf2 = new Configuration(conf);
-    UnixUserGroupInformation.saveToConf(conf2,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, user);
-
-    return FileSystem.get(conf2);
-  }
-
   // test case when final block is not of a full length
   @Test
   public void testConcatNotCompleteBlock() throws IOException {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java Wed Jan 27 08:20:58 2010
@@ -48,6 +48,7 @@
     
     @Override
     public List<String> getGroups(String user) throws IOException {
+      System.err.println("Getting groups in MockUnixGroupsMapping");
       String g1 = user + (10 * i + 1);
       String g2 = user + (10 * i + 2);
       List<String> l = new ArrayList<String>(2);
@@ -67,6 +68,7 @@
     config.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 
         groupRefreshTimeoutSec);
     
+    Groups.getUserToGroupsMappingService(config);
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
     cluster = new MiniDFSCluster(0, config, 1, true, true, true,  null, null, null, null);
     cluster.waitActive();
@@ -83,8 +85,8 @@
   public void testGroupMappingRefresh() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
     String [] args =  new String[]{"-refreshUserToGroupsMappings"};
-    Groups groups = SecurityUtil.getUserToGroupsMappingService(config);
-    String user = UnixUserGroupInformation.getUnixUserName();
+    Groups groups = Groups.getUserToGroupsMappingService(config);
+    String user = UserGroupInformation.getCurrentUser().getUserName();
     System.out.println("first attempt:");
     List<String> g1 = groups.getGroups(user);
     String [] str_groups = new String [g1.size()];
@@ -104,7 +106,8 @@
     g3.toArray(str_groups);
     System.out.println(Arrays.toString(str_groups));
     for(int i=0; i<g3.size(); i++) {
-      assertFalse("Should be different group ", g1.get(i).equals(g3.get(i)));
+      assertFalse("Should be different group: " + g1.get(i) + " and " + g3.get(i), 
+          g1.get(i).equals(g3.get(i)));
     }
     
     // test time out
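
The refresh test now obtains the mapping service from Groups directly,
replacing the old SecurityUtil.getUserToGroupsMappingService call, and
resolves the user name through UserGroupInformation. The extra
Groups.getUserToGroupsMappingService(config) call added to setUp presumably
primes the shared service before the cluster starts (an inference from the
ordering). The lookup itself, as used in the hunk:

    Groups groups = Groups.getUserToGroupsMappingService(config);
    String user = UserGroupInformation.getCurrentUser().getUserName();
    // Results are cached per HADOOP_SECURITY_GROUPS_CACHE_SECS.
    List<String> currentGroups = groups.getGroups(user);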

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestPermission.java?rev=903562&r1=903561&r2=903562&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestPermission.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestPermission.java Wed Jan 27 08:20:58 2010
@@ -23,15 +23,14 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
 
 import junit.framework.TestCase;
 
@@ -39,10 +38,6 @@
 public class TestPermission extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestPermission.class);
 
-  {
-    ((Log4JLogger)UserGroupInformation.LOG).getLogger().setLevel(Level.ALL);
-  }
-
   final private static Path ROOT_PATH = new Path("/data");
   final private static Path CHILD_DIR1 = new Path(ROOT_PATH, "child1");
   final private static Path CHILD_DIR2 = new Path(ROOT_PATH, "child2");
@@ -120,7 +115,7 @@
   }
 
   public void testFilePermision() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
     cluster.waitActive();
@@ -163,11 +158,10 @@
 
       ////////////////////////////////////////////////////////////////
       // test illegal file/dir creation
-      UnixUserGroupInformation userGroupInfo = new UnixUserGroupInformation(
-          USER_NAME, GROUP_NAMES );
-      UnixUserGroupInformation.saveToConf(conf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, userGroupInfo);
-      FileSystem userfs = FileSystem.get(conf);
+      UserGroupInformation userGroupInfo = 
+        UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES );
+      
+      FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
 
       // make sure mkdir of a existing directory that is not owned by 
       // this user does not throw an exception.


