hadoop-mapreduce-commits mailing list archives

From: omal...@apache.org
Subject: svn commit: r903563 [2/2] - in /hadoop/mapreduce/trunk: ./ src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ src/contrib/raid/src/java/org/apache/hadoop/raid/ src/contrib/streaming/src/test/org/apache/hadoop/streaming/ src/java/org/apac...
Date: Wed, 27 Jan 2010 08:32:22 GMT
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java Wed Jan 27 08:32:17 2010
@@ -19,6 +19,7 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
@@ -36,7 +37,7 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.StaticMapping;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * This class creates a single-process Map-Reduce cluster for junit testing.
@@ -57,7 +58,7 @@
   private List<Thread> taskTrackerThreadList = new ArrayList<Thread>();
     
   private String namenode;
-  private UnixUserGroupInformation ugi = null;
+  private UserGroupInformation ugi = null;
   private JobConf conf;
   private int numTrackerToExclude;
     
@@ -113,9 +114,16 @@
         jc.set(MRConfig.LOCAL_DIR, f.getAbsolutePath());
         jc.setClass("topology.node.switch.mapping.impl", 
             StaticMapping.class, DNSToSwitchMapping.class);
-        String id = 
+        final String id = 
           new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
-        tracker = JobTracker.startTracker(jc, clock, id);
+        if (ugi == null) {
+          ugi = UserGroupInformation.getLoginUser();
+        }
+        tracker = ugi.doAs(new PrivilegedExceptionAction<JobTracker>() {
+          public JobTracker run() throws InterruptedException, IOException {
+            return JobTracker.startTracker(jc, clock, id);
+          }
+        });
         tracker.offerService();
       } catch (Throwable e) {
         LOG.error("Job tracker crashed", e);
@@ -156,7 +164,7 @@
       this.trackerId = trackerId;
       this.numDir = numDir;
       localDirs = new String[numDir];
-      JobConf conf = null;
+      final JobConf conf;
       if (cfg == null) {
         conf = createJobConf();
       } else {
@@ -189,7 +197,11 @@
       conf.set(MRConfig.LOCAL_DIR, localPath.toString());
       LOG.info(MRConfig.LOCAL_DIR + " is " +  localPath);
       try {
-        tt = createTaskTracker(conf);
+        tt = ugi.doAs(new PrivilegedExceptionAction<TaskTracker>() {
+          public TaskTracker run() throws InterruptedException, IOException {
+            return createTaskTracker(conf);
+          }
+        }); 
         isInitialized = true;
       } catch (Throwable e) {
         isDead = true;
@@ -201,7 +213,8 @@
     /**
      * Creates a default {@link TaskTracker} using the conf passed.
      */
-    TaskTracker createTaskTracker(JobConf conf) throws IOException {
+    TaskTracker createTaskTracker(JobConf conf) 
+    throws IOException, InterruptedException {
       return new TaskTracker(conf);
     }
     
@@ -355,17 +368,12 @@
   
   static JobConf configureJobConf(JobConf conf, String namenode, 
                                   int jobTrackerPort, int jobTrackerInfoPort, 
-                                  UnixUserGroupInformation ugi) {
+                                  UserGroupInformation ugi) {
     JobConf result = new JobConf(conf);
     FileSystem.setDefaultUri(result, namenode);
     result.set(JTConfig.JT_IPC_ADDRESS, "localhost:"+jobTrackerPort);
     result.set(JTConfig.JT_HTTP_ADDRESS, 
                         "127.0.0.1:" + jobTrackerInfoPort);
-    if (ugi != null) {
-      result.set(JTConfig.JT_SYSTEM_DIR, "/mapred/system");
-      UnixUserGroupInformation.saveToConf(result,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
-    }
     // for debugging have all task output sent to the test output
     JobClient.setTaskOutputFilter(result, JobClient.TaskStatusFilter.ALL);
     return result;
@@ -443,7 +451,7 @@
 
   public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
       int numTaskTrackers, String namenode, 
-      int numDir, String[] racks, String[] hosts, UnixUserGroupInformation ugi
+      int numDir, String[] racks, String[] hosts, UserGroupInformation ugi
       ) throws IOException {
     this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, 
          numDir, racks, hosts, ugi, null);
@@ -451,7 +459,7 @@
 
   public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
       int numTaskTrackers, String namenode, 
-      int numDir, String[] racks, String[] hosts, UnixUserGroupInformation ugi,
+      int numDir, String[] racks, String[] hosts, UserGroupInformation ugi,
       JobConf conf) throws IOException {
     this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, numDir, 
          racks, hosts, ugi, conf, 0);
@@ -459,7 +467,7 @@
   
   public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
       int numTaskTrackers, String namenode, 
-      int numDir, String[] racks, String[] hosts, UnixUserGroupInformation ugi,
+      int numDir, String[] racks, String[] hosts, UserGroupInformation ugi,
       JobConf conf, int numTrackerToExclude) throws IOException {
     this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, numDir,
          racks, hosts, ugi, conf, numTrackerToExclude, new Clock());
@@ -467,7 +475,7 @@
 
    public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
       int numTaskTrackers, String namenode,
-      int numDir, String[] racks, String[] hosts, UnixUserGroupInformation ugi,
+      int numDir, String[] racks, String[] hosts, UserGroupInformation ugi,
       JobConf conf, int numTrackerToExclude, Clock clock) throws IOException {
     if (racks != null && racks.length < numTaskTrackers) {
       LOG.error("Invalid number of racks specified. It should be at least " +
@@ -526,6 +534,10 @@
     this.job = createJobConf(conf);
     waitUntilIdle();
   }
+   
+  public UserGroupInformation getUgi() {
+    return ugi;
+  }
     
   /**
    * Get the task completion events

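The pattern this commit introduces throughout the tests: wrap any action that must run with a particular user's credentials in UserGroupInformation.doAs(PrivilegedExceptionAction), falling back to the login user when no UGI was supplied. A minimal self-contained sketch of the idiom (DoAsSketch and startService are hypothetical stand-ins for the JobTracker.startTracker call above):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
  // Hypothetical stand-in for JobTracker.startTracker(jc, clock, id).
  static String startService() throws IOException {
    return "service started by "
        + UserGroupInformation.getCurrentUser().getUserName();
  }

  public static void main(String[] args) throws Exception {
    // Mirror MiniMRCluster: default to the OS login user if none was given.
    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
    // doAs declares IOException and InterruptedException, which is why
    // createTaskTracker's signature above gains "throws InterruptedException".
    String service = ugi.doAs(new PrivilegedExceptionAction<String>() {
      public String run() throws IOException {
        // Code here sees ugi as the current user.
        return startService();
      }
    });
    System.out.println(service);
  }
}
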
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java Wed Jan 27 08:32:17 2010
@@ -22,12 +22,13 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.mapreduce.Cluster;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.QueueState;
 import org.apache.hadoop.mapreduce.SleepJob;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import static org.apache.hadoop.mapred.Queue.*;
@@ -46,6 +47,8 @@
 import javax.xml.transform.OutputKeys;
 import javax.xml.transform.stream.StreamResult;
 import javax.xml.transform.dom.DOMSource;
+
+import java.security.PrivilegedExceptionAction;
 import java.util.Properties;
 import java.util.Set;
 import java.io.File;
@@ -283,22 +286,32 @@
     }
   }
 
-  static Job submitSleepJob(int numMappers, int numReducers, long mapSleepTime,
-      long reduceSleepTime, boolean shouldComplete, String userInfo,
+  static Job submitSleepJob(final int numMappers, final int numReducers, final long mapSleepTime,
+      final long reduceSleepTime, boolean shouldComplete, String userInfo,
       String queueName, Configuration clientConf) throws IOException,
       InterruptedException, ClassNotFoundException {
     clientConf.set(JTConfig.JT_IPC_ADDRESS, "localhost:"
         + miniMRCluster.getJobTrackerPort());
+    UserGroupInformation ugi;
     if (userInfo != null) {
-      clientConf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, userInfo);
+      String[] splits = userInfo.split(",");
+      String[] groups = new String[splits.length - 1];
+      System.arraycopy(splits, 1, groups, 0, splits.length - 1);
+      ugi = UserGroupInformation.createUserForTesting(splits[0], groups);
+    } else {
+      ugi = UserGroupInformation.getCurrentUser();
     }
     if (queueName != null) {
       clientConf.set(JobContext.QUEUE_NAME, queueName);
     }
-    SleepJob sleep = new SleepJob();
+    final SleepJob sleep = new SleepJob();
     sleep.setConf(clientConf);
-    Job job = sleep.createJob(numMappers, numReducers, mapSleepTime,
-        (int) mapSleepTime, reduceSleepTime, (int) reduceSleepTime);
+    
+    Job job = ugi.doAs(new PrivilegedExceptionAction<Job>() {
+        public Job run() throws IOException {
+          return sleep.createJob(numMappers, numReducers, mapSleepTime,
+              (int) mapSleepTime, reduceSleepTime, (int) reduceSleepTime);
+      }});
     if (shouldComplete) {
       job.waitForCompletion(false);
     } else {

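Where the old client stuffed a "user,group1,group2,..." string into the conf under UnixUserGroupInformation.UGI_PROPERTY_NAME, the new code parses the same string into a test UGI. A sketch of just that conversion (fromUserInfo is a hypothetical helper name):

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class UgiFromUserInfo {
  // Parses the "user,group1,group2,..." format these tests use.
  static UserGroupInformation fromUserInfo(String userInfo) throws IOException {
    if (userInfo == null) {
      return UserGroupInformation.getCurrentUser();
    }
    String[] splits = userInfo.split(",");
    String[] groups = new String[splits.length - 1];
    System.arraycopy(splits, 1, groups, 0, groups.length);
    // Creates a UGI with a faked group membership; no OS account needed.
    return UserGroupInformation.createUserForTesting(splits[0], groups);
  }

  public static void main(String[] args) throws IOException {
    UserGroupInformation ugi = fromUserInfo("u1,g1,g2");
    System.out.println(ugi.getUserName() + " "
        + java.util.Arrays.toString(ugi.getGroupNames()));
  }
}
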
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java Wed Jan 27 08:32:17 2010
@@ -23,10 +23,13 @@
 import java.io.IOException;
 import java.io.PrintWriter;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.Groups;
+import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
+import org.apache.hadoop.security.UserGroupInformation;
 import static org.junit.Assert.*;
 
 import org.junit.After;
@@ -158,7 +161,8 @@
     assertTrue(out.contains("failing map"));
     if (expectedPerms != null && expectedUser != null) {
       //check whether the debugout file ownership/permissions are as expected
-      String ttGroup = UnixUserGroupInformation.login().getGroupNames()[0];
+      Groups groups = new Groups(new Configuration());
+      String ttGroup = groups.getGroups(expectedUser).get(0);
       TestTaskTrackerLocalization.checkFilePermissions(output.getAbsolutePath(),
           expectedPerms, expectedUser, ttGroup);
     }

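The group lookup no longer rides on a login call: Groups consults whatever mapping service the configuration names (shell-based by default), so the test can ask for expectedUser's groups directly. A sketch, assuming the default mapping is available on the host:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Groups;

public class PrimaryGroup {
  public static void main(String[] args) throws Exception {
    // Uses the group-mapping implementation named in the Configuration
    // (ShellBasedUnixGroupsMapping unless overridden).
    Groups groups = new Groups(new Configuration());
    String user = args.length > 0 ? args[0] : System.getProperty("user.name");
    List<String> g = groups.getGroups(user);
    // By convention the first entry is the primary group.
    System.out.println(user + " -> " + g.get(0));
  }
}
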
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java Wed Jan 27 08:32:17 2010
@@ -18,9 +18,13 @@
 
 package org.apache.hadoop.mapred;
 
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Test;
 
 public class TestDebugScriptWithLinuxTaskController extends
@@ -33,21 +37,28 @@
     }
     super.startCluster();
     TestDebugScript.setupDebugScriptDirs();
-    Path inDir = new Path("input");
-    Path outDir = new Path("output");
+    final Path inDir = new Path("input");
+    final Path outDir = new Path("output");
     JobConf conf = super.getClusterConf();
     FileSystem fs = inDir.getFileSystem(conf);
     fs.mkdirs(inDir);
     Path p = new Path(inDir, "1.txt");
     fs.createNewFile(p);
-    JobID jobId = TestDebugScript.runFailingMapJob(super.getClusterConf(), 
-        inDir, outDir);
-    String ugi = System
-        .getProperty(ClusterWithLinuxTaskController.TASKCONTROLLER_UGI);
+    String splits[] = System
+          .getProperty(ClusterWithLinuxTaskController.TASKCONTROLLER_UGI).
+          split(",");
+    JobID jobId = UserGroupInformation.createUserForTesting(splits[0], 
+        new String[]{splits[1]}).doAs(new PrivilegedExceptionAction<JobID>() {
+          public JobID run() throws IOException{
+          return TestDebugScript.runFailingMapJob(
+              TestDebugScriptWithLinuxTaskController.this.getClusterConf(), 
+              inDir, outDir);
+          }
+        });
     // construct the task id of first map task of failmap
     TaskAttemptID taskId = new TaskAttemptID(
         new TaskID(jobId,TaskType.MAP, 0), 0);
-    TestDebugScript.verifyDebugScriptOutput(taskId, ugi.split(",")[0],
+    TestDebugScript.verifyDebugScriptOutput(taskId, splits[0],
         "-rw-rw----");
     TestDebugScript.cleanupDebugScriptDirs();
   }

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestIsolationRunner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestIsolationRunner.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestIsolationRunner.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestIsolationRunner.java Wed Jan 27 08:32:17 2010
@@ -113,7 +113,7 @@
     String taskid =
         new TaskAttemptID(new TaskID(jobId, taskType, 0), 0).toString();
     return new LocalDirAllocator(MRConfig.LOCAL_DIR).getLocalPathToRead(
-        TaskTracker.getTaskConfFile(UserGroupInformation.login(conf)
+        TaskTracker.getTaskConfFile(UserGroupInformation.getCurrentUser()
             .getUserName(), jobId.toString(), taskid, false), conf);
   }
 

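Worth noting about the getCurrentUser() swap here and in the files that follow: getLoginUser() is the OS identity established once per JVM, while getCurrentUser() honors any enclosing doAs, so task paths computed inside a doAs block land under the effective user. A sketch of the difference:

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class CurrentVsLogin {
  public static void main(String[] args) throws Exception {
    UserGroupInformation bob =
        UserGroupInformation.createUserForTesting("bob", new String[]{"staff"});
    bob.doAs(new PrivilegedExceptionAction<Void>() {
      public Void run() throws Exception {
        // The current user is bob inside doAs ...
        System.out.println("current: "
            + UserGroupInformation.getCurrentUser().getUserName());
        // ... but the login user is still the process owner.
        System.out.println("login:   "
            + UserGroupInformation.getLoginUser().getUserName());
        return null;
      }
    });
  }
}
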
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java Wed Jan 27 08:32:17 2010
@@ -70,7 +70,7 @@
     assertEquals(0, ToolRunner.run(myConf, new SleepJob(), args));
   }
   
-  public void testEnvironment() throws IOException {
+  public void testEnvironment() throws Exception {
     if (!shouldRun()) {
       return;
     }

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java Wed Jan 27 08:32:17 2010
@@ -54,6 +54,7 @@
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  *
@@ -352,10 +353,10 @@
                "match the expected value", 
                conf.getJobName().equals(
                jobInfo.getJobname()));
-
+    String user = UserGroupInformation.getCurrentUser().getUserName();
     assertTrue("User Name of job obtained from history file did not " +
                "match the expected value", 
-               conf.getUser().equals(
+               user.equals(
                jobInfo.getUsername()));
 
     // Validate job counters
@@ -807,9 +808,10 @@
       JobConf conf, JobID id, 
       Path doneDir) throws IOException {
     String name = null;
+    String user = UserGroupInformation.getCurrentUser().getUserName();
     for (int i = 0; name == null && i < 20; i++) {
       Path path = JobHistory.getJobHistoryFile(
-          jobHistory.getCompletedJobHistoryLocation(), id, conf.getUser());
+          jobHistory.getCompletedJobHistoryLocation(), id, user);
       if (path.getFileSystem(conf).exists(path)) {
         name = path.toString();
       }

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java Wed Jan 27 08:32:17 2010
@@ -28,7 +28,7 @@
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.mapreduce.QueueState;
 import org.apache.hadoop.mapreduce.SleepJob;
 import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java Wed Jan 27 08:32:17 2010
@@ -135,7 +135,7 @@
    */
   class WaitingTaskTracker extends TaskTracker {
     
-    WaitingTaskTracker(JobConf conf) throws IOException {
+    WaitingTaskTracker(JobConf conf) throws IOException, InterruptedException {
       super(conf);
     }
     
@@ -198,7 +198,8 @@
       TaskTrackerRunner testTrackerRunner = 
         mr.new TaskTrackerRunner(1, 1, null, mr.createJobConf()) {
         @Override
-        TaskTracker createTaskTracker(JobConf conf) throws IOException {
+        TaskTracker createTaskTracker(JobConf conf) 
+        throws IOException, InterruptedException {
           return new WaitingTaskTracker(conf);
         }
       };

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java Wed Jan 27 08:32:17 2010
@@ -79,9 +79,11 @@
     String ugi =
         System.getProperty(ClusterWithLinuxTaskController.TASKCONTROLLER_UGI);
     JobConf jobConf = new JobConf(task.getConf());
-    jobConf.setUser(ugi.split(",")[0]);
+    String user = ugi.split(",")[0];
+    jobConf.setUser(user);
     uploadJobConf(jobConf);
     task.setConf(jobConf);
+    task.setUser(user);
   }
 
   @Override

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java Wed Jan 27 08:32:17 2010
@@ -157,7 +157,9 @@
       ReflectionUtils.newInstance(jContext.getInputFormatClass(), job);
 
     List<InputSplit> splits = input.getSplits(jContext);
-    JobSplitWriter.createSplitFiles(new Path(TEST_ROOT_DIR), job, splits);
+    JobSplitWriter.createSplitFiles(new Path(TEST_ROOT_DIR), job, 
+                   new Path(TEST_ROOT_DIR).getFileSystem(job),
+                   splits);
     TaskSplitMetaInfo[] splitMetaInfo = 
       SplitMetaInfoReader.readSplitMetaInfo(jobId, fs, job, new Path(TEST_ROOT_DIR));
     job.setUseNewMapper(true); // use new api    

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java Wed Jan 27 08:32:17 2010
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.mapred;
 
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
@@ -36,36 +39,38 @@
   private static final Log LOG = LogFactory.getLog(TestMapredSystemDir.class);
   
   // dfs ugi
-  private static final UnixUserGroupInformation DFS_UGI = 
+  private static final UserGroupInformation DFS_UGI = 
     TestMiniMRWithDFSWithDistinctUsers.createUGI("dfs", true);
   // mapred ugi
-  private static final UnixUserGroupInformation MR_UGI = 
+  private static final UserGroupInformation MR_UGI = 
     TestMiniMRWithDFSWithDistinctUsers.createUGI("mr", false);
   private static final FsPermission SYSTEM_DIR_PERMISSION =
     FsPermission.createImmutable((short) 0733); // rwx-wx-wx
   
   public void testGarbledMapredSystemDir() throws Exception {
-    MiniDFSCluster dfs = null;
+    Configuration conf = new Configuration();
+    final MiniDFSCluster dfs = new MiniDFSCluster(conf, 1, true, null);
     MiniMRCluster mr = null;
     try {
       // start dfs
-      Configuration conf = new Configuration();
-      conf.set("dfs.permissions.supergroup", "supergroup");
-      UnixUserGroupInformation.saveToConf(conf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI);
-      dfs = new MiniDFSCluster(conf, 1, true, null);
-      FileSystem fs = dfs.getFileSystem();
+      conf.set("dfs.permissions.supergroup", "supergroup");      
+      FileSystem fs = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
+        public FileSystem run() throws IOException {
+          return dfs.getFileSystem();
+        }
+      });
       
-      // create Configs.SYSTEM_DIR
-      Path mapredSysDir = new Path("/mapred");
+      // create Configs.SYSTEM_DIR's parent (the parent has to be given 
+      // permissions since the JT internally tries to delete the leaf of
+      // the directory structure)
+      Path mapredSysDir = 
+        new Path(conf.get(JTConfig.JT_SYSTEM_DIR)).getParent();
       fs.mkdirs(mapredSysDir);
       fs.setPermission(mapredSysDir, new FsPermission(SYSTEM_DIR_PERMISSION));
       fs.setOwner(mapredSysDir, "mr", "mrgroup");
 
       // start mr (i.e jobtracker)
       Configuration mrConf = new Configuration();
-      UnixUserGroupInformation.saveToConf(mrConf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, MR_UGI);
       mr = new MiniMRCluster(0, 0, 0, dfs.getFileSystem().getUri().toString(),
                              1, null, null, MR_UGI, new JobConf(mrConf));
       JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker();

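Obtaining the FileSystem inside doAs binds the handle to that user, so the later mkdirs/setPermission/setOwner calls above execute as DFS_UGI. A standalone sketch against the default (local) filesystem, with a hypothetical test superuser:

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;

public class FsAsUser {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    UserGroupInformation dfsUgi = UserGroupInformation
        .createUserForTesting("dfs", new String[]{"supergroup"});
    // The handle returned here carries dfsUgi's identity.
    FileSystem fs = dfsUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(conf);
      }
    });
    Path dir = new Path("/tmp/sysdir-sketch");
    fs.mkdirs(dir);
    fs.setPermission(dir, new FsPermission((short) 0733)); // rwx-wx-wx
  }
}
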
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java Wed Jan 27 08:32:17 2010
@@ -27,8 +27,6 @@
 import java.util.Arrays;
 import java.util.List;
 
-import javax.security.auth.login.LoginException;
-
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
@@ -45,7 +43,7 @@
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -238,15 +236,7 @@
         NUM_MAPS, NUM_SAMPLES, jobconf).doubleValue();
     double error = Math.abs(Math.PI - estimate);
     assertTrue("Error in PI estimation "+error+" exceeds 0.01", (error < 0.01));
-    String userName = jobconf.getUser();
-    if (userName == null) {
-      try {
-        userName = UnixUserGroupInformation.login(jobconf).getUserName();
-      } catch (LoginException le) {
-        throw new IOException("Cannot get the login username : "
-            + StringUtils.stringifyException(le));
-      }
-    }
+    String userName = UserGroupInformation.getLoginUser().getUserName();
     checkTaskDirectories(mr, userName, new String[] {}, new String[] {});
   }
 
@@ -268,15 +258,8 @@
     JobID jobid = result.job.getID();
     TaskAttemptID taskid = new TaskAttemptID(
         new TaskID(jobid, TaskType.MAP, 1),0);
-    String userName = jobConf.getUser();
-    if (userName == null) {
-      try {
-        userName = UnixUserGroupInformation.login(jobConf).getUserName();
-      } catch (LoginException le) {
-        throw new IOException("Cannot get the login username : "
-            + StringUtils.stringifyException(le));
-      }
-    }
+    String userName = UserGroupInformation.getLoginUser().getUserName();
+    
     checkTaskDirectories(mr, userName, new String[] { jobid.toString() },
         new String[] { taskid.toString() });
     // test with maps=0

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java Wed Jan 27 08:32:17 2010
@@ -18,6 +18,7 @@
 package org.apache.hadoop.mapred;
 
 import java.io.*;
+import java.security.PrivilegedExceptionAction;
 
 import junit.framework.TestCase;
 
@@ -39,27 +40,16 @@
  * A JUnit test to test Mini Map-Reduce Cluster with Mini-DFS.
  */
 public class TestMiniMRWithDFSWithDistinctUsers extends TestCase {
-  static final UnixUserGroupInformation DFS_UGI = createUGI("dfs", true); 
-  static final UnixUserGroupInformation PI_UGI = createUGI("pi", false); 
-  static final UnixUserGroupInformation WC_UGI = createUGI("wc", false); 
+  static final UserGroupInformation DFS_UGI = createUGI("dfs", true); 
+  static final UserGroupInformation PI_UGI = createUGI("pi", false); 
+  static final UserGroupInformation WC_UGI = createUGI("wc", false); 
 
-  static UnixUserGroupInformation createUGI(String name, boolean issuper) {
+  static UserGroupInformation createUGI(String name, boolean issuper) {
     String group = issuper? "supergroup": name;
-    return UnixUserGroupInformation.createImmutable(
-        new String[]{name, group});
+    
+    return UserGroupInformation.createUserForTesting(name, new String[]{group});
   }
   
-  static JobConf createJobConf(MiniMRCluster mr, UnixUserGroupInformation ugi) {
-    return createJobConf(mr.createJobConf(), ugi);
-  }
-
-  static JobConf createJobConf(JobConf conf, UnixUserGroupInformation ugi) {
-    JobConf jobconf = new JobConf(conf);
-    UnixUserGroupInformation.saveToConf(jobconf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
-    return jobconf;
-  }
-
   static void mkdir(FileSystem fs, String dir) throws IOException {
     Path p = new Path(dir);
     fs.mkdirs(p);
@@ -67,19 +57,23 @@
   }
 
   // runs a sample job as a user (ugi)
-  RunningJob runJobAsUser(JobConf job, UserGroupInformation ugi) 
+  RunningJob runJobAsUser(final JobConf job, UserGroupInformation ugi) 
   throws Exception {
     ClientProtocol jobSubmitClient = 
       TestSubmitJob.getJobSubmitClient(job, ugi);
     org.apache.hadoop.mapreduce.JobID id = jobSubmitClient.getNewJobID();
     
     InputSplit[] splits = computeJobSplit(JobID.downgrade(id), job);
-    Path jobSubmitDir = new Path(id.toString());
-    FileSystem fs = jobSubmitDir.getFileSystem(job);
-    jobSubmitDir = jobSubmitDir.makeQualified(fs);
-    uploadJobFiles(JobID.downgrade(id), splits, jobSubmitDir, job);
+    final Path jobSubmitDir = new Path(id.toString());
+    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      public FileSystem run() throws IOException {
+        return jobSubmitDir.getFileSystem(job);
+      }
+    });
+    Path qJobSubmitDir = jobSubmitDir.makeQualified(fs);
+    uploadJobFiles(JobID.downgrade(id), splits, qJobSubmitDir, ugi, job);
     
-    jobSubmitClient.submitJob(id, jobSubmitDir.toString(), null);
+    jobSubmitClient.submitJob(id, qJobSubmitDir.toString(), null);
     
     JobClient jc = new JobClient(job);
     return jc.getJob(JobID.downgrade(id));
@@ -97,11 +91,16 @@
 
   // a helper api for split submission
   private void uploadJobFiles(JobID id, InputSplit[] splits,
-                             Path jobSubmitDir, JobConf conf) 
-  throws IOException {
-    Path confLocation = JobSubmissionFiles.getJobConfPath(jobSubmitDir);
-    JobSplitWriter.createSplitFiles(jobSubmitDir, conf, splits);
-    FileSystem fs = confLocation.getFileSystem(conf);
+                             Path jobSubmitDir, UserGroupInformation ugi, 
+                             final JobConf conf) 
+  throws Exception {
+    final Path confLocation = JobSubmissionFiles.getJobConfPath(jobSubmitDir);
+    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      public FileSystem run() throws IOException {
+        return confLocation.getFileSystem(conf);
+      }
+    });
+    JobSplitWriter.createSplitFiles(jobSubmitDir, conf, fs, splits);
     FsPermission perm = new FsPermission((short)0700);
     
     // localize conf
@@ -111,19 +110,20 @@
   }
   
   public void testDistinctUsers() throws Exception {
-    MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
+    Configuration conf = new Configuration();
+    final MiniDFSCluster dfs = new MiniDFSCluster(conf, 4, true, null);
     try {
-      Configuration conf = new Configuration();
-      UnixUserGroupInformation.saveToConf(conf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI);
-      dfs = new MiniDFSCluster(conf, 4, true, null);
-      FileSystem fs = dfs.getFileSystem();
+          
+      FileSystem fs = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
+        public FileSystem run() throws IOException {
+          return dfs.getFileSystem();
+        }
+      });
       mkdir(fs, "/user");
       mkdir(fs, "/mapred");
 
-      UnixUserGroupInformation MR_UGI = createUGI(
-          UnixUserGroupInformation.login().getUserName(), false); 
+      UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser(); 
       mr = new MiniMRCluster(0, 0, 4, dfs.getFileSystem().getUri().toString(),
            1, null, null, MR_UGI);
       String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
@@ -135,7 +135,6 @@
       Path outDir = new Path("/testing/distinct/output");
       TestMiniMRClasspath.configureWordCount(fs, jobTrackerName, job1, 
                                              input, 2, 1, inDir, outDir);
-      job1 = createJobConf(job1, PI_UGI);
       runJobAsUser(job1, PI_UGI);
 
       JobConf job2 = mr.createJobConf();
@@ -143,7 +142,6 @@
       Path outDir2 = new Path("/testing/distinct/output2");
       TestMiniMRClasspath.configureWordCount(fs, jobTrackerName, job2, 
                                              input, 2, 1, inDir2, outDir2);
-      job2 = createJobConf(job2, WC_UGI);
       runJobAsUser(job2, WC_UGI);
     } finally {
       if (dfs != null) { dfs.shutdown(); }

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java Wed Jan 27 08:32:17 2010
@@ -32,7 +32,9 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.Writable;
@@ -42,7 +44,6 @@
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 
@@ -57,7 +58,8 @@
   private JobTracker jt = null;
   private String[] hosts = null;
   private String[] trackerHosts = null;
-  public static final Log LOG = 
+  private UserGroupInformation owner, user1, user2, user3, user4;
+  private static final Log LOG = 
     LogFactory.getLog(TestNodeRefresh.class);
   
   private String getHostname(int i) {
@@ -65,17 +67,20 @@
   }
 
   private void startCluster(int numHosts, int numTrackerPerHost, 
-                            int numExcluded, Configuration conf) 
+                            int numExcluded, UserGroupInformation clusterUgi,
+                            Configuration conf) 
   throws IOException {
     try {
-   // create fake mapping for the groups
-      Map<String, String[]> u2g_map = new HashMap<String, String[]> (1);
-      u2g_map.put("user1", new String[] {"user1" });
-      u2g_map.put("user2", new String[] {"user2" });
-      u2g_map.put("user3", new String[] {"abc" });
-      u2g_map.put("user4", new String[] {"supergroup" });
-      DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
-      
+      // create fake mapping for the groups
+      owner = UserGroupInformation.getLoginUser();
+      user1= UserGroupInformation.createUserForTesting("user1", 
+                                                       new String[] {"user1"});
+      user2= UserGroupInformation.createUserForTesting("user2", 
+                                                       new String[] {"user2"});
+      user3= UserGroupInformation.createUserForTesting("user3", 
+                                                       new String[] {"abc"});
+      user4= UserGroupInformation.createUserForTesting("user4", 
+                                                   new String[] {"supergroup"});
       conf.setBoolean("dfs.replication.considerLoad", false);
       
       // prepare hosts info
@@ -89,6 +94,8 @@
       dfs.waitActive();
       dfs.startDataNodes(conf, numHosts, true, null, null, hosts, null);
       dfs.waitActive();
+      FileSystem.mkdirs(dfs.getFileSystem(), new Path("/"),
+          new FsPermission((short) 0777));
 
       namenode = (dfs.getFileSystem()).getUri().getHost() + ":" + 
       (dfs.getFileSystem()).getUri().getPort(); 
@@ -102,7 +109,7 @@
       // start mini mr
       JobConf jtConf = new JobConf(conf);
       mr = new MiniMRCluster(0, 0, numHosts * numTrackerPerHost, namenode, 1, 
-                             null, trackerHosts, null, jtConf, 
+                             null, trackerHosts, clusterUgi, jtConf, 
                              numExcluded * numTrackerPerHost);
       
       jt = mr.getJobTrackerRunner().getJobTracker();
@@ -150,14 +157,12 @@
     // start a cluster with 2 hosts and no exclude-hosts file
     Configuration conf = new Configuration();
     conf.set(JTConfig.JT_HOSTS_EXCLUDE_FILENAME, "");
-    startCluster(2, 1, 0, conf);
+    startCluster(2, 1, 0, UserGroupInformation.getLoginUser(),conf);
 
     conf = mr.createJobConf(new JobConf(conf));
 
     // refresh with wrong user
-    UserGroupInformation ugi_wrong =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI("user1", false);
-    AdminOperationsProtocol client = getClient(conf, ugi_wrong);
+    AdminOperationsProtocol client = getClient(conf, user1);
     boolean success = false;
     try {
       // Also try tool runner
@@ -168,10 +173,7 @@
 
     // refresh with correct user
     success = false;
-    String owner = ShellCommandExecutor.execCommand("whoami").trim();
-    UserGroupInformation ugi_correct =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI(owner, false);
-    client = getClient(conf, ugi_correct);
+    client = getClient(conf, owner);
     try {
       client.refreshNodes();
       success = true;
@@ -181,9 +183,7 @@
 
     // refresh with super user
     success = false;
-    UserGroupInformation ugi_super =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI("user4", true);
-    client = getClient(conf, ugi_super);
+    client = getClient(conf, user4);
     try {
       client.refreshNodes();
       success = true;
@@ -213,21 +213,15 @@
    */
   public void testMRSuperUsers() throws IOException {  
     // start a cluster with 1 host and specified superuser and supergroup
-    UnixUserGroupInformation ugi =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI("user1", false);
     Configuration conf = new Configuration();
-    UnixUserGroupInformation.saveToConf(conf, 
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
     // set the supergroup
     conf.set(JTConfig.JT_SUPERGROUP, "abc");
-    startCluster(2, 1, 0, conf);
+    startCluster(2, 1, 0, UserGroupInformation.createRemoteUser("user1"), conf);
 
     conf = mr.createJobConf(new JobConf(conf));
 
     // refresh with wrong user
-    UserGroupInformation ugi_wrong =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI("user2", false);
-    AdminOperationsProtocol client = getClient(conf, ugi_wrong);
+    AdminOperationsProtocol client = getClient(conf, user2);
     boolean success = false;
     try {
       // Also try tool runner
@@ -238,7 +232,7 @@
 
     // refresh with correct user
     success = false;
-    client = getClient(conf, ugi);
+    client = getClient(conf, user1);
     try {
       client.refreshNodes();
       success = true;
@@ -248,9 +242,7 @@
 
     // refresh with super user
     success = false;
-    UserGroupInformation ugi_super =
-      UnixUserGroupInformation.createImmutable(new String[]{"user3", "abc"});
-    client = getClient(conf, ugi_super);
+    client = getClient(conf, user3);
     try {
       client.refreshNodes();
       success = true;
@@ -271,7 +263,7 @@
     Configuration conf = new Configuration();
     File file = new File("hosts.exclude");
     file.delete();
-    startCluster(2, 1, 0, conf);
+    startCluster(2, 1, 0, UserGroupInformation.getLoginUser(), conf);
     String hostToDecommission = getHostname(1);
     conf = mr.createJobConf(new JobConf(conf));
 
@@ -290,10 +282,7 @@
     }
     file.deleteOnExit();
 
-    String owner = ShellCommandExecutor.execCommand("whoami").trim();
-    UserGroupInformation ugi_correct =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI(owner, false);
-    AdminOperationsProtocol client = getClient(conf, ugi_correct);
+    AdminOperationsProtocol client = getClient(conf, owner);
     try {
       client.refreshNodes();
     } catch (IOException ioe){}
@@ -339,7 +328,7 @@
       out.close();
     }
     
-    startCluster(2, 1, 1, conf);
+    startCluster(2, 1, 1, UserGroupInformation.getLoginUser(), conf);
     
     file.delete();
 
@@ -361,10 +350,7 @@
     
     conf = mr.createJobConf(new JobConf(conf));
 
-    String owner = ShellCommandExecutor.execCommand("whoami").trim();
-    UserGroupInformation ugi_correct =  
-      TestMiniMRWithDFSWithDistinctUsers.createUGI(owner, false);
-    AdminOperationsProtocol client = getClient(conf, ugi_correct);
+    AdminOperationsProtocol client = getClient(conf, owner);
     try {
       client.refreshNodes();
     } catch (IOException ioe){}
@@ -423,7 +409,7 @@
     Configuration conf = new Configuration();
     conf.set(JTConfig.JT_MAX_TRACKER_BLACKLISTS, "1");
 
-    startCluster(2, 1, 0, conf);
+    startCluster(2, 1, 0, UserGroupInformation.getLoginUser(), conf);
     
     assertEquals("Trackers not up", 2,
            mr.getJobTrackerRunner().getJobTracker().getActiveTrackers().length);

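The removed DFSTestUtil.updateConfWithFakeGroupMapping table becomes four createUserForTesting calls: each call returns a UGI carrying the fake user-to-groups entry for the test JVM, which is what lets the "abc" supergroup check in testMRSuperUsers still pass. A sketch covering two rows of the old u2g_map table:

import java.util.Arrays;

import org.apache.hadoop.security.UserGroupInformation;

public class FakeUsers {
  public static void main(String[] args) throws Exception {
    // One call per row of the old u2g_map table.
    UserGroupInformation user3 =
        UserGroupInformation.createUserForTesting("user3", new String[]{"abc"});
    UserGroupInformation user4 = UserGroupInformation
        .createUserForTesting("user4", new String[]{"supergroup"});
    for (UserGroupInformation u : new UserGroupInformation[]{user3, user4}) {
      System.out.println(u.getUserName() + " "
          + Arrays.toString(u.getGroupNames()));
    }
  }
}
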
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java Wed Jan 27 08:32:17 2010
@@ -20,7 +20,6 @@
 import java.io.IOException;
 import javax.security.auth.login.LoginException;
 import junit.framework.TestCase;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /**
@@ -36,7 +35,7 @@
   String adminAcl  = Queue.QueueOperation.ADMINISTER_JOBS.getAclName();
 
   private void setupConfForNoAccess() throws IOException,LoginException {
-    currentUGI = UnixUserGroupInformation.login();
+    currentUGI = UserGroupInformation.getLoginUser();
     String userName = currentUGI.getUserName();
     conf = new JobConf();
 
@@ -58,7 +57,7 @@
    * @return
    */
   private void setupConf(boolean aclSwitch) throws IOException,LoginException{
-    currentUGI = UnixUserGroupInformation.login();
+    currentUGI = UserGroupInformation.getLoginUser();
     String userName = currentUGI.getUserName();
     conf = new JobConf();
 

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManager.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManager.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManager.java Wed Jan 27 08:32:17 2010
@@ -25,9 +25,8 @@
 import static org.junit.Assert.*;
 
 import org.apache.hadoop.mapreduce.QueueState;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.SecurityUtil.AccessControlList;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.junit.After;
 import org.junit.Test;
@@ -109,15 +108,15 @@
     assertTrue(
       q.getAcls().get(
         QueueManager.toFullPropertyName(
-          q.getName(), ACL_SUBMIT_JOB_TAG)).getUsers().contains(
-        "u1"));
+          q.getName(), ACL_SUBMIT_JOB_TAG)).isUserAllowed(
+        UserGroupInformation.createRemoteUser("u1")));
 
     assertTrue(
       q.getAcls().get(
         QueueManager.toFullPropertyName(
           q.getName(),
           ACL_ADMINISTER_JOB_TAG))
-        .getUsers().contains("u2"));
+        .isUserAllowed(UserGroupInformation.createRemoteUser("u2")));
     assertTrue(q.getState().equals(QueueState.STOPPED));
   }
 
@@ -131,26 +130,26 @@
 
     UserGroupInformation ugi;
     // test for acls access when acls are set with *
-    ugi = new UnixUserGroupInformation("u1", new String[]{" "});
+    ugi = UserGroupInformation.createRemoteUser("u1");
     assertTrue(qm.hasAccess("p1" + NAME_SEPARATOR + "p12",
         Queue.QueueOperation.SUBMIT_JOB, ugi));
-    ugi = new UnixUserGroupInformation("u2", new String[]{" "});
+    ugi = UserGroupInformation.createRemoteUser("u2");
     assertTrue(qm.hasAccess("p1" + NAME_SEPARATOR + "p12",
         Queue.QueueOperation.ADMINISTER_JOBS, ugi));
     
     // test for acls access when acls are not set with *
-    ugi = new UnixUserGroupInformation("u1", new String[]{" "});
+    ugi = UserGroupInformation.createRemoteUser("u1");
     assertTrue(qm.hasAccess("p1" + NAME_SEPARATOR + "p11",
         Queue.QueueOperation.SUBMIT_JOB, ugi));
-    ugi = new UnixUserGroupInformation("u2", new String[]{" "});
+    ugi = UserGroupInformation.createRemoteUser("u2");
     assertTrue(qm.hasAccess("p1" + NAME_SEPARATOR + "p11",
         Queue.QueueOperation.ADMINISTER_JOBS, ugi));
     
     // test for acls access when acls are not specified but acls is enabled
-    ugi = new UnixUserGroupInformation("u1", new String[]{" "});
+    ugi = UserGroupInformation.createRemoteUser("u1");
     assertTrue(qm.hasAccess("p1" + NAME_SEPARATOR + "p13",
         Queue.QueueOperation.SUBMIT_JOB, ugi));
-    ugi = new UnixUserGroupInformation("u2", new String[]{" "});
+    ugi = UserGroupInformation.createRemoteUser("u2");
     assertTrue(qm.hasAccess("p1" + NAME_SEPARATOR + "p13",
         Queue.QueueOperation.ADMINISTER_JOBS, ugi));
     
@@ -190,7 +189,7 @@
     QueueManager qm = new QueueManager(CONFIG);
 
     UserGroupInformation ugi =
-      new UnixUserGroupInformation("u1", new String[]{" "});
+      UserGroupInformation.createRemoteUser("u1");
     assertFalse(
       qm.hasAccess(
         "p1",
@@ -369,14 +368,14 @@
               child.getAcls().get(
                 QueueManager.toFullPropertyName(
                   child.getName(), ACL_SUBMIT_JOB_TAG))
-                .getUsers().contains("u1"));
+                .isUserAllowed(UserGroupInformation.createRemoteUser("u1")));
 
             assertTrue(
               child.getAcls().get(
                 QueueManager.toFullPropertyName(
                   child.getName(),
                   ACL_ADMINISTER_JOB_TAG))
-                .getUsers().contains("u2"));
+                .isUserAllowed(UserGroupInformation.createRemoteUser("u2")));
             assertTrue(child.getState().equals(QueueState.STOPPED));
           } else {
             assertTrue(child.getState().equals(QueueState.RUNNING));
@@ -413,14 +412,14 @@
                 QueueManager.toFullPropertyName(
                   child.getName(),
                   ACL_SUBMIT_JOB_TAG))
-                .getUsers().contains("u3"));
+                .isUserAllowed(UserGroupInformation.createRemoteUser("u3")));
 
             assertTrue(
               child.getAcls().get(
                 QueueManager.toFullPropertyName(
                   child.getName(),
                   ACL_ADMINISTER_JOB_TAG))
-                .getUsers().contains("u4"));
+                .isUserAllowed(UserGroupInformation.createRemoteUser("u4")));
             assertTrue(child.getState().equals(QueueState.RUNNING));
           } else {
             assertTrue(child.getState().equals(QueueState.STOPPED));

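The ACL assertions switch from string membership (getUsers().contains("u1")) to isUserAllowed(ugi), which also honors group entries and the "*" wildcard. A sketch, assuming AccessControlList's usual "users groups" string constructor:

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class AclSketch {
  public static void main(String[] args) throws Exception {
    // ACL string format: comma-separated users, a space, comma-separated groups.
    AccessControlList acl = new AccessControlList("u1 g2");
    UserGroupInformation u1 = UserGroupInformation.createRemoteUser("u1");
    UserGroupInformation u3 =
        UserGroupInformation.createUserForTesting("u3", new String[]{"g2"});
    System.out.println(acl.isUserAllowed(u1));  // true: named user
    System.out.println(acl.isUserAllowed(u3));  // true: member of g2
    // "*" admits everyone, matching the "acls are set with *" cases above.
    System.out.println(new AccessControlList("*").isUserAllowed(u1));  // true
  }
}
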
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java Wed Jan 27 08:32:17 2010
@@ -45,7 +45,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.mapreduce.SleepJob;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import static org.apache.hadoop.mapred.DeprecatedQueueConfigurationParser.*;
 import static org.apache.hadoop.mapred.QueueManagerTestUtils.*;
@@ -92,7 +91,7 @@
 
       //properties for mapred-queue-acls.xml
       UserGroupInformation ugi =
-        new UnixUserGroupInformation("unknownUser",new String[]{" "});
+        UserGroupInformation.createRemoteUser("unknownUser");
       hadoopConfProps.put("mapred.queue.default.acl-submit-job", ugi.getUserName());
       hadoopConfProps.put("mapred.queue.q1.acl-submit-job", "u1");
       hadoopConfProps.put("mapred.queue.q2.acl-submit-job", "*");

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java Wed Jan 27 08:32:17 2010
@@ -39,6 +39,7 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Properties;
 
 import org.apache.hadoop.conf.Configuration;
@@ -48,7 +49,6 @@
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.QueueState;
 import org.apache.hadoop.mapreduce.JobStatus.State;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -105,7 +105,6 @@
   @Test
   public void testAclsForSubmitJob() throws Exception {
     Job job;
-    UserGroupInformation.setCurrentUGI(UnixUserGroupInformation.login());
     // submit job to queue p1:p13 with unspecified acls 
     job = submitSleepJob(0, 0, 0, 0, true, "u1,g1", "p1" + NAME_SEPARATOR
         + "p13", conf);
@@ -133,8 +132,7 @@
   public void testAccessToKillJob() throws Exception {
     Job job = submitSleepJob(1, 1, 100, 100, false, "u1,g1", "p1"
         + NAME_SEPARATOR + "p11", conf);
-    UserGroupInformation.setCurrentUGI(UnixUserGroupInformation.login());
-    JobConf jobConf = miniMRCluster.createJobConf();
+    final JobConf jobConf = miniMRCluster.createJobConf();
     Cluster cluster = null;
     JobID jobID = job.getStatus().getJobID();
     //Ensure that the jobinprogress is initied before we issue a kill 
@@ -147,9 +145,14 @@
       tracker.killJob(jobID);
       fail("current user is neither u1 nor in the administer group list");
     } catch (Exception e) {
-      Configuration userConf = new Configuration(miniMRCluster.createJobConf());
-      userConf.set("hadoop.job.ugi", "u1,g1");
-      cluster = new Cluster(userConf);
+      final Configuration userConf = new Configuration(miniMRCluster.createJobConf());
+      UserGroupInformation ugi = 
+        UserGroupInformation.createUserForTesting("u1",new String[]{"g1"});
+      cluster = ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
+        public Cluster run() throws IOException {
+          return new Cluster(userConf);
+        }
+      });
       cluster.getJob(jobID).killJob();
       // kill the running job
       assertEquals("job submitted for u1 and queue p1:p11 is not killed.",
@@ -168,9 +171,14 @@
     assertEquals("job submitted for u1 and queue p1:p11 is not killed.",
         cluster.getJob(jobID).getStatus().getState(), (State.KILLED));
     
-    Configuration userConf = new Configuration(miniMRCluster.createJobConf());
-    userConf.set("hadoop.job.ugi", "u1,g1");
-    cluster = new Cluster(userConf);
+    final Configuration userConf = new Configuration(miniMRCluster.createJobConf());
+    UserGroupInformation ugi = 
+      UserGroupInformation.createUserForTesting("u1",new String[]{"g1"});
+    cluster = ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
+      public Cluster run() throws IOException {
+        return new Cluster(userConf);
+      }
+    });
     job = submitSleepJob(1, 1, 10, 10, false, "u1,g1", "p1" + NAME_SEPARATOR
         + "p11", conf);
     jobID = job.getStatus().getJobID();
@@ -178,15 +186,25 @@
     //signal to the job.
     jip =  tracker.getJob(org.apache.hadoop.mapred.JobID.downgrade(jobID));
     tracker.initJob(jip);
-    jobConf.set("hadoop.job.ugi", "u3,g3");
-    cluster = new Cluster(jobConf);
+    ugi = 
+      UserGroupInformation.createUserForTesting("u3",new String[]{"g3"});
+    cluster = ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
+      public Cluster run() throws IOException {
+        return new Cluster(jobConf);
+      }
+    });
     // try killing job with user not in administer list
     try {
       cluster.getJob(jobID).killJob();
       fail("u3 not in administer list");
     } catch (Exception e) {
-      jobConf.set("hadoop.job.ugi", "u1,g1");
-      cluster = new Cluster(jobConf);
+      ugi = 
+        UserGroupInformation.createUserForTesting("u1",new String[]{"g1"});
+      cluster = ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
+        public Cluster run() throws IOException {
+          return new Cluster(jobConf);
+        }
+      });
       assertFalse(cluster.getJob(jobID).isComplete());
       cluster.getJob(jobID).killJob();
       // kill the running job
@@ -277,7 +295,6 @@
     MRAdmin admin = new MRAdmin(miniMRCluster.createJobConf());
     admin.run(new String[] { "-refreshQueues" });
 
-    UserGroupInformation.setCurrentUGI(UnixUserGroupInformation.login());
     // submit job to queue p1:p11 by any user not in acls-submit-job
     Job job = submitSleepJob(0, 0, 0, 0, true, "u2,g1", "p1" + NAME_SEPARATOR
         + "p11", conf);
@@ -293,9 +310,14 @@
     job = submitSleepJob(1, 1, 0, 0, false, "u1,g1", "p1" + NAME_SEPARATOR
         + "p11", conf);
     // kill the job by any user    
-    JobConf jobConf = miniMRCluster.createJobConf();
-    jobConf.set("hadoop.job.ugi", "u3,g3");
-    Cluster cluster = new Cluster(jobConf);
+    final JobConf jobConf = miniMRCluster.createJobConf();
+    UserGroupInformation ugi = 
+      UserGroupInformation.createUserForTesting("u3", new String[]{"g3"});
+    Cluster cluster = ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
+      public Cluster run() throws IOException {
+        return new Cluster(jobConf);
+      }
+    });
     JobID jobID = job.getStatus().getJobID();
     // Ensure that the JobInProgress is initialized before we issue a kill
     // signal to the job.

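Every hunk in this file applies the same mechanical rewrite: instead of spoofing the caller by writing "hadoop.job.ugi" into the job configuration, the test materializes a UserGroupInformation and performs the privileged call inside doAs(). A minimal self-contained sketch of the new pattern follows; the class name, helper name, and the u1/g1 identity are illustrative only, not part of this commit.

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Cluster;
    import org.apache.hadoop.security.UserGroupInformation;

    public class ClusterAsUserSketch {
      /** Builds a Cluster client whose RPC identity is the given test user. */
      static Cluster clusterAs(String user, String group, final Configuration conf)
          throws IOException, InterruptedException {
        UserGroupInformation ugi =
            UserGroupInformation.createUserForTesting(user, new String[]{group});
        // Everything executed inside run() sees `user` as the current user.
        return ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
          public Cluster run() throws IOException {
            return new Cluster(conf);
          }
        });
      }
    }
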
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java Wed Jan 27 08:32:17 2010
@@ -20,6 +20,7 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 
 import junit.framework.TestCase;
 
@@ -203,15 +204,21 @@
     }
     
     // now submit job3 with inappropriate acls
-    JobConf job3 = mr.createJobConf();
-    job3.set("hadoop.job.ugi","abc,users");
-
+    final JobConf job3 = mr.createJobConf();
+    UserGroupInformation ugi3 = 
+      UserGroupInformation.createUserForTesting("abc", new String[]{"users"});
+    
     UtilsForTests.configureWaitingJobConf(job3, 
         new Path(TEST_DIR, "input"), new Path(TEST_DIR, "output5"), 1, 0, 
         "test-recovery-manager", signalFile, signalFile);
     
     // submit the job
-    RunningJob rJob3 = (new JobClient(job3)).submitJob(job3);
+    RunningJob rJob3 = ugi3.doAs(new PrivilegedExceptionAction<RunningJob>() {
+      public RunningJob run() throws IOException {
+        return (new JobClient(job3)).submitJob(job3); 
+      }
+    });
+      
     LOG.info("Submitted job " + rJob3.getID() + " with different user");
     
     jip = jobtracker.getJob(rJob3.getID());
@@ -233,7 +240,7 @@
     mr.getJobTrackerConf().setInt(JTConfig.JT_TASKS_PER_JOB, 25);
     
     mr.getJobTrackerConf().setBoolean("mapred.acls.enabled" , true);
-    UserGroupInformation ugi = UserGroupInformation.readFrom(job1);
+    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
     mr.getJobTrackerConf().set("mapred.queue.default.acl-submit-job", 
                                ugi.getUserName());
 

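Two things change in TestRecoveryManager: submission for the "abc" user now happens inside doAs(), and the submit ACL is derived from getLoginUser() rather than UserGroupInformation.readFrom(job1), which pulled the identity out of the job configuration. A condensed sketch of the submission half, with invented class and helper names:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.RunningJob;
    import org.apache.hadoop.security.UserGroupInformation;

    public class SubmitAsUserSketch {
      /** Submits `job` under a fabricated test identity. */
      static RunningJob submitAs(String user, String[] groups, final JobConf job)
          throws IOException, InterruptedException {
        UserGroupInformation ugi =
            UserGroupInformation.createUserForTesting(user, groups);
        return ugi.doAs(new PrivilegedExceptionAction<RunningJob>() {
          public RunningJob run() throws IOException {
            // Client construction and submission both run as `user`.
            return new JobClient(job).submitJob(job);
          }
        });
      }
    }
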
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java Wed Jan 27 08:32:17 2010
@@ -23,6 +23,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.security.PrivilegedExceptionAction;
 import java.util.Iterator;
 
 import junit.extensions.TestSetup;
@@ -45,7 +46,7 @@
 import org.apache.hadoop.mapred.lib.NullOutputFormat;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /** 
  * This is a test case that tests several miscellaneous functionality. 
@@ -64,11 +65,11 @@
 @SuppressWarnings("deprecation")
 public class TestSeveral extends TestCase {
 
-  static final UnixUserGroupInformation DFS_UGI = 
+  static final UserGroupInformation DFS_UGI = 
     TestMiniMRWithDFSWithDistinctUsers.createUGI("dfs", true); 
-  static final UnixUserGroupInformation TEST1_UGI = 
+  static final UserGroupInformation TEST1_UGI = 
     TestMiniMRWithDFSWithDistinctUsers.createUGI("pi", false); 
-  static final UnixUserGroupInformation TEST2_UGI = 
+  static final UserGroupInformation TEST2_UGI = 
     TestMiniMRWithDFSWithDistinctUsers.createUGI("wc", false);
 
   private static MiniMRCluster mrCluster = null;
@@ -85,19 +86,19 @@
 
         Configuration conf = new Configuration();
         conf.setInt("dfs.replication", 1);
-        UnixUserGroupInformation.saveToConf(conf,
-            UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI);
         dfs = new MiniDFSCluster(conf, numTT, true, null);
-        fs = dfs.getFileSystem();
+        fs = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
+          public FileSystem run() throws IOException {
+            return dfs.getFileSystem();
+          }
+        });
 
         TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/user");
         TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/mapred");
         TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, 
             conf.get(JTConfig.JT_STAGING_AREA_ROOT));
 
-        UnixUserGroupInformation MR_UGI = 
-          TestMiniMRWithDFSWithDistinctUsers.createUGI(
-              UnixUserGroupInformation.login().getUserName(), false); 
+        UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser(); 
 
         // Create a TestJobInProgressListener.MyListener and associate
         // it with the MiniMRCluster
@@ -105,7 +106,7 @@
         myListener = new MyListener();
         conf.set(JTConfig.JT_IPC_HANDLER_COUNT, "1");
         mrCluster =   new MiniMRCluster(0, 0,
-            numTT, dfs.getFileSystem().getUri().toString(), 
+            numTT, fs.getUri().toString(), 
             1, null, null, MR_UGI, new JobConf());
         // make cleanup inline so that validation of the existence of these
         // directories can be done
@@ -193,7 +194,7 @@
    * @throws Exception
    */
   public void testSuccessfulJob() throws Exception {
-    JobConf conf = mrCluster.createJobConf();
+    final JobConf conf = mrCluster.createJobConf();
 
     // Set a complex Job name (TestJobName)
     conf.setJobName("[name][some other value that gets" +
@@ -226,20 +227,27 @@
       TaskAttemptID.getTaskAttemptIDsPattern(null, null, TaskType.MAP, 1, null);
     conf.setKeepTaskFilesPattern(pattern);
 
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, TEST1_UGI);
-
     final Path inDir = new Path("./test/input");
     final Path outDir = new Path("./test/output");
 
-    FileInputFormat.setInputPaths(conf, inDir);
-    FileOutputFormat.setOutputPath(conf, outDir);
+    TEST1_UGI.doAs(new PrivilegedExceptionAction<Void>() {
+      public Void run() {
+        FileInputFormat.setInputPaths(conf, inDir);
+        FileOutputFormat.setOutputPath(conf, outDir);   
+        return null;
+      }
+    });
 
     clean(fs, outDir);
-    makeInput(inDir, conf);
-    JobClient jobClient = new JobClient(conf);
-    RunningJob job = jobClient.submitJob(conf);
-    JobID jobId = job.getID();
+    final RunningJob job = TEST1_UGI.doAs(new PrivilegedExceptionAction<RunningJob>() {
+      public RunningJob run() throws IOException {
+        makeInput(inDir, conf);
+        JobClient jobClient = new JobClient(conf);
+        return jobClient.submitJob(conf);
+      }
+    });
+    
+    final JobID jobId = job.getID();
 
     while (job.getJobState() != JobStatus.RUNNING) {
       try {
@@ -283,24 +291,31 @@
     TestJobClient.verifyJobPriority(jobId.toString(), "HIGH", conf);
     
     // Basic check if the job did run fine
-    verifyOutput(outDir.getFileSystem(conf), outDir);
-
-    //TestJobHistory
-    TestJobHistory.validateJobHistoryFileFormat(
-        mrCluster.getJobTrackerRunner().getJobTracker().getJobHistory(),
-        jobId, conf, "SUCCEEDED", false);
+    TEST1_UGI.doAs(new PrivilegedExceptionAction<Void>() {
+      public Void run() throws IOException {
+        verifyOutput(outDir.getFileSystem(conf), outDir);
+
+        //TestJobHistory
+        TestJobHistory.validateJobHistoryFileFormat(
+            mrCluster.getJobTrackerRunner().getJobTracker().getJobHistory(),
+            jobId, conf, "SUCCEEDED", false);
+
+        TestJobHistory.validateJobHistoryFileContent(mrCluster, job, conf);
+
+        // Since we keep setKeepTaskFilesPattern, these files should still be
+        // present and will not be cleaned up.
+        for(int i=0; i < numTT; ++i) {
+          Path jobDirPath =
+            new Path(mrCluster.getTaskTrackerLocalDir(i), TaskTracker
+                .getJobCacheSubdir(TEST1_UGI.getUserName()));
+          boolean b = FileSystem.getLocal(conf).delete(jobDirPath, true);
+          assertTrue(b);
+        }
+        return null;
+      }
+    });
     
-    TestJobHistory.validateJobHistoryFileContent(mrCluster, job, conf);
-
-    // Since we keep setKeepTaskFilesPattern, these files should still be
-    // present and will not be cleaned up.
-    for(int i=0; i < numTT; ++i) {
-      Path jobDirPath =
-          new Path(mrCluster.getTaskTrackerLocalDir(i), TaskTracker
-              .getJobCacheSubdir(TEST1_UGI.getUserName()));
-      boolean b = FileSystem.getLocal(conf).delete(jobDirPath, true);
-      assertTrue(b);
-    }
   }
 
   /**

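A detail worth noticing in the TestSeveral hunks: the FileSystem handle is obtained once inside DFS_UGI.doAs() and then reused directly (hence fs.getUri() replacing dfs.getFileSystem().getUri()). A FileSystem created under a UGI keeps that identity for subsequent operations, so wrapping every later call is unnecessary. A sketch under that assumption; the class and method names are invented:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.security.UserGroupInformation;

    public class FsAsUserSketch {
      /** Returns a FileSystem bound to `ugi`; later calls keep that identity. */
      static FileSystem fileSystemAs(UserGroupInformation ugi,
          final MiniDFSCluster dfs) throws IOException, InterruptedException {
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
          public FileSystem run() throws IOException {
            return dfs.getFileSystem();
          }
        });
      }
    }
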
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java Wed Jan 27 08:32:17 2010
@@ -20,6 +20,7 @@
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -42,7 +43,6 @@
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.SleepJob;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
@@ -195,22 +195,21 @@
    */
   public void testSecureJobExecution() throws Exception {
     LOG.info("Testing secure job submission/execution");
-    MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
+    Configuration conf = new Configuration();
+    final MiniDFSCluster dfs = new MiniDFSCluster(conf, 1, true, null);
     try {
-      Configuration conf = new Configuration();
-      UnixUserGroupInformation.saveToConf(conf,
-      UnixUserGroupInformation.UGI_PROPERTY_NAME, 
-      TestMiniMRWithDFSWithDistinctUsers.DFS_UGI);
-      dfs = new MiniDFSCluster(conf, 1, true, null);
-      FileSystem fs = dfs.getFileSystem();
+      FileSystem fs = 
+        TestMiniMRWithDFSWithDistinctUsers.DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
+          public FileSystem run() throws IOException {
+            return dfs.getFileSystem();
+          }
+        });
       TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/user");
       TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/mapred");
       TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, 
           conf.get(JTConfig.JT_STAGING_AREA_ROOT));
-      UnixUserGroupInformation MR_UGI = 
-        TestMiniMRWithDFSWithDistinctUsers.createUGI(
-           UnixUserGroupInformation.login().getUserName(), false); 
+      UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser();
       mr = new MiniMRCluster(0, 0, 1, dfs.getFileSystem().getUri().toString(),
                              1, null, null, MR_UGI);
       JobTracker jt = mr.getJobTrackerRunner().getJobTracker();
@@ -223,12 +222,11 @@
       final Path reduceSignalFile = new Path(TEST_DIR, "reduce-signal");
       
       // create a ugi for user 1
-      UnixUserGroupInformation user1 = 
+      UserGroupInformation user1 = 
         TestMiniMRWithDFSWithDistinctUsers.createUGI("user1", false);
       Path inDir = new Path("/user/input");
       Path outDir = new Path("/user/output");
-      JobConf job = 
-      TestMiniMRWithDFSWithDistinctUsers.createJobConf(mr, user1);
+      final JobConf job = mr.createJobConf();
 
       UtilsForTests.configureWaitingJobConf(job, inDir, outDir, 2, 0, 
         "test-submit-job", mapSignalFile.toString(), 
@@ -238,16 +236,24 @@
       job.set(UtilsForTests.getTaskSignalParameter(false), 
       reduceSignalFile.toString());
       LOG.info("Submit job as the actual user (" + user1.getUserName() + ")");
-      JobClient jClient = new JobClient(job);
-      RunningJob rJob = jClient.submitJob(job);
+      final JobClient jClient = 
+        user1.doAs(new PrivilegedExceptionAction<JobClient>() {
+          public JobClient run() throws IOException {
+            return new JobClient(job);
+          }
+        });
+      RunningJob rJob = user1.doAs(new PrivilegedExceptionAction<RunningJob>() {
+        public RunningJob run() throws IOException {
+          return jClient.submitJob(job);
+        }
+      });
       JobID id = rJob.getID();
       LOG.info("Running job " + id);
 
       // create user2
-      UnixUserGroupInformation user2 = 
+      UserGroupInformation user2 = 
         TestMiniMRWithDFSWithDistinctUsers.createUGI("user2", false);
-      JobConf conf_other = 
-      TestMiniMRWithDFSWithDistinctUsers.createJobConf(mr, user2);
+      JobConf conf_other = mr.createJobConf();
       org.apache.hadoop.hdfs.protocol.ClientProtocol client = 
         getDFSClient(conf_other, user2);
 

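The reshuffling at the top of testSecureJobExecution — dfs hoisted above the try block, conf and job declared final — is forced by Java's rule that anonymous inner classes may only capture final local variables, which is also why final creeps into so many declarations across this commit. A tiny sketch of the constraint, with invented names:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class FinalCaptureSketch {
      static String tagCurrentUser(UserGroupInformation ugi, final String tag)
          throws IOException, InterruptedException {
        // `tag` must be final (pre-Java-8) to be readable inside run().
        return ugi.doAs(new PrivilegedExceptionAction<String>() {
          public String run() throws IOException {
            return tag + "=" + UserGroupInformation.getCurrentUser().getUserName();
          }
        });
      }
    }
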
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java Wed Jan 27 08:32:17 2010
@@ -22,8 +22,6 @@
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.jar.JarOutputStream;
 import java.util.zip.ZipEntry;
 
@@ -43,7 +41,6 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.mapred.JvmManager.JvmEnv;
 import org.apache.hadoop.mapred.TaskController.JobInitializationContext;
 import org.apache.hadoop.mapred.TaskController.TaskControllerContext;
@@ -126,8 +123,6 @@
     // Create the job configuration file. Same as trackerConf in this test.
     Job job = new Job(trackerFConf);
 
-    job.setUGIAndUserGroupNames();
-
     // JobClient uploads the job jar to the file system and sets it in the
     // jobConf.
     uploadJobJar(job);
@@ -144,7 +139,7 @@
     tracker.setLocalFileSystem(tracker.systemFS);
     tracker.systemDirectory = new Path(TEST_ROOT_DIR.getAbsolutePath());
     
-    taskTrackerUGI = UserGroupInformation.login(trackerFConf);
+    taskTrackerUGI = UserGroupInformation.getCurrentUser();
 
     // Set up the task to be localized
     String jtIdentifier = "200907202331";
@@ -154,6 +149,7 @@
     task =
         new MapTask(jobConfFile.toURI().toString(), taskId, 1, null, 1);
     task.setConf(job.getConfiguration()); // Set conf. Set user name in particular.
+    task.setUser(UserGroupInformation.getCurrentUser().getUserName());
 
     // create jobTokens file
     uploadJobTokensFile(); 
@@ -401,7 +397,7 @@
    * @throws IOException
    */
   public void testJobLocalization()
-      throws IOException {
+      throws Exception {
     if (!canRun()) {
       return;
     }
@@ -497,7 +493,7 @@
    * @throws IOException
    */
   public void testTaskLocalization()
-      throws IOException {
+      throws Exception {
     if (!canRun()) {
       return;
     }
@@ -670,7 +666,7 @@
    * @throws IOException
    */
   public void testTaskCleanup()
-      throws IOException {
+      throws Exception {
     if (!canRun()) {
       return;
     }
@@ -682,7 +678,7 @@
    * @throws IOException
    */
   public void testFailedTaskCleanup()
-  throws IOException {
+  throws Exception {
     if (!canRun()) {
       return;
     }
@@ -694,7 +690,7 @@
    * @throws IOException
    */
   public void testTaskCleanupWithJvmUse()
-      throws IOException {
+      throws Exception {
     if (!canRun()) {
       return;
     }
@@ -705,7 +701,7 @@
    * Validates if task cleanup is done properly
    */
   private void testTaskCleanup(boolean needCleanup, boolean jvmReuse)
-      throws IOException {
+      throws Exception {
     // Localize job and localize task.
     tracker.getLocalizer().initializeUserDirs(task.getUser());
     localizedJobConf = tracker.localizeJobFiles(task);

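The localization test swaps UserGroupInformation.login(trackerFConf) for getCurrentUser(), and the distinction matters in the new API: getCurrentUser() reflects whatever identity is active in the calling context, including inside a doAs() block, while getLoginUser() always returns the cached OS login of the JVM. A small demonstration, with an illustrative user name:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class CurrentVsLoginSketch {
      public static void main(String[] args) throws Exception {
        UserGroupInformation alice = UserGroupInformation
            .createUserForTesting("alice", new String[]{"testers"});
        alice.doAs(new PrivilegedExceptionAction<Void>() {
          public Void run() throws IOException {
            // Prints "alice": the doAs context defines the current user.
            System.out.println(UserGroupInformation.getCurrentUser().getUserName());
            // Still prints the OS account that started this JVM.
            System.out.println(UserGroupInformation.getLoginUser().getUserName());
            return null;
          }
        });
      }
    }
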
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java Wed Jan 27 08:32:17 2010
@@ -384,9 +384,8 @@
     }
   }
 
-  protected String getJobOwnerName() throws LoginException {
-    UserGroupInformation ugi = UserGroupInformation.login(conf);
-    return ugi.getUserName();
+  protected String getJobOwnerName() throws IOException {
+    return UserGroupInformation.getLoginUser().getUserName();
   }
 
   /** test delete cache */

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java Wed Jan 27 08:32:17 2010
@@ -78,6 +78,7 @@
         ShellBasedUnixGroupsMapping.class,GroupMappingServiceProvider.class).
         getName());
     
+    Groups.getUserToGroupsMappingService(config);
     String namenodeUrl = "hdfs://localhost:" + "0";
     FileSystem.setDefaultUri(config, namenodeUrl);
     
@@ -104,8 +105,8 @@
     MRAdmin admin = new MRAdmin(config);
     String [] args = new String[] { "-refreshUserToGroupsMappings" };
     
-    Groups groups = SecurityUtil.getUserToGroupsMappingService(config);
-    String user = UnixUserGroupInformation.getUnixUserName();
+    Groups groups = Groups.getUserToGroupsMappingService(config);
+    String user = UserGroupInformation.getLoginUser().getShortUserName();
     System.out.println("first attempt:");
     List<String> g1 = groups.getGroups(user);
     String [] str_groups = new String [g1.size()];

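Group resolution moves from SecurityUtil.getUserToGroupsMappingService to the static factory on Groups itself, and the new call added to setUp presumably primes that shared service with the test configuration before the cluster starts. Note also getShortUserName(), which reduces a Kerberos principal to its plain account name. A sketch of the lookup; the class and helper names are invented:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.Groups;
    import org.apache.hadoop.security.UserGroupInformation;

    public class GroupLookupSketch {
      /** Resolves the login user's groups via the shared mapping service. */
      static List<String> loginUserGroups(Configuration conf) throws IOException {
        Groups groups = Groups.getUserToGroupsMappingService(conf);
        String user = UserGroupInformation.getLoginUser().getShortUserName();
        return groups.getGroups(user);
      }
    }
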
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java Wed Jan 27 08:32:17 2010
@@ -20,6 +20,7 @@
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -32,7 +33,7 @@
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.hadoop.mapred.TestMiniMRWithDFS;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
 import junit.framework.TestCase;
@@ -105,7 +106,7 @@
       final int slaves = 4;
 
       // Turn on service-level authorization
-      Configuration conf = new Configuration();
+      final Configuration conf = new Configuration();
       conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                     HDFSPolicyProvider.class, PolicyProvider.class);
       conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
@@ -120,12 +121,13 @@
       // Simulate an 'edit' of hadoop-policy.xml
       String confDir = System.getProperty("test.build.extraconf", 
                                           "build/test/extraconf");
-      File policyFile = new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE);
-      String policyFileCopy = ConfiguredPolicy.HADOOP_POLICY_FILE + ".orig";
+      String HADOOP_POLICY_FILE = System.getProperty("hadoop.policy.file");
+      File policyFile = new File(confDir, HADOOP_POLICY_FILE);
+      String policyFileCopy = HADOOP_POLICY_FILE + ".orig";
       FileUtil.copy(policyFile, FileSystem.getLocal(conf),   // first save original 
                     new Path(confDir, policyFileCopy), false, conf);
       rewriteHadoopPolicyFile(                               // rewrite the file
-          new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE));
+          new File(confDir, HADOOP_POLICY_FILE));
       
       // Refresh the service level authorization policy
       refreshPolicy(conf);
@@ -135,17 +137,23 @@
       try {
         // Note: hadoop-policy.xml for tests has 
         // security.refresh.policy.protocol.acl = ${user.name}
-        conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, UNKNOWN_USER);
-        refreshPolicy(conf);
+        UserGroupInformation unknownUser = 
+          UserGroupInformation.createRemoteUser("unknown");
+        unknownUser.doAs(new PrivilegedExceptionAction<Void>() {
+          public Void run() throws IOException {
+            refreshPolicy(conf);
+            return null;
+          }
+        });
        fail("Refresh of NameNode's policy file should not have succeeded!");
-      } catch (RemoteException re) {
+      } catch (Exception re) {
        System.out.println("Good, the refresh was rejected as expected: " + 
-                           StringUtils.stringifyException(re.unwrapRemoteException()));
+                           StringUtils.stringifyException(re));
       } finally {
         // Reset to original hadoop-policy.xml
         FileUtil.fullyDelete(new File(confDir, 
-            ConfiguredPolicy.HADOOP_POLICY_FILE));
-        FileUtil.replaceFile(new File(confDir, policyFileCopy), new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE));
+            HADOOP_POLICY_FILE));
+        FileUtil.replaceFile(new File(confDir, policyFileCopy), new File(confDir, HADOOP_POLICY_FILE));
       }
     } finally {
       if (dfs != null) { dfs.shutdown(); }

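For the negative test the commit uses createRemoteUser rather than createUserForTesting: it yields a UGI carrying only a user name, with no fabricated group memberships, which is exactly what a should-be-refused caller needs. A sketch of the denial pattern; the helper and the hard-coded "unknown" are illustrative:

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class DenialSketch {
      /** Runs `action` as a user no policy file should recognize. */
      static void expectRefusal(PrivilegedExceptionAction<Void> action)
          throws Exception {
        UserGroupInformation unknown =
            UserGroupInformation.createRemoteUser("unknown");
        try {
          unknown.doAs(action);
          throw new AssertionError("the call should have been refused");
        } catch (Exception expected) {
          // An authorization failure lands here, as in the hunk above.
        }
      }
    }
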
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/tools/TestCopyFiles.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/tools/TestCopyFiles.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/tools/TestCopyFiles.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/tools/TestCopyFiles.java Wed Jan 27 08:32:17 2010
@@ -23,6 +23,7 @@
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
@@ -46,7 +47,6 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.DistCp;
 import org.apache.hadoop.util.ToolRunner;
@@ -874,11 +874,11 @@
 
   static final long now = System.currentTimeMillis();
 
-  static UnixUserGroupInformation createUGI(String name, boolean issuper) {
+  static UserGroupInformation createUGI(String name, boolean issuper) {
     String username = name + now;
     String group = issuper? "supergroup": username;
-    return UnixUserGroupInformation.createImmutable(
-        new String[]{username, group});
+    return UserGroupInformation.createUserForTesting(username, 
+        new String[]{group});
   }
 
   static Path createHomeDirectory(FileSystem fs, UserGroupInformation ugi
@@ -893,39 +893,55 @@
   public void testHftpAccessControl() throws Exception {
     MiniDFSCluster cluster = null;
     try {
-      final UnixUserGroupInformation DFS_UGI = createUGI("dfs", true); 
-      final UnixUserGroupInformation USER_UGI = createUGI("user", false); 
+      final UserGroupInformation DFS_UGI = createUGI("dfs", true); 
+      final UserGroupInformation USER_UGI = createUGI("user", false); 
 
       //start cluster by DFS_UGI
       final Configuration dfsConf = new Configuration();
-      UnixUserGroupInformation.saveToConf(dfsConf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI);
       cluster = new MiniDFSCluster(dfsConf, 2, true, null);
       cluster.waitActive();
 
       final String httpAdd = dfsConf.get("dfs.http.address");
       final URI nnURI = FileSystem.getDefaultUri(dfsConf);
       final String nnUri = nnURI.toString();
-      final Path home = createHomeDirectory(FileSystem.get(nnURI, dfsConf), USER_UGI);
+      FileSystem fs1 = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
+        public FileSystem run() throws IOException {
+          return FileSystem.get(nnURI, dfsConf);
+        }
+      });
+      final Path home = createHomeDirectory(fs1, USER_UGI);
       
       //now, login as USER_UGI
       final Configuration userConf = new Configuration();
-      UnixUserGroupInformation.saveToConf(userConf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, USER_UGI);
-      final FileSystem fs = FileSystem.get(nnURI, userConf);
-
+      final FileSystem fs = 
+        USER_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
+        public FileSystem run() throws IOException {
+          return FileSystem.get(nnURI, userConf);
+        }
+      });
+      
       final Path srcrootpath = new Path(home, "src_root"); 
       final String srcrootdir =  srcrootpath.toString();
       final Path dstrootpath = new Path(home, "dst_root"); 
       final String dstrootdir =  dstrootpath.toString();
-      final DistCp distcp = new DistCp(userConf);
+      final DistCp distcp = USER_UGI.doAs(new PrivilegedExceptionAction<DistCp>() {
+        public DistCp run() {
+          return new DistCp(userConf);
+        }
+      });
 
       FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700));
       final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir};
 
       { //copy with permission 000, should fail
         fs.setPermission(srcrootpath, new FsPermission((short)0));
-        assertEquals(-3, ToolRunner.run(distcp, args));
+        USER_UGI.doAs(new PrivilegedExceptionAction<Void>() {
+          public Void run() throws Exception {
+            assertEquals(-3, ToolRunner.run(distcp, args));
+            return null;
+          }
+        });
       }
     } finally {
       if (cluster != null) { cluster.shutdown(); }

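The distcp test applies the same idea to a Tool: both construction of DistCp and ToolRunner.run() execute under the owning UGI, so the copy's RPCs carry that user (return code -3 being the permission failure asserted above). A condensed sketch with invented names:

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.util.ToolRunner;

    public class DistCpAsUserSketch {
      /** Runs distcp with `args` as `ugi`; returns the tool's exit code. */
      static int distcpAs(UserGroupInformation ugi, final Configuration conf,
          final String[] args) throws Exception {
        return ugi.doAs(new PrivilegedExceptionAction<Integer>() {
          public Integer run() throws Exception {
            return ToolRunner.run(new DistCp(conf), args);
          }
        });
      }
    }
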
Modified: hadoop/mapreduce/trunk/src/tools/org/apache/hadoop/tools/HadoopArchives.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/tools/org/apache/hadoop/tools/HadoopArchives.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/tools/org/apache/hadoop/tools/HadoopArchives.java (original)
+++ hadoop/mapreduce/trunk/src/tools/org/apache/hadoop/tools/HadoopArchives.java Wed Jan 27 08:32:17 2010
@@ -63,7 +63,6 @@
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.JobSubmissionFiles;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;


