hadoop-mapreduce-commits mailing list archives

From: omal...@apache.org
Subject: svn commit: r903563 [1/2] - in /hadoop/mapreduce/trunk: ./ src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ src/contrib/raid/src/java/org/apache/hadoop/raid/ src/contrib/streaming/src/test/org/apache/hadoop/streaming/ src/java/org/apac...
Date: Wed, 27 Jan 2010 08:32:22 GMT
Author: omalley
Date: Wed Jan 27 08:32:17 2010
New Revision: 903563

URL: http://svn.apache.org/viewvc?rev=903563&view=rev
Log:
MAPREDUCE-1385. Use the new UserGroupInformation from HADOOP-6299.
(ddas via omalley)
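
The core of the change is mechanical: every call to the old UnixUserGroupInformation.login(conf, ...), with its LoginException handling, becomes a call to the new static UserGroupInformation accessors. A minimal before/after sketch of the idiom (the class wrapper is illustrative, not part of this patch):

    import java.io.IOException;
    import org.apache.hadoop.security.UserGroupInformation;

    public class UgiMigrationSketch {
      public static void main(String[] args) throws IOException {
        // Old (pre-HADOOP-6299), removed throughout this patch:
        //   try {
        //     ugi = UnixUserGroupInformation.login(conf, true);
        //   } catch (LoginException e) {
        //     throw (IOException) new IOException().initCause(e);
        //   }
        // New: the current user is resolved from the process's JAAS
        // context, and no LoginException handling is needed.
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        System.out.println("running as " + ugi.getUserName());
      }
    }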

Modified:
    hadoop/mapreduce/trunk/CHANGES.txt
    hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/CapacityTestUtils.java
    hadoop/mapreduce/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java
    hadoop/mapreduce/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/DeprecatedQueueConfigurationParser.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IsolationRunner.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobClient.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueClient.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LinuxTaskController.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Queue.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueConfigurationParser.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueManager.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskRunner.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/tools/MRAdmin.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java
    hadoop/mapreduce/trunk/src/test/mapred-site.xml
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/cli/testMRConf.xml
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestIsolationRunner.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManager.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/tools/TestCopyFiles.java
    hadoop/mapreduce/trunk/src/tools/org/apache/hadoop/tools/HadoopArchives.java

Modified: hadoop/mapreduce/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/CHANGES.txt?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/CHANGES.txt (original)
+++ hadoop/mapreduce/trunk/CHANGES.txt Wed Jan 27 08:32:17 2010
@@ -7,6 +7,9 @@
     MAPREDUCE-1287. Only call the partitioner with more than one reducer.
     (cdouglas)
 
+    MAPREDUCE-1385. Use the new UserGroupInformation from HADOOP-6299.
+    (ddas via omalley)
+
   NEW FEATURES
 
     MAPREDUCE-698. Per-pool task limits for the fair scheduler.

Modified: hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/CapacityTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/CapacityTestUtils.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/CapacityTestUtils.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/CapacityTestUtils.java Wed Jan 27 08:32:17 2010
@@ -43,7 +43,7 @@
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
 import org.apache.hadoop.mapreduce.split.JobSplit;
-import org.apache.hadoop.security.SecurityUtil.AccessControlList;
+import org.apache.hadoop.security.authorize.AccessControlList;
 
 
 public class CapacityTestUtils {

Modified: hadoop/mapreduce/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java Wed Jan 27 08:32:17 2010
@@ -37,7 +37,7 @@
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryProxy;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.fs.Path;
 
 import org.apache.hadoop.raid.protocol.PolicyInfo;
@@ -52,7 +52,7 @@
   public static final Log LOG = LogFactory.getLog( "org.apache.hadoop.RaidShell");
   public RaidProtocol raidnode;
   final RaidProtocol rpcRaidnode;
-  private UnixUserGroupInformation ugi;
+  private UserGroupInformation ugi;
   volatile boolean clientRunning = true;
   private Configuration conf;
 
@@ -75,11 +75,7 @@
    */
   public RaidShell(Configuration conf) throws IOException {
     super(conf);
-    try {
-      this.ugi = UnixUserGroupInformation.login(conf, true);
-    } catch (LoginException e) {
-      throw (IOException)(new IOException().initCause(e));
-    }
+    this.ugi = UserGroupInformation.getCurrentUser();
 
     this.rpcRaidnode = createRPCRaidnode(RaidNode.getAddress(conf), conf, ugi);
     this.raidnode = createRaidnode(rpcRaidnode);
@@ -91,16 +87,13 @@
 
   public static RaidProtocol createRaidnode(InetSocketAddress raidNodeAddr,
       Configuration conf) throws IOException {
-    try {
-      return createRaidnode(createRPCRaidnode(raidNodeAddr, conf,
-        UnixUserGroupInformation.login(conf, true)));
-    } catch (LoginException e) {
-      throw (IOException)(new IOException().initCause(e));
-    }
+    return createRaidnode(createRPCRaidnode(raidNodeAddr, conf,
+      UserGroupInformation.getCurrentUser()));
+
   }
 
   private static RaidProtocol createRPCRaidnode(InetSocketAddress raidNodeAddr,
-      Configuration conf, UnixUserGroupInformation ugi)
+      Configuration conf, UserGroupInformation ugi)
     throws IOException {
     LOG.info("RaidShell connecting to " + raidNodeAddr);
     return (RaidProtocol)RPC.getProxy(RaidProtocol.class,

Modified: hadoop/mapreduce/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java Wed Jan 27 08:32:17 2010
@@ -20,6 +20,7 @@
 
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -47,25 +48,32 @@
       return;
     }
     startCluster();
-    JobConf myConf = getClusterConf();
-    FileSystem inFs = inputPath.getFileSystem(myConf);
-    FileSystem outFs = outputPath.getFileSystem(myConf);
-    outFs.delete(outputPath, true);
-    if (!inFs.mkdirs(inputPath)) {
-      throw new IOException("Mkdirs failed to create " + inFs.toString());
-    }
-    DataOutputStream file = inFs.create(new Path(inputPath, "part-0"));
-    file.writeBytes(input);
-    file.close();
-    String[] args =
-        new String[] { "-input", inputPath.makeQualified(inFs).toString(),
+    final JobConf myConf = getClusterConf();
+    taskControllerUser.doAs(new PrivilegedExceptionAction<Void>() {
+      public Void run() throws IOException{
+
+        FileSystem inFs = inputPath.getFileSystem(myConf);
+        FileSystem outFs = outputPath.getFileSystem(myConf);
+        outFs.delete(outputPath, true);
+        if (!inFs.mkdirs(inputPath)) {
+          throw new IOException("Mkdirs failed to create " + inFs.toString());
+        }
+        DataOutputStream file = inFs.create(new Path(inputPath, "part-0"));
+        file.writeBytes(input);
+        file.close();
+        final String[] args =
+          new String[] { "-input", inputPath.makeQualified(inFs).toString(),
             "-output", outputPath.makeQualified(outFs).toString(), "-mapper",
             map, "-reducer", reduce, "-jobconf",
             "mapreduce.task.files.preserve.failedtasks=true", "-jobconf",
             "stream.tmpdir=" + System.getProperty("test.build.data", "/tmp") };
-    StreamJob streamJob = new StreamJob(args, true);
-    streamJob.setConf(myConf);
-    assertTrue("Job has not succeeded", streamJob.go() == 0);
-    assertOwnerShip(outputPath);
+
+        StreamJob streamJob = new StreamJob(args, true);
+        streamJob.setConf(myConf);
+        assertTrue("Job has not succeeded", streamJob.go() == 0);
+        assertOwnerShip(outputPath);
+        return null;
+      }
+    });
   }
 }
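
The doAs idiom the test adopts above is the patch's standard way of running code as a specific user. Standing alone it looks roughly like this (the user name "alice" is a placeholder; createRemoteUser builds a credential-less UGI, which suffices on non-Kerberos clusters):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) throws Exception {
        UserGroupInformation alice =
            UserGroupInformation.createRemoteUser("alice");
        alice.doAs(new PrivilegedExceptionAction<Void>() {
          public Void run() throws IOException {
            // Everything here runs with alice as the caller identity,
            // which is what RPC servers and FileSystems will see.
            System.out.println("inside doAs");
            return null;
          }
        });
      }
    }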

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/DeprecatedQueueConfigurationParser.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/DeprecatedQueueConfigurationParser.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/DeprecatedQueueConfigurationParser.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/DeprecatedQueueConfigurationParser.java Wed Jan 27 08:32:17 2010
@@ -20,7 +20,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.QueueState;
-import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import static org.apache.hadoop.mapred.QueueManager.*;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -60,7 +60,7 @@
     List<Queue> list = new ArrayList<Queue>();
     for (String name : queueNameValues) {
       try {
-        Map<String, SecurityUtil.AccessControlList> acls = getQueueAcls(
+        Map<String, AccessControlList> acls = getQueueAcls(
           name, conf);
         QueueState state = getQueueState(name, conf);
         Queue q = new Queue(name, acls, state);
@@ -143,15 +143,15 @@
   /**
    * Parse ACLs for the queue from the configuration.
    */
-  private Map<String, SecurityUtil.AccessControlList> getQueueAcls(
+  private Map<String, AccessControlList> getQueueAcls(
     String name,
     Configuration conf) {
-    HashMap<String, SecurityUtil.AccessControlList> map =
-      new HashMap<String, SecurityUtil.AccessControlList>();
+    HashMap<String, AccessControlList> map =
+      new HashMap<String, AccessControlList>();
     for (Queue.QueueOperation oper : Queue.QueueOperation.values()) {
       String aclKey = toFullPropertyName(name, oper.getAclName());
       map.put(
-        aclKey, new SecurityUtil.AccessControlList(
+        aclKey, new AccessControlList(
           conf.get(
             aclKey, "*")));
     }

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IsolationRunner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IsolationRunner.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IsolationRunner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IsolationRunner.java Wed Jan 27 08:32:17 2010
@@ -27,6 +27,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
@@ -143,8 +144,9 @@
    */
   boolean run(String[] args) 
       throws ClassNotFoundException, IOException, InterruptedException {
-    if (args.length != 1) {
-      System.out.println("Usage: IsolationRunner <path>/job.xml");
+    if (args.length < 1) {
+      System.out.println("Usage: IsolationRunner <path>/job.xml " +
+      		"<optional-user-name>");
       return false;
     }
     File jobFilename = new File(args[0]);
@@ -152,7 +154,14 @@
       System.out.println(jobFilename + " is not a valid job file.");
       return false;
     }
+    String user;
+    if (args.length > 1) {
+      user = args[1];
+    } else {
+      user = UserGroupInformation.getCurrentUser().getUserName();
+    }
     JobConf conf = new JobConf(new Path(jobFilename.toString()));
+    conf.setUser(user);
     TaskAttemptID taskId = TaskAttemptID.forName(conf.get(JobContext.TASK_ATTEMPT_ID));
     if (taskId == null) {
       System.out.println("mapreduce.task.attempt.id not found in configuration;" + 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobClient.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobClient.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobClient.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobClient.java Wed Jan 27 08:32:17 2010
@@ -521,7 +521,6 @@
       conf.setBooleanIfUnset("mapred.reducer.new-api", false);
       Job job = Job.getInstance(cluster, conf);
       job.submit();
-      conf.setUser(job.getUser());
       return new NetworkedJob(job);
     } catch (InterruptedException ie) {
       throw new IOException("interrupted", ie);

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java Wed Jan 27 08:32:17 2010
@@ -20,6 +20,7 @@
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -77,8 +78,8 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
 /*************************************************************
@@ -107,6 +108,7 @@
   JobStatus status;
   Path jobFile = null;
   Path localJobFile = null;
+  String user;
 
   TaskInProgress maps[] = new TaskInProgress[0];
   TaskInProgress reduces[] = new TaskInProgress[0];
@@ -362,8 +364,10 @@
    * to the tracker.
    */
   public JobInProgress(JobTracker jobtracker, 
-                       JobConf default_conf, int rCount,
-                       JobInfo jobInfo, TokenStorage ts) throws IOException {
+                       final JobConf default_conf, int rCount,
+                       JobInfo jobInfo,
+                       TokenStorage ts
+                      ) throws IOException, InterruptedException {
     this.restartCount = rCount;
     this.jobId = JobID.downgrade(jobInfo.getJobID());
     String url = "http://" + jobtracker.getJobTrackerMachine() + ":" 
@@ -376,12 +380,12 @@
 
     // use the user supplied token to add user credentials to the conf
     jobSubmitDir = jobInfo.getJobSubmitDir();
-    String user = jobInfo.getUser().toString();
-    conf = new JobConf();
-    conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, 
-        new UnixUserGroupInformation(user, 
-            new String[]{UnixUserGroupInformation.DEFAULT_GROUP}).toString());
-    fs = jobSubmitDir.getFileSystem(conf);
+    user = jobInfo.getUser().toString();
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
+      fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+        public FileSystem run() throws IOException {
+          return jobSubmitDir.getFileSystem(default_conf);
+      }});
     
     this.localJobFile = 
       default_conf.getLocalPath(JobTracker.SUBDIR + "/" + this.jobId + ".xml");
@@ -534,6 +538,13 @@
   public boolean inited() {
     return tasksInited.get();
   }
+  
+  /**
+   * Get the user for the job
+   */
+  public String getUser() {
+    return user;
+  }
 
   /**
    * Get the number of slots required to run a single map task-attempt.
@@ -3242,7 +3253,7 @@
 
       Path tempDir = jobtracker.getSystemDirectoryForJob(getJobID());
       new CleanupQueue().addToQueue(new PathDeletionContext(
-          jobtracker.getFileSystem(tempDir), tempDir.toUri().getPath())); 
+          jobtracker.getFileSystem(), tempDir.toUri().getPath())); 
     } catch (IOException e) {
       LOG.warn("Error cleaning up "+profile.getJobID()+": "+e);
     }
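
The JobInProgress constructor above combines createRemoteUser with doAs to open the job-submit directory's FileSystem as the submitting user rather than as the JobTracker. Reduced to its essentials, with illustrative names, the pattern is:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class FsAsUserSketch {
      static FileSystem fsAsUser(String user, final Path dir,
          final Configuration conf) throws IOException, InterruptedException {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
        // The FileSystem handle returned here is bound to the remote
        // user's identity, so later operations on it act as that user.
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
          public FileSystem run() throws IOException {
            return dir.getFileSystem(conf);
          }
        });
      }
    }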

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueClient.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueClient.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueClient.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueClient.java Wed Jan 27 08:32:17 2010
@@ -192,7 +192,7 @@
    
   private void displayQueueAclsInfoForCurrentUser() throws IOException {
     QueueAclsInfo[] queueAclsInfoList = jc.getQueueAclsForCurrentUser();
-    UserGroupInformation ugi = UserGroupInformation.readFrom(getConf());
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     if (queueAclsInfoList.length > 0) {
       System.out.println("Queue acls for user :  " + ugi.getUserName());
       System.out.println("\nQueue  Operations");

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java Wed Jan 27 08:32:17 2010
@@ -26,6 +26,7 @@
 import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
+import java.security.PrivilegedExceptionAction;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
@@ -49,8 +50,6 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 
-import javax.security.auth.login.LoginException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -90,14 +89,10 @@
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.net.ScriptBasedMapping;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.PermissionChecker;
+import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ConfiguredPolicy;
-import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.util.HostsFileReader;
@@ -225,17 +220,17 @@
    * @throws IOException
    */
   public static JobTracker startTracker(JobConf conf) 
-  throws IOException, InterruptedException, LoginException {
+  throws IOException, InterruptedException {
     return startTracker(conf, DEFAULT_CLOCK);
   }
 
   static JobTracker startTracker(JobConf conf, Clock clock) 
-  throws IOException, InterruptedException, LoginException {
+  throws IOException, InterruptedException {
     return startTracker(conf, clock, generateNewIdentifier());
   }
 
   static JobTracker startTracker(JobConf conf, Clock clock, String identifier) 
-  throws IOException, InterruptedException, LoginException {
+  throws IOException, InterruptedException {
     JobTracker result = null;
     while (true) {
       try {
@@ -1047,7 +1042,6 @@
     void updateRestartCount() throws IOException {
       Path restartFile = getRestartCountFile();
       Path tmpRestartFile = getTempRestartCountFile();
-      FileSystem fs = restartFile.getFileSystem(conf);
       FsPermission filePerm = new FsPermission(SYSTEM_FILE_PERMISSION);
 
       // read the count from the jobtracker info file
@@ -1132,9 +1126,8 @@
           JobInfo token = new JobInfo();
           token.readFields(in);
           in.close();
-          UnixUserGroupInformation ugi = new UnixUserGroupInformation(
-              token.getUser().toString(), 
-              new String[]{UnixUserGroupInformation.DEFAULT_GROUP});
+          UserGroupInformation ugi = 
+            UserGroupInformation.createRemoteUser(token.getUser().toString());
           submitJob(token.getJobID(), restartCount, 
               ugi, token.getJobSubmitDir().toString(), true, null);
           recovered++;
@@ -1325,25 +1318,37 @@
 
   
   JobTracker(JobConf conf) 
-  throws IOException,InterruptedException, LoginException {
+  throws IOException,InterruptedException {
     this(conf, new Clock());
   }
   /**
    * Start the JobTracker process, listen on the indicated port
    */
   JobTracker(JobConf conf, Clock clock) 
-  throws IOException, InterruptedException, LoginException {
+  throws IOException, InterruptedException {
     this(conf, clock, generateNewIdentifier());
   }
 
-  JobTracker(JobConf conf, Clock newClock, String jobtrackerIndentifier) 
-  throws IOException, InterruptedException, LoginException {
+  JobTracker(final JobConf conf, Clock newClock, String jobtrackerIndentifier) 
+  throws IOException, InterruptedException {
     // find the owner of the process
-    clock = newClock;
-    mrOwner = UnixUserGroupInformation.login(conf);
+    // get the desired principal to load
+    String keytabFilename = conf.get(JTConfig.JT_KEYTAB_FILE);
+    UserGroupInformation.setConfiguration(conf);
+    if (keytabFilename != null) {
+      String desiredUser = conf.get(JTConfig.JT_USER_NAME,
+                                    System.getProperty("user.name"));
+      UserGroupInformation.loginUserFromKeytab(desiredUser, 
+                                               keytabFilename);
+      mrOwner = UserGroupInformation.getLoginUser();
+    } else {
+      mrOwner = UserGroupInformation.getCurrentUser();
+    }
+    
     supergroup = conf.get(JT_SUPERGROUP, "supergroup");
     LOG.info("Starting jobtracker with owner as " + mrOwner.getUserName() 
              + " and supergroup as " + supergroup);
+    clock = newClock;
 
     //
     // Grab some static constants
@@ -1400,12 +1405,7 @@
     // Set service-level authorization security policy
     if (conf.getBoolean(
           ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
-      PolicyProvider policyProvider = 
-        (PolicyProvider)(ReflectionUtils.newInstance(
-            conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
-                MapReducePolicyProvider.class, PolicyProvider.class), 
-            conf));
-      SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
+      ServiceAuthorizationManager.refresh(conf, new MapReducePolicyProvider());
     }
     
     int handlerCount = conf.getInt(JT_IPC_HANDLER_COUNT, 10);
@@ -1469,7 +1469,10 @@
       try {
         // if we haven't contacted the namenode go ahead and do it
         if (fs == null) {
-          fs = FileSystem.get(conf);
+          fs = mrOwner.doAs(new PrivilegedExceptionAction<FileSystem>() {
+            public FileSystem run() throws IOException {
+              return FileSystem.get(conf);
+          }});
         }
         // clean up the system dir, which will only work if hdfs is out of 
         // safe mode
@@ -1485,7 +1488,7 @@
           if (!systemDirStatus.getPermission().equals(SYSTEM_DIR_PERMISSION)) {
             LOG.warn("Incorrect permissions on " + systemDir + 
                 ". Setting it to " + SYSTEM_DIR_PERMISSION);
-            fs.setPermission(systemDir, SYSTEM_DIR_PERMISSION);
+            fs.setPermission(systemDir,new FsPermission(SYSTEM_DIR_PERMISSION));
           }
         } catch (FileNotFoundException fnf) {} //ignore
         // Make sure that the backup data is preserved
@@ -1544,10 +1547,14 @@
 
     // Initialize history DONE folder
     jobHistory.initDone(conf, fs);
-    String historyLogDir = 
+    final String historyLogDir = 
       jobHistory.getCompletedJobHistoryLocation().toString();
     infoServer.setAttribute("historyLogDir", historyLogDir);
-    FileSystem historyFS = new Path(historyLogDir).getFileSystem(conf);
+    FileSystem historyFS = mrOwner.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      public FileSystem run() throws IOException {
+        return new Path(historyLogDir).getFileSystem(conf);
+      }
+    });
     infoServer.setAttribute("fileSys", historyFS);
 
     this.dnsToSwitchMapping = ReflectionUtils.newInstance(
@@ -1610,14 +1617,6 @@
   }
 
   /**
-   * Get the FileSystem for the given path. This can be used to resolve
-   * filesystem for job history, local job files or mapreduce.system.dir path.
-   */
-  FileSystem getFileSystem(Path path) throws IOException {
-    return path.getFileSystem(conf);
-  }
-
-  /**
    * Get JobTracker's LocalFileSystem handle. This is used by jobs for 
    * localizing job files to the local disk.
    */
@@ -2909,9 +2908,11 @@
    * of the JobTracker.  But JobInProgress adds info that's useful for
    * the JobTracker alone.
    */
-  public synchronized org.apache.hadoop.mapreduce.JobStatus submitJob(
-    org.apache.hadoop.mapreduce.JobID jobId,String jobSubmitDir, TokenStorage ts) 
-  throws IOException {  
+  public synchronized 
+  org.apache.hadoop.mapreduce.JobStatus 
+    submitJob(org.apache.hadoop.mapreduce.JobID jobId, String jobSubmitDir,
+              TokenStorage ts
+              ) throws IOException, InterruptedException {
     return submitJob(JobID.downgrade(jobId), jobSubmitDir, ts);
   }
   
@@ -2927,20 +2928,23 @@
    *  instead
    */
   @Deprecated
-  public synchronized JobStatus submitJob(
-      JobID jobId, String jobSubmitDir, TokenStorage ts) 
-  throws IOException {
-    return submitJob(jobId, 0, 
-        UserGroupInformation.getCurrentUGI(), 
-        jobSubmitDir, false, ts);
+  public synchronized JobStatus submitJob(JobID jobId, 
+                                          String jobSubmitDir,
+                                          TokenStorage ts
+                                         ) throws IOException, 
+                                                  InterruptedException {
+    return submitJob(jobId, 0, UserGroupInformation.getCurrentUser(), 
+                     jobSubmitDir, false, ts);
   }
 
   /**
    * Submits either a new job or a job from an earlier run.
    */
-  private synchronized JobStatus submitJob(org.apache.hadoop.mapreduce.JobID jobID, 
-      int restartCount, UserGroupInformation ugi, String jobSubmitDir, 
-      boolean recovered, TokenStorage ts) throws IOException {
+  private synchronized 
+  JobStatus submitJob(org.apache.hadoop.mapreduce.JobID jobID, 
+                      int restartCount, UserGroupInformation ugi, 
+                      String jobSubmitDir, boolean recovered, TokenStorage ts
+                      ) throws IOException, InterruptedException {
     JobID jobId = JobID.downgrade(jobID);
     if(jobs.containsKey(jobId)) {
       //job already running, don't start twice
@@ -3026,7 +3030,7 @@
                                 Queue.QueueOperation oper) 
                                   throws IOException {
     // get the user group info
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     checkAccess(job, oper, ugi);
   }
 
@@ -3279,8 +3283,8 @@
    */
   @Deprecated
   public synchronized void setJobPriority(JobID jobid, 
-                                              String priority)
-                                                throws IOException {
+                                          String priority)
+                                          throws IOException {
     JobInProgress job = jobs.get(jobid);
     if (null == job) {
         LOG.info("setJobPriority(): JobId " + jobid.toString()
@@ -3617,7 +3621,8 @@
   
   /** Mark a Task to be killed */
   @Deprecated
-  public synchronized boolean killTask(TaskAttemptID taskid, boolean shouldFail) throws IOException{
+  public synchronized boolean killTask(TaskAttemptID taskid, 
+      boolean shouldFail) throws IOException {
     TaskInProgress tip = taskidToTIPMap.get(taskid);
     if(tip != null) {
       checkAccess(tip.getJob(), Queue.QueueOperation.ADMINISTER_JOBS);
@@ -3658,12 +3663,13 @@
   }
   
   /**
+   * @throws LoginException 
    * @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getStagingAreaDir()
    */
-  public String getStagingAreaDir() {
+  public String getStagingAreaDir() throws IOException {
     Path stagingRootDir = new Path(conf.get(JTConfig.JT_STAGING_AREA_ROOT, 
         defaultStagingBaseDir));
-    String user = UserGroupInformation.getCurrentUGI().getUserName();
+    String user = UserGroupInformation.getCurrentUser().getUserName();
     return fs.makeQualified(new Path(stagingRootDir, 
                                 user+"/.staging")).toString();
   }
@@ -3882,12 +3888,35 @@
   }
   
   /**
+   * Is the current user a super user?
+   * @return true, if it is a super user
+   * @throws IOException if there are problems getting the current user
+   */
+  private synchronized boolean isSuperUser() throws IOException {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    if (mrOwner.getUserName().equals(ugi.getUserName()) ) {
+      return true;
+    }
+    String[] groups = ugi.getGroupNames();
+    for(int i=0; i < groups.length; ++i) {
+      if (groups[i].equals(supergroup)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
    * Rereads the config to get hosts and exclude list file names.
    * Rereads the files to update the hosts and exclude lists.
    */
   public synchronized void refreshNodes() throws IOException {
     // check access
-    PermissionChecker.checkSuperuserPrivilege(mrOwner, supergroup);
+    if (!isSuperUser()) {
+      String user = UserGroupInformation.getCurrentUser().getUserName();
+      throw new AccessControlException(user + 
+                                       " is not authorized to refresh nodes.");
+    }
     
     // call the actual api
     refreshHosts();
@@ -4070,10 +4099,10 @@
   
   @Override
   public org.apache.hadoop.mapreduce.QueueAclsInfo[] 
-      getQueueAclsForCurrentUser() throws IOException{
-    return queueManager.getQueueAcls(
-            UserGroupInformation.getCurrentUGI());
+      getQueueAclsForCurrentUser() throws IOException {
+    return queueManager.getQueueAcls(UserGroupInformation.getCurrentUser());
   }
+
   private synchronized JobStatus[] getJobStatus(Collection<JobInProgress> jips,
       boolean toComplete) {
     if(jips == null || jips.isEmpty()) {
@@ -4110,13 +4139,13 @@
             ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
       throw new AuthorizationException("Service Level Authorization not enabled!");
     }
-    SecurityUtil.getPolicy().refresh();
+    ServiceAuthorizationManager.refresh(conf, new MapReducePolicyProvider());
   }
 
   @Override
   public void refreshQueues() throws IOException{
     LOG.info("Refreshing queue information. requested by : " + 
-        UserGroupInformation.getCurrentUGI().getUserName());
+             UserGroupInformation.getCurrentUser().getUserName());
     this.queueManager.refreshQueues(new Configuration(this.conf),
         taskScheduler.getQueueRefresher());
   }
@@ -4171,13 +4200,13 @@
             limitMaxMemForReduceTasks).append(")"));
   }
 
-   
+    
   @Override
   public void refreshUserToGroupsMappings(Configuration conf) throws IOException {
     LOG.info("Refreshing all user-to-groups mappings. Requested by user: " + 
-             UserGroupInformation.getCurrentUGI().getUserName());
+             UserGroupInformation.getCurrentUser().getUserName());
     
-    SecurityUtil.getUserToGroupsMappingService(conf).refresh();
+    Groups.getUserToGroupsMappingService(conf).refresh();
   }
   
   private boolean perTaskMemoryConfigurationSetOnJT() {
@@ -4282,7 +4311,7 @@
     faultyTrackers.incrementFaults(hostName);
   }
 
-  JobTracker(JobConf conf, Clock clock, boolean ignoredForSimulation) 
+  JobTracker(final JobConf conf, Clock clock, boolean ignoredForSimulation) 
   throws IOException {
     this.clock = clock;
     this.conf = conf;
@@ -4303,10 +4332,16 @@
     NUM_HEARTBEATS_IN_SECOND = 
         conf.getInt("mapred.heartbeats.in.second", 100);
     
-    try {
-      mrOwner = UnixUserGroupInformation.login(conf);
-    } catch (LoginException e) {
-      throw new IOException(StringUtils.stringifyException(e));
+    // get the desired principal to load
+    String keytabFilename = conf.get(JTConfig.JT_KEYTAB_FILE);
+    if (keytabFilename != null) {
+      String desiredUser = conf.get(JTConfig.JT_USER_NAME,
+                                    System.getProperty("user.name"));
+      UserGroupInformation.loginUserFromKeytab(desiredUser, 
+                                               keytabFilename);
+      mrOwner = UserGroupInformation.getLoginUser();
+    } else {
+      mrOwner = UserGroupInformation.getCurrentUser();
     }
     supergroup = conf.get("mapred.permissions.supergroup", "supergroup");
     
@@ -4339,15 +4374,22 @@
     infoServer.setAttribute("job.tracker", this);
     
     // initialize history parameters.
-    String historyLogDir = null;
     FileSystem historyFS = null;
 
     jobHistory = new JobHistory();
     jobHistory.init(this, conf, this.localMachine, this.startTime);
     jobHistory.initDone(conf, fs);
-    historyLogDir = jobHistory.getCompletedJobHistoryLocation().toString();
+    final String historyLogDir = jobHistory.getCompletedJobHistoryLocation().toString();
     infoServer.setAttribute("historyLogDir", historyLogDir);
-    historyFS = new Path(historyLogDir).getFileSystem(conf);
+    try {
+      historyFS = mrOwner.doAs(new PrivilegedExceptionAction<FileSystem>() {
+        public FileSystem run() throws IOException {
+          return new Path(historyLogDir).getFileSystem(conf);
+        }
+      });
+    } catch (InterruptedException e1) {
+      throw (IOException) new IOException().initCause(e1);
+    }
 
     infoServer.setAttribute("fileSys", historyFS);
     infoServer.addServlet("reducegraph", "/taskgraph", TaskGraphServlet.class);
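
Both JobTracker constructors now determine mrOwner the same way: log in from a keytab if one is configured, otherwise take whoever owns the process. Factored out as a helper it would look roughly like this (the literal key strings are stand-ins for the JTConfig.JT_KEYTAB_FILE and JTConfig.JT_USER_NAME constants and may not match their exact values):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;

    public class OwnerLoginSketch {
      static UserGroupInformation loginOwner(Configuration conf)
          throws IOException {
        UserGroupInformation.setConfiguration(conf);
        // Key names below are illustrative stand-ins for the JTConfig
        // constants used in the patch.
        String keytab = conf.get("mapreduce.jobtracker.keytab.file");
        if (keytab != null) {
          String principal = conf.get("mapreduce.jobtracker.kerberos.principal",
              System.getProperty("user.name"));
          UserGroupInformation.loginUserFromKeytab(principal, keytab);
          return UserGroupInformation.getLoginUser();
        }
        return UserGroupInformation.getCurrentUser();
      }
    }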

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LinuxTaskController.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LinuxTaskController.java Wed Jan 27 08:32:17 2010
@@ -387,7 +387,7 @@
       throws IOException {
     String[] taskControllerCmd = new String[3 + cmdArgs.size()];
     taskControllerCmd[0] = getTaskControllerExecutablePath();
-    taskControllerCmd[1] = userName;
+    taskControllerCmd[1] = TaskTracker.getShortUserName(userName);
     taskControllerCmd[2] = String.valueOf(command.ordinal());
     int i = 3;
     for (String cmdArg : cmdArgs) {

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java Wed Jan 27 08:32:17 2010
@@ -199,6 +199,8 @@
             MapTask map = new MapTask(systemJobFile.toString(),  
                                       mapId, i,
                                       taskSplitMetaInfos[i].getSplitIndex(), 1);
+            map.setUser(UserGroupInformation.getCurrentUser().
+                getShortUserName());
             JobConf localConf = new JobConf(job);
             TaskRunner.setupChildMapredLocalDirs(map, localConf);
 
@@ -207,6 +209,7 @@
             mapOutputFiles.put(mapId, mapOutput);
 
             map.setJobFile(localJobFile.toString());
+            localConf.setUser(map.getUser());
             map.localizeConfiguration(localConf);
             map.setConf(localConf);
             map_tasks += 1;
@@ -225,6 +228,8 @@
           if (numReduceTasks > 0) {
             ReduceTask reduce = new ReduceTask(systemJobFile.toString(), 
                 reduceId, 0, mapIds.size(), 1);
+            reduce.setUser(UserGroupInformation.getCurrentUser().
+                getShortUserName());
             JobConf localConf = new JobConf(job);
             TaskRunner.setupChildMapredLocalDirs(reduce, localConf);
             // move map output to reduce input  
@@ -249,6 +254,7 @@
             }
             if (!this.isInterrupted()) {
               reduce.setJobFile(localJobFile.toString());
+              localConf.setUser(reduce.getUser());
               reduce.localizeConfiguration(localConf);
               reduce.setConf(localConf);
               reduce_tasks += 1;
@@ -525,10 +531,10 @@
   /**
    * @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getStagingAreaDir()
    */
-  public String getStagingAreaDir() {
+  public String getStagingAreaDir() throws IOException {
     Path stagingRootDir = new Path(conf.get(JTConfig.JT_STAGING_AREA_ROOT, 
         "/tmp/hadoop/mapred/staging"));
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     String user;
     if (ugi != null) {
       user = ugi.getUserName() + rand.nextInt();

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Queue.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Queue.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Queue.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Queue.java Wed Jan 27 08:32:17 2010
@@ -20,7 +20,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.mapreduce.QueueState;
-import org.apache.hadoop.security.SecurityUtil.AccessControlList;
+import org.apache.hadoop.security.authorize.AccessControlList;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -42,7 +42,8 @@
   private String name = null;
 
   //acls list
-  private Map<String, AccessControlList> acls;
+  private Map<String, 
+              org.apache.hadoop.security.authorize.AccessControlList> acls;
 
   //Queue State
   private QueueState state = QueueState.RUNNING;
@@ -132,7 +133,7 @@
    * @return Map containing the operations that can be performed and
    *          who can perform the operations.
    */
-  Map<String, AccessControlList> getAcls() {
+  Map<String, org.apache.hadoop.security.authorize.AccessControlList> getAcls() {
     return acls;
   }
   

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueConfigurationParser.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueConfigurationParser.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueConfigurationParser.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueConfigurationParser.java Wed Jan 27 08:32:17 2010
@@ -22,7 +22,7 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.mapred.Queue.QueueOperation;
 import org.apache.hadoop.mapreduce.QueueState;
-import org.apache.hadoop.security.SecurityUtil.AccessControlList;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName;
 import org.xml.sax.SAXException;
 import org.w3c.dom.Document;

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueManager.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueManager.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueManager.java Wed Jan 27 08:32:17 2010
@@ -25,8 +25,8 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.mapred.TaskScheduler.QueueRefresher;
 import org.apache.hadoop.mapreduce.QueueState;
-import org.apache.hadoop.security.SecurityUtil.AccessControlList;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.JsonFactory;
 import org.codehaus.jackson.JsonGenerationException;
@@ -295,20 +295,11 @@
     }
 
     // Check the ACL list
-    boolean allowed = acl.allAllowed();
+    boolean allowed = acl.isAllAllowed();
     if (!allowed) {
       // Check the allowed users list
-      if (acl.getUsers().contains(ugi.getUserName())) {
+      if (acl.isUserAllowed(ugi)) {
         allowed = true;
-      } else {
-        // Check the allowed groups list
-        Set<String> allowedGroups = acl.getGroups();
-        for (String group : ugi.getGroupNames()) {
-          if (allowedGroups.contains(group)) {
-            allowed = true;
-            break;
-          }
-        }
       }
     }
 
@@ -722,31 +713,7 @@
     }
   }
 
-  private static StringBuilder getAclsInfo(AccessControlList accessControlList) {
-    StringBuilder sb = new StringBuilder();
-    if (accessControlList.getUsers() != null &&
-        accessControlList.getUsers().size() > 0) {
-      Set<String> users = accessControlList.getUsers();
-      Iterator<String> iterator = users.iterator();
-      while (iterator.hasNext()) {
-        sb.append(iterator.next());
-        if (iterator.hasNext()) {
-          sb.append(",");
-        }
-      }
-    }
-    if (accessControlList.getGroups() != null &&
-        accessControlList.getGroups().size() > 0) {
-      sb.append(" ");
-      Set<String> groups = accessControlList.getGroups();
-      Iterator<String> iterator = groups.iterator();
-      while (iterator.hasNext()) {
-        sb.append(iterator.next());
-        if (iterator.hasNext()) {
-          sb.append(",");
-        }
-      }
-    }
-    return sb;
+  private static StringBuilder getAclsInfo(AccessControlList accessControlList){
+    return new StringBuilder(accessControlList.toString());
   }
 }
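
The QueueManager simplification works because the relocated org.apache.hadoop.security.authorize.AccessControlList already matches a UGI against both its user and group lists. A small usage sketch (the ACL string and user are examples; the syntax is comma-separated users, a space, then comma-separated groups):

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AccessControlList;

    public class AclSketch {
      public static void main(String[] args) {
        AccessControlList acl = new AccessControlList("alice,bob admins");
        UserGroupInformation ugi =
            UserGroupInformation.createRemoteUser("alice");
        // isUserAllowed checks the user list and every group the UGI
        // belongs to, replacing the hand-rolled loop removed above.
        boolean allowed = acl.isAllAllowed() || acl.isUserAllowed(ugi);
        System.out.println("allowed = " + allowed);
      }
    }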

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java Wed Jan 27 08:32:17 2010
@@ -370,6 +370,10 @@
   String getUser() {
     return user;
   }
+  
+  void setUser(String user) {
+    this.user = user;
+  }
 
   ////////////////////////////////////////////
   // Writable methods
@@ -967,7 +971,6 @@
         NetUtils.addStaticResolution(name, resolvedName);
       }
     }
-    this.user = this.conf.getUser();
   }
 
   public Configuration getConf() {

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java Wed Jan 27 08:32:17 2010
@@ -135,6 +135,8 @@
   private HashMap<TaskAttemptID, Long> dispatchTimeMap = 
     new HashMap<TaskAttemptID, Long>();
   
+  private String user;
+  
 
   /**
    * Constructor for MapTask
@@ -157,6 +159,7 @@
     if (jobtracker != null) {
       this.jobHistory = jobtracker.getJobHistory();
     }
+    this.user = job.getUser();
   }
         
   /**
@@ -179,6 +182,7 @@
     if (jobtracker != null) {
       this.jobHistory = jobtracker.getJobHistory();
     }
+    this.user = job.getUser();
   }
   
   /**
@@ -382,7 +386,14 @@
   private void resetSuccessfulTaskid() {
     this.successfulTaskId = null; 
   }
+
+  String getUser() {
+    return user;
+  }
   
+  void setUser(String user) {
+    this.user = user;
+  }
   /**
    * Is this tip complete?
    * 
@@ -1063,6 +1074,7 @@
       cleanupTasks.put(taskid, taskTracker);
     }
     t.setConf(conf);
+    t.setUser(getUser());
     LOG.debug("Launching task with skipRanges:"+failedRanges.getSkipRanges());
     t.setSkipRanges(failedRanges.getSkipRanges());
     t.setSkipping(skipping);

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskRunner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskRunner.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskRunner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskRunner.java Wed Jan 27 08:32:17 2010
@@ -24,6 +24,7 @@
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -43,6 +44,8 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.TaskController.InitializationContext;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
@@ -165,16 +168,24 @@
       //before preparing the job localize 
       //all the archives
       TaskAttemptID taskid = t.getTaskID();
-      LocalDirAllocator lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
-      File workDir = formWorkDir(lDirAlloc, taskid, t.isTaskCleanupTask(), conf);
+      final LocalDirAllocator lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
+      final File workDir = formWorkDir(lDirAlloc, taskid, t.isTaskCleanupTask(), conf);
 
       // We don't create any symlinks yet, so presence/absence of workDir
       // actually on the file system doesn't matter.
-      taskDistributedCacheManager = tracker.getTrackerDistributedCacheManager()
-          .newTaskDistributedCacheManager(conf);
-      taskDistributedCacheManager.setup(lDirAlloc, workDir, TaskTracker
-          .getPrivateDistributedCacheDir(conf.getUser()), 
+      UserGroupInformation ugi = 
+        UserGroupInformation.createRemoteUser(conf.getUser());
+      ugi.doAs(new PrivilegedExceptionAction<Void>() {
+        public Void run() throws IOException {
+          taskDistributedCacheManager = 
+            tracker.getTrackerDistributedCacheManager()
+            .newTaskDistributedCacheManager(conf);
+          taskDistributedCacheManager.setup(lDirAlloc, workDir, TaskTracker
+              .getPrivateDistributedCacheDir(conf.getUser()), 
           TaskTracker.getPublicDistributedCacheDir());
+          return null;
+        }
+      });
 
       // Set up the child task's configuration. After this call, no localization
       // of files should happen in the TaskTracker's process space. Any changes to

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java Wed Jan 27 08:32:17 2010
@@ -25,6 +25,8 @@
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -80,6 +82,7 @@
 import org.apache.hadoop.mapreduce.security.TokenStorage;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
 import org.apache.hadoop.mapreduce.task.reduce.ShuffleHeader;
@@ -90,9 +93,7 @@
 import org.apache.hadoop.metrics.Updater;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UnixUserGroupInformation;
-import org.apache.hadoop.security.authorize.ConfiguredPolicy;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.util.DiskChecker;
@@ -448,8 +449,19 @@
     localizer = l;
   }
 
+  /**
+   * This method must be called in all places where the short user name is
+   * desired (e.g. TaskTracker.getUserDir and in the LinuxTaskController).
+   * The short name is required in the path creation 
+   * (like TaskTracker.getUserDir) and while launching task processes as the
+   * user.
+   */
+  static String getShortUserName(String name) {
+    return UserGroupInformation.createRemoteUser(name).getShortUserName();
+  }
+  
   public static String getUserDir(String user) {
-    return TaskTracker.SUBDIR + Path.SEPARATOR + user;
+    return TaskTracker.SUBDIR + Path.SEPARATOR + getShortUserName(user);
   }
 
   public static String getPrivateDistributedCacheDir(String user) {
@@ -553,7 +565,20 @@
    * so we can call it again and "recycle" the object after calling
    * close().
    */
-  synchronized void initialize() throws IOException {
+  synchronized void initialize() throws IOException, InterruptedException {
+    String keytabFilename = fConf.get(TTConfig.TT_KEYTAB_FILE);
+    UserGroupInformation ttUgi;
+    UserGroupInformation.setConfiguration(fConf);
+    if (keytabFilename != null) {
+      String desiredUser = fConf.get(TTConfig.TT_USER_NAME,
+                                    System.getProperty("user.name"));
+      UserGroupInformation.loginUserFromKeytab(desiredUser, 
+                                               keytabFilename);
+      ttUgi = UserGroupInformation.getLoginUser();
+      
+    } else {
+      ttUgi = UserGroupInformation.getCurrentUser();
+    }
     localFs = FileSystem.getLocal(fConf);
     // use configured nameserver & interface to get local hostname
     if (fConf.get(TT_HOST_NAME) != null) {
@@ -615,7 +640,7 @@
             this.fConf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                 MapReducePolicyProvider.class, PolicyProvider.class), 
             this.fConf));
-      SecurityUtil.setPolicy(new ConfiguredPolicy(this.fConf, policyProvider));
+      ServiceAuthorizationManager.refresh(fConf, policyProvider);
     }
     
     // RPC initialization
@@ -651,9 +676,13 @@
         asyncDiskService);
 
     this.jobClient = (InterTrackerProtocol) 
-      RPC.waitForProxy(InterTrackerProtocol.class,
-                       InterTrackerProtocol.versionID, 
-                       jobTrackAddr, this.fConf);
+    ttUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      public Object run() throws IOException {
+        return RPC.waitForProxy(InterTrackerProtocol.class,
+            InterTrackerProtocol.versionID, 
+            jobTrackAddr, fConf);  
+      }
+    }); 
     this.justInited = true;
     this.running = true;    
     // start the thread that will fetch map task completion events
@@ -892,7 +921,8 @@
                               new LocalDirAllocator(MRConfig.LOCAL_DIR);
 
  // initialize the job directory
-  private void localizeJob(TaskInProgress tip) throws IOException {
+  private void localizeJob(TaskInProgress tip
+                           ) throws IOException, InterruptedException {
     Task t = tip.getTask();
     JobID jobId = t.getJobID();
     RunningJob rjob = addTaskToJob(jobId, tip);
@@ -911,7 +941,7 @@
         // directly under the job directory is created.
         JobInitializationContext context = new JobInitializationContext();
         context.jobid = jobId;
-        context.user = localJobConf.getUser();
+        context.user = t.getUser();
         context.workDir = new File(localJobConf.get(JOB_LOCAL_DIR));
         taskController.initializeJob(context);
 
@@ -927,11 +957,14 @@
     launchTaskForJob(tip, new JobConf(rjob.jobConf)); 
   }
 
-  private void setUgi(String user, Configuration conf) {
-    //The dummy-group used here will not be required once we have UGI
-    //object creation with just the user name.
-    conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, 
-        user+","+UnixUserGroupInformation.DEFAULT_GROUP);
+  private FileSystem getFS(final Path filePath, String user, 
+      final Configuration conf) throws IOException, InterruptedException {
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
+    FileSystem userFs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+        public FileSystem run() throws IOException {
+          return filePath.getFileSystem(conf);
+      }});
+    return userFs;
   }
   
   /**
@@ -951,7 +984,7 @@
    * @throws IOException
    */
   JobConf localizeJobFiles(Task t)
-      throws IOException {
+      throws IOException, InterruptedException {
     JobID jobId = t.getJobID();
     String userName = t.getUser();
 
@@ -964,6 +997,9 @@
         localizeJobConfFile(new Path(t.getJobFile()), userName, jobId);
 
     JobConf localJobConf = new JobConf(localJobFile);
+    //WE WILL TRUST THE USERNAME THAT WE GOT FROM THE JOBTRACKER
+    //AS PART OF THE TASK OBJECT
+    localJobConf.setUser(userName);
 
     // create the 'job-work' directory: job-specific shared directory for use as
     // scratch space by all tasks of the same job running on this TaskTracker. 
@@ -976,8 +1012,6 @@
     }
     System.setProperty(JOB_LOCAL_DIR, workDir.toUri().getPath());
     localJobConf.set(JOB_LOCAL_DIR, workDir.toUri().getPath());
-    setUgi(userName, localJobConf);
-
     // Download the job.jar for this job from the system FS
     localizeJobJarFile(userName, jobId, localFs, localJobConf);
     // save local copy of JobToken file
@@ -994,17 +1028,14 @@
    * @throws IOException
    */
   private Path localizeJobConfFile(Path jobFile, String user, JobID jobId)
-      throws IOException {
-    JobConf conf = new JobConf(getJobConf());
-    setUgi(user, conf);
-    
-    FileSystem userFs = jobFile.getFileSystem(conf);
+      throws IOException, InterruptedException {
+    final JobConf conf = new JobConf(getJobConf());
+    FileSystem userFs = getFS(jobFile, user, conf);
     // Get sizes of JobFile
     // sizes are -1 if they are not present.
     FileStatus status = null;
     long jobFileSize = -1;
     try {
-      
       status = userFs.getFileStatus(jobFile);
       jobFileSize = status.getLen();
     } catch(FileNotFoundException fe) {
@@ -1031,14 +1062,14 @@
    */
   private void localizeJobJarFile(String user, JobID jobId, FileSystem localFs,
       JobConf localJobConf)
-      throws IOException {
+      throws IOException, InterruptedException {
     // copy Jar file to the local FS and unjar it.
     String jarFile = localJobConf.getJar();
     FileStatus status = null;
     long jarFileSize = -1;
     if (jarFile != null) {
       Path jarFilePath = new Path(jarFile);
-      FileSystem fs = jarFilePath.getFileSystem(localJobConf);
+      FileSystem fs = getFS(jarFilePath, user, localJobConf);
       try {
         status = fs.getFileStatus(jarFilePath);
         jarFileSize = status.getLen();
@@ -1166,7 +1197,7 @@
   /**
    * Start with the local machine name, and the default JobTracker
    */
-  public TaskTracker(JobConf conf) throws IOException {
+  public TaskTracker(JobConf conf) throws IOException, InterruptedException {
     fConf = conf;
     maxMapSlots = conf.getInt(TT_MAP_SLOTS, 2);
     maxReduceSlots = conf.getInt(TT_REDUCE_SLOTS, 2);
@@ -2177,6 +2208,11 @@
                 StringUtils.stringifyException(iex));
       return;
     }
+    catch (InterruptedException i) {
+      LOG.error("Got interrupted while reinitializing TaskTracker: " + 
+          i.getMessage());
+      return;
+    }
   }
     
   ///////////////////////////////////////////////////////

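The new initialize() logic boils down to: if a keytab is configured, log the daemon in via Kerberos and use that login identity for the JobTracker RPC; otherwise fall back to whoever owns the process. A condensed, standalone sketch of that branch (the key strings are the TTConfig constants added further down; values are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;

    class TTLoginSketch {
      static UserGroupInformation login(Configuration conf) throws IOException {
        // Must run before any other UGI call so the authentication mode
        // (simple vs. kerberos) is picked up from the configuration.
        UserGroupInformation.setConfiguration(conf);
        String keytab = conf.get("mapreduce.tasktracker.keytab.file");
        if (keytab != null) {
          String principal = conf.get("mapreduce.tasktracker.user.name",
                                      System.getProperty("user.name"));
          UserGroupInformation.loginUserFromKeytab(principal, keytab);
          return UserGroupInformation.getLoginUser();   // Kerberos identity
        }
        return UserGroupInformation.getCurrentUser();   // process owner
      }
    }

Wrapping RPC.waitForProxy in ttUgi.doAs then ensures the connection to the JobTracker is authenticated as this daemon identity rather than whatever UGI happens to be current at call time.
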
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/tools/MRAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/tools/MRAdmin.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/tools/MRAdmin.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/tools/MRAdmin.java Wed Jan 27 08:32:17 2010
@@ -19,8 +19,6 @@
 
 import java.io.IOException;
 
-import javax.security.auth.login.LoginException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.ipc.RPC;
@@ -29,7 +27,7 @@
 import org.apache.hadoop.mapred.AdminOperationsProtocol;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -121,16 +119,9 @@
     }
   }
   
-  private static UnixUserGroupInformation getUGI(Configuration conf) 
-  throws IOException {
-    UnixUserGroupInformation ugi = null;
-    try {
-      ugi = UnixUserGroupInformation.login(conf, true);
-    } catch (LoginException e) {
-      throw (IOException)(new IOException(
-          "Failed to get the current user's information.").initCause(e));
-    }
-    return ugi;
+  private static UserGroupInformation getUGI(Configuration conf
+                                             ) throws IOException {
+    return UserGroupInformation.getCurrentUser();
   }
 
   private int refreshAuthorizationPolicy() throws IOException {

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java Wed Jan 27 08:32:17 2010
@@ -20,6 +20,7 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -35,14 +36,14 @@
 import org.apache.hadoop.mapreduce.server.jobtracker.State;
 import org.apache.hadoop.mapreduce.util.ConfigUtil;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * Provides a way to access information about the map/reduce cluster.
  */
 public class Cluster {
   private ClientProtocol client;
-  private UnixUserGroupInformation ugi;
+  private UserGroupInformation ugi;
   private Configuration conf;
   private FileSystem fs = null;
   private Path sysDir = null;
@@ -55,14 +56,14 @@
   
   public Cluster(Configuration conf) throws IOException {
     this.conf = conf;
-    this.ugi = Job.getUGI(conf);
+    this.ugi = UserGroupInformation.getCurrentUser();
     client = createClient(conf);
   }
 
   public Cluster(InetSocketAddress jobTrackAddr, Configuration conf) 
       throws IOException {
     this.conf = conf;
-    this.ugi = Job.getUGI(conf);
+    this.ugi = UserGroupInformation.getCurrentUser();
     client = createRPCProxy(jobTrackAddr, conf);
   }
 
@@ -120,7 +121,7 @@
   public synchronized FileSystem getFileSystem() 
       throws IOException, InterruptedException {
     if (this.fs == null) {
-      Path sysDir = new Path(client.getSystemDir());
+      final Path sysDir = new Path(client.getSystemDir());
       this.fs = sysDir.getFileSystem(getConf());
     }
     return fs;

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java Wed Jan 27 08:32:17 2010
@@ -43,7 +43,7 @@
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.mapreduce.task.JobContextImpl;
 import org.apache.hadoop.mapreduce.util.ConfigUtil;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -1170,22 +1170,6 @@
     return (baseUrl + "/tasklog?plaintext=true&taskid=" + taskId); 
   }
 
-  /**
-   * Set the UGI, user name and the group name for the job.
-   * 
-   * This method is called by job submission code while submitting the job.
-   * Internal to MapReduce project. 
-   * @throws IOException
-   */
-  public void setUGIAndUserGroupNames()
-      throws IOException {
-    UnixUserGroupInformation ugi = Job.getUGI(conf);
-    setUser(ugi.getUserName());
-    if (ugi.getGroupNames().length > 0) {
-      conf.set("group.name", ugi.getGroupNames()[0]);
-    }
-  }
-
   /** The interval at which monitorAndPrintJob() prints status */
   public static int getProgressPollInterval(Configuration conf) {
     // Read progress monitor poll interval from config. Default is 1 second.
@@ -1234,15 +1218,4 @@
     conf.set(Job.OUTPUT_FILTER, newValue.toString());
   }
 
-  public static UnixUserGroupInformation getUGI(Configuration job) 
-      throws IOException {
-    UnixUserGroupInformation ugi = null;
-    try {
-      ugi = UnixUserGroupInformation.login(job, true);
-    } catch (LoginException e) {
-      throw (IOException)(new IOException(
-        "Failed to get the current user's information.").initCause(e));
-    }
-    return ugi;
-  }
 }

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java Wed Jan 27 08:32:17 2010
@@ -20,13 +20,10 @@
 
 import java.io.IOException;
 
-import javax.security.auth.login.LoginException;
-
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -94,25 +91,19 @@
    * @param conf
    */
   public static Path getStagingDir(Cluster cluster, Configuration conf) 
-  throws IOException, InterruptedException {
+  throws IOException,InterruptedException {
     Path stagingArea = cluster.getStagingAreaDir();
     FileSystem fs = stagingArea.getFileSystem(conf);
     String realUser;
     String currentUser;
-    try {
-      UserGroupInformation ugi = UnixUserGroupInformation.login();
-      realUser = ugi.getUserName();
-      ugi = UnixUserGroupInformation.login(conf);
-      currentUser = ugi.getUserName();
-    } catch (LoginException le) {
-      throw new IOException(le);
-    }
+    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+    realUser = ugi.getUserName();
+    currentUser = UserGroupInformation.getCurrentUser().getUserName();
     if (fs.exists(stagingArea)) {
       FileStatus fsStatus = fs.getFileStatus(stagingArea);
       String owner = fsStatus.getOwner();
       if (!(owner.equals(currentUser) || owner.equals(realUser)) || 
-          !fsStatus.getPermission().
-                               equals(JOB_DIR_PERMISSION)) {
+          !fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
          throw new IOException("The ownership/permissions on the staging " +
                       "directory " + stagingArea + " is not as expected. " + 
                       "It is owned by " + owner + " and permissions are "+ 

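getStagingDir() now distinguishes the login ("real") user from the current ("effective") user without any LoginException plumbing. The two only differ under impersonation; a small sketch of the distinction (the user name "bob" is illustrative):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    class RealVsEffective {
      public static void main(String[] args) throws Exception {
        // Outside doAs, both calls name the process's login identity.
        System.out.println("real: "
            + UserGroupInformation.getLoginUser().getUserName());

        UserGroupInformation.createRemoteUser("bob").doAs(
            new PrivilegedExceptionAction<Void>() {
              public Void run() throws Exception {
                // Inside doAs, getCurrentUser() is "bob" while
                // getLoginUser() still names the process owner;
                // this is the owner check getStagingDir() performs.
                System.out.println("effective: "
                    + UserGroupInformation.getCurrentUser().getUserName());
                return null;
              }
            });
      }
    }
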
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java Wed Jan 27 08:32:17 2010
@@ -304,11 +304,6 @@
   @SuppressWarnings("unchecked")
   JobStatus submitJobInternal(Job job, Cluster cluster) 
   throws ClassNotFoundException, InterruptedException, IOException {
-    /*
-     * set this user's id in job configuration, so later job files can be
-     * accessed using this user's id
-     */
-    job.setUGIAndUserGroupNames();
 
     Path jobStagingArea = JobSubmissionFiles.getStagingDir(cluster, 
                                                      job.getConfiguration());
@@ -412,7 +407,8 @@
     // sort the splits into order based on size, so that the biggest
     // go first
     Arrays.sort(array, new SplitComparator());
-    JobSplitWriter.createSplitFiles(jobSubmitDir, conf, array);
+    JobSplitWriter.createSplitFiles(jobSubmitDir, conf, 
+        jobSubmitDir.getFileSystem(conf), array);
     return array.length;
   }
   
@@ -454,7 +450,8 @@
         }
       }
     });
-    JobSplitWriter.createSplitFiles(jobSubmitDir, job, splits);
+    JobSplitWriter.createSplitFiles(jobSubmitDir, job, 
+        jobSubmitDir.getFileSystem(job), splits);
     return splits.length;
   }
   

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java Wed Jan 27 08:32:17 2010
@@ -46,6 +46,7 @@
 import org.apache.hadoop.mapred.JobTracker;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -229,7 +230,8 @@
    */
   public void setupEventWriter(JobID jobId, JobConf jobConf)
   throws IOException {
-    Path logFile = getJobHistoryFile(logDir, jobId, getUserName(jobConf));
+    Path logFile = getJobHistoryFile(logDir, jobId, 
+        UserGroupInformation.createRemoteUser(getUserName(jobConf)).getShortUserName());
   
     if (logDir == null) {
       LOG.info("Log Directory is null, returning");

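The createRemoteUser(...).getShortUserName() step here (and in TaskTracker.getShortUserName above) strips a Kerberos principal down to its first component, which is the form that file and directory names want. Roughly, assuming the default principal-to-name rules (the principal below is illustrative):

    import org.apache.hadoop.security.UserGroupInformation;

    class ShortNameSketch {
      public static void main(String[] args) {
        // A service principal like "alice/host1.example.com@EXAMPLE.COM"
        // shortens to just "alice" under the default rules.
        String name = "alice/host1.example.com@EXAMPLE.COM";
        System.out.println(UserGroupInformation
            .createRemoteUser(name).getShortUserName());
      }
    }
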
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java Wed Jan 27 08:32:17 2010
@@ -20,6 +20,8 @@
 
 import java.io.IOException;
 
+import javax.security.auth.login.LoginException;
+
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.mapreduce.ClusterMetrics;
 import org.apache.hadoop.mapreduce.Counters;

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java Wed Jan 27 08:32:17 2010
@@ -90,6 +90,9 @@
     "mapreduce.jobtracker.maxmapmemory.mb";
   public static final String JT_MAX_REDUCEMEMORY_MB = 
     "mapreduce.jobtracker.maxreducememory.mb";
-  public static final String MAX_JOB_SPLIT_METAINFO_SIZE = 
-  "mapreduce.job.split.metainfo.maxsize";
+  public static final String JT_MAX_JOB_SPLIT_METAINFO_SIZE = 
+  "mapreduce.jobtracker.split.metainfo.maxsize";
+  public static final String JT_USER_NAME = "mapreduce.jobtracker.user.name";
+  public static final String JT_KEYTAB_FILE = 
+    "mapreduce.jobtracker.keytab.file";
 }

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java Wed Jan 27 08:32:17 2010
@@ -83,4 +83,7 @@
     "mapreduce.tasktracker.cache.local.size";
   public static final String TT_OUTOFBAND_HEARBEAT =
     "mapreduce.tasktracker.outofband.heartbeat";
+  public static final String TT_USER_NAME = "mapreduce.tasktracker.user.name";
+  public static final String TT_KEYTAB_FILE = 
+    "mapreduce.tasktracker.keytab.file";
 }


Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java Wed Jan 27 08:32:17 2010
@@ -56,16 +56,15 @@
   
   @SuppressWarnings("unchecked")
   public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir, 
-      Configuration conf, List<InputSplit> splits) 
+      Configuration conf, FileSystem fs, List<InputSplit> splits) 
   throws IOException, InterruptedException {
     T[] array = (T[]) splits.toArray(new InputSplit[splits.size()]);
-    createSplitFiles(jobSubmitDir, conf, array);
+    createSplitFiles(jobSubmitDir, conf, fs, array);
   }
   
   public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir, 
-      Configuration conf,T[] splits) 
+      Configuration conf, FileSystem fs, T[] splits) 
   throws IOException, InterruptedException {
-    FileSystem fs = jobSubmitDir.getFileSystem(conf);
     FSDataOutputStream out = createFile(fs, 
         JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
     SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
@@ -76,9 +75,9 @@
   }
   
   public static void createSplitFiles(Path jobSubmitDir, 
-      Configuration conf, org.apache.hadoop.mapred.InputSplit[] splits) 
+      Configuration conf, FileSystem fs, 
+      org.apache.hadoop.mapred.InputSplit[] splits) 
   throws IOException {
-    FileSystem fs = jobSubmitDir.getFileSystem(conf);
     FSDataOutputStream out = createFile(fs, 
         JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
     SplitMetaInfo[] info = writeOldSplits(splits, out);

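All three createSplitFiles overloads now take the FileSystem as a parameter instead of deriving it from jobSubmitDir internally, so a caller running inside doAs can hand in a FileSystem already bound to the submitting user. A hypothetical call site for the old-API overload (the staging path is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.InputSplit;
    import org.apache.hadoop.mapreduce.split.JobSplitWriter;

    class SplitWriteSketch {
      static void write(Configuration conf, InputSplit[] splits)
          throws Exception {
        Path jobSubmitDir = new Path("/tmp/staging/job_0001"); // illustrative
        // Obtained under the submitter's UGI, as JobSubmitter does above.
        FileSystem fs = jobSubmitDir.getFileSystem(conf);
        JobSplitWriter.createSplitFiles(jobSubmitDir, conf, fs, splits);
      }
    }
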
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java Wed Jan 27 08:32:17 2010
@@ -43,7 +43,7 @@
   public static JobSplit.TaskSplitMetaInfo[] readSplitMetaInfo(
       JobID jobId, FileSystem fs, Configuration conf, Path jobSubmitDir) 
   throws IOException {
-    long maxMetaInfoSize = conf.getLong(JTConfig.MAX_JOB_SPLIT_METAINFO_SIZE, 
+    long maxMetaInfoSize = conf.getLong(JTConfig.JT_MAX_JOB_SPLIT_METAINFO_SIZE, 
         10000000L);
     Path metaSplitFile = JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir);
     FileStatus fStatus = fs.getFileStatus(metaSplitFile);

Modified: hadoop/mapreduce/trunk/src/test/mapred-site.xml
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred-site.xml?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred-site.xml (original)
+++ hadoop/mapreduce/trunk/src/test/mapred-site.xml Wed Jan 27 08:32:17 2010
@@ -27,6 +27,10 @@
   <value>10</value>
 </property>
 <property>
+  <name>hadoop.security.authentication</name>
+  <value>simple</value>
+</property>
+<property>
   <name>mapreduce.jobtracker.hosts.exclude.filename</name>
   <value>hosts.exclude</value>
   <description></description>

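Alongside the authentication mode, a secured deployment would also set the daemon principal and keytab keys introduced in JTConfig and TTConfig above. The properties would look something like this sketch (principal and path values are illustrative):

    <property>
      <name>mapreduce.tasktracker.keytab.file</name>
      <value>/etc/security/keytabs/tt.keytab</value>
    </property>
    <property>
      <name>mapreduce.tasktracker.user.name</name>
      <value>tt/tracker1.example.com@EXAMPLE.COM</value>
    </property>
    <property>
      <name>mapreduce.jobtracker.keytab.file</name>
      <value>/etc/security/keytabs/jt.keytab</value>
    </property>
    <property>
      <name>mapreduce.jobtracker.user.name</name>
      <value>jt/master.example.com@EXAMPLE.COM</value>
    </property>

(The renamed split-metainfo limit, mapreduce.jobtracker.split.metainfo.maxsize, is configured the same way and still defaults to 10000000 bytes, per the SplitMetaInfoReader change below.)
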
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/cli/testMRConf.xml
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/cli/testMRConf.xml?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/cli/testMRConf.xml (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/cli/testMRConf.xml Wed Jan 27 08:32:17 2010
@@ -44,24 +44,6 @@
       </comparators>
     </test>
     
-    <test> <!--Tested -->
-      <description>refreshServiceAcl: verifying error message while refreshing security authorization policy for jobtracker</description>
-      <test-commands>
-        <!-- hadoop-policy.xml for tests has 
-             security.refresh.policy.protocol.acl = ${user.name} -->
-        <mr-admin-command>-jt JOBTRACKER -Dhadoop.job.ugi=blah,blah -refreshServiceAcl </mr-admin-command>
-      </test-commands>
-      <cleanup-commands>
-        <!-- No cleanup -->
-      </cleanup-commands>
-      <comparators>
-        <comparator>
-          <type>SubstringComparator</type>
-          <expected-output>access denied</expected-output>
-        </comparator>
-      </comparators>
-    </test>
-
     <!-- Test for Archive -->
     <test> <!-- TESTED -->
       <description>Archive: Archive does not occur in the destination</description>

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java Wed Jan 27 08:32:17 2010
@@ -21,6 +21,7 @@
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Random;
@@ -47,7 +48,7 @@
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.mapred.*;
 import org.apache.hadoop.mapred.lib.LongSumReducer;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 public class TestFileSystem extends TestCase {
   private static final Log LOG = FileSystem.LOG;
@@ -480,24 +481,19 @@
     }
   }
 
-  static Configuration createConf4Testing(String username) throws Exception {
-    Configuration conf = new Configuration();
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME,
-        new UnixUserGroupInformation(username, new String[]{"group"}));
-    return conf;    
-  }
-
   public void testFsCache() throws Exception {
     {
       long now = System.currentTimeMillis();
-      Configuration[] conf = {new Configuration(),
-          createConf4Testing("foo" + now), createConf4Testing("bar" + now)};
-      FileSystem[] fs = new FileSystem[conf.length];
+      String[] users = new String[]{"foo","bar"};
+      final Configuration conf = new Configuration();
+      FileSystem[] fs = new FileSystem[users.length];
   
-      for(int i = 0; i < conf.length; i++) {
-        fs[i] = FileSystem.get(conf[i]);
-        assertEquals(fs[i], FileSystem.get(conf[i]));
+      for(int i = 0; i < users.length; i++) {
+        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(users[i]);
+        fs[i] = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+          public FileSystem run() throws IOException {
+            return FileSystem.get(conf);
+        }});
         for(int j = 0; j < i; j++) {
           assertFalse(fs[j] == fs[i]);
         }
@@ -560,21 +556,18 @@
     {
       Configuration conf = new Configuration();
       new Path("file:///").getFileSystem(conf);
-      UnixUserGroupInformation.login(conf, true);
       FileSystem.closeAll();
     }
 
     {
       Configuration conf = new Configuration();
       new Path("hftp://localhost:12345/").getFileSystem(conf);
-      UnixUserGroupInformation.login(conf, true);
       FileSystem.closeAll();
     }
 
     {
       Configuration conf = new Configuration();
       FileSystem fs = new Path("hftp://localhost:12345/").getFileSystem(conf);
-      UnixUserGroupInformation.login(fs.getConf(), true);
       FileSystem.closeAll();
     }
   }

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java?rev=903563&r1=903562&r2=903563&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java Wed Jan 27 08:32:17 2010
@@ -22,6 +22,7 @@
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -33,7 +34,6 @@
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
@@ -112,14 +112,14 @@
 
   private File configurationFile = null;
 
-  private UserGroupInformation taskControllerUser;
+  protected UserGroupInformation taskControllerUser;
 
   /*
    * Utility method which subclasses use to start and configure the MR Cluster
    * so they can directly submit a job.
    */
   protected void startCluster()
-      throws IOException {
+      throws IOException, InterruptedException {
     JobConf conf = new JobConf();
     dfsCluster = new MiniDFSCluster(conf, NUMBER_OF_NODES, true, null);
     conf.set(TTConfig.TT_TASK_CONTROLLER,
@@ -142,8 +142,8 @@
     String ugi = System.getProperty(TASKCONTROLLER_UGI);
     clusterConf = mrCluster.createJobConf();
     String[] splits = ugi.split(",");
-    taskControllerUser = new UnixUserGroupInformation(splits);
-    clusterConf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
+    taskControllerUser = UserGroupInformation.createUserForTesting(splits[0], 
+        new String[]{splits[1]});
     createHomeAndStagingDirectory(clusterConf);
   }
 
@@ -154,16 +154,15 @@
     homeDirectory = new Path(path);
     LOG.info("Creating Home directory : " + homeDirectory);
     fs.mkdirs(homeDirectory);
-    changePermission(conf, homeDirectory);
+    changePermission(fs);
     Path stagingArea = new Path(conf.get(JTConfig.JT_STAGING_AREA_ROOT));
     LOG.info("Creating Staging root directory : " + stagingArea);
     fs.mkdirs(stagingArea);
     fs.setPermission(stagingArea, new FsPermission((short)0777));
   }
 
-  private void changePermission(JobConf conf, Path p)
+  private void changePermission(FileSystem fs)
       throws IOException {
-    FileSystem fs = dfsCluster.getFileSystem();
     fs.setOwner(homeDirectory, taskControllerUser.getUserName(),
         taskControllerUser.getGroupNames()[0]);
   }


