hadoop-common-commits mailing list archives

From: acmur...@apache.org
Subject: svn commit: r725603 [2/2] - in /hadoop/core/trunk: ./ bin/ conf/ src/core/org/apache/hadoop/fs/permission/ src/core/org/apache/hadoop/ipc/ src/core/org/apache/hadoop/security/ src/core/org/apache/hadoop/security/authorize/ src/hdfs/org/apache/hadoop/hd...
Date: Thu, 11 Dec 2008 07:21:14 GMT
Added: hadoop/core/trunk/src/core/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java?rev=725603&view=auto
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java (added)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java Wed Dec 10 23:21:13 2008
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.authorize;
+
+import java.security.AccessControlException;
+import java.security.AccessController;
+import java.security.Permission;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.security.auth.Subject;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * An authorization manager which handles service-level authorization
+ * for incoming service requests.
+ */
+public class ServiceAuthorizationManager {
+
+  private static final Log LOG = 
+    LogFactory.getLog(ServiceAuthorizationManager.class);
+  
+  /**
+   * Configuration key for controlling service-level authorization for Hadoop.
+   */
+  public static final String SERVICE_AUTHORIZATION_CONFIG = 
+    "hadoop.security.authorization";
+  
+  private static Map<Class<?>, Permission> protocolToPermissionMap = 
+    Collections.synchronizedMap(new HashMap<Class<?>, Permission>());
+
+  /**
+   * Authorize the user to access the protocol being used.
+   * 
+   * @param user user accessing the service 
+   * @param protocol service being accessed
+   * @throws AuthorizationException on authorization failure
+   */
+  public static void authorize(Subject user, Class<?> protocol) 
+  throws AuthorizationException {
+    Permission permission = protocolToPermissionMap.get(protocol);
+    if (permission == null) {
+      permission = new ConnectionPermission(protocol);
+      protocolToPermissionMap.put(protocol, permission);
+    }
+    
+    checkPermission(user, permission);
+  }
+  
+  /**
+   * Check if the given {@link Subject} has all of the necessary
+   * {@link Permission}s set.
+   * 
+   * @param user <code>Subject</code> to be authorized
+   * @param permissions <code>Permission</code> set
+   * @throws AuthorizationException if the authorization failed
+   */
+  private static void checkPermission(final Subject user, 
+                                      final Permission... permissions) 
+  throws AuthorizationException {
+    try{
+      Subject.doAs(user, 
+                   new PrivilegedExceptionAction<Void>() {
+                     @Override
+                     public Void run() throws Exception {
+                       try {
+                         for(Permission permission : permissions) {
+                           AccessController.checkPermission(permission);
+                         }
+                       } catch (AccessControlException ace) {
+                         LOG.info("Authorization failed for " + 
+                                  UserGroupInformation.getCurrentUGI(), ace);
+                         throw new AuthorizationException(ace);
+                       }
+                      return null;
+                     }
+                   }
+                  );
+    } catch (PrivilegedActionException e) {
+      throw new AuthorizationException(e.getException());
+    }
+  }
+  
+}
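The javadoc above describes the intended use: the RPC layer hands the authenticated Subject and the protocol interface to authorize(). A minimal sketch of such a caller follows; the wrapper class is illustrative only, since the real call site is in the src/core/.../ipc changes of this commit, which are not included in this part of the message.

    import javax.security.auth.Subject;
    import org.apache.hadoop.security.authorize.AuthorizationException;
    import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

    /** Illustrative caller; the actual call site lives in the IPC Server changes. */
    public class AuthorizeExample {
      /** Throws AuthorizationException when the configured policy denies access. */
      public static void check(Subject user, Class<?> protocol)
          throws AuthorizationException {
        // Permissions are created lazily and cached per protocol by the manager.
        ServiceAuthorizationManager.authorize(user, protocol);
      }
    }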

Added: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HDFSPolicyProvider.java?rev=725603&view=auto
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HDFSPolicyProvider.java (added)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HDFSPolicyProvider.java Wed Dec 10 23:21:13 2008
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.security.authorize.Service;
+
+/**
+ * {@link PolicyProvider} for HDFS protocols.
+ */
+public class HDFSPolicyProvider extends PolicyProvider {
+  private static final Service[] hdfsServices =
+    new Service[] {
+    new Service("security.client.protocol.acl", ClientProtocol.class),
+    new Service("security.client.datanode.protocol.acl", 
+                ClientDatanodeProtocol.class),
+    new Service("security.datanode.protocol.acl", DatanodeProtocol.class),
+    new Service("security.inter.datanode.protocol.acl", 
+                InterDatanodeProtocol.class),
+    new Service("security.namenode.protocol.acl", NamenodeProtocol.class),
+    new Service("security.refresh.policy.protocol.acl", 
+                RefreshAuthorizationPolicyProtocol.class),
+  };
+  
+  @Override
+  public Service[] getServices() {
+    return hdfsServices;
+  }
+}
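Deployments can publish additional protocols the same way: subclass PolicyProvider and return one Service per protocol/ACL-key pair. A hedged sketch follows; MyCustomProtocol and the security.my.custom.protocol.acl key are placeholders, not names defined by this commit.

    import org.apache.hadoop.security.authorize.PolicyProvider;
    import org.apache.hadoop.security.authorize.Service;

    /** Placeholder protocol interface, for illustration only. */
    interface MyCustomProtocol {}

    /** Illustrative provider publishing one hypothetical protocol/ACL pair. */
    public class CustomPolicyProvider extends PolicyProvider {
      private static final Service[] services = new Service[] {
        // The ACL key name below is a placeholder, not one defined by this commit.
        new Service("security.my.custom.protocol.acl", MyCustomProtocol.class),
      };

      @Override
      public Service[] getServices() {
        return services;
      }
    }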

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Dec 10 23:21:13 2008
@@ -44,6 +44,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
@@ -80,6 +81,10 @@
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authorize.ConfiguredPolicy;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -367,6 +372,17 @@
     this.dnRegistration.setInfoPort(this.infoServer.getPort());
     myMetrics = new DataNodeMetrics(conf, dnRegistration.getStorageID());
     
+    // set service-level authorization security policy
+    if (conf.getBoolean(
+          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
+      PolicyProvider policyProvider = 
+        (PolicyProvider)(ReflectionUtils.newInstance(
+            conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
+                HDFSPolicyProvider.class, PolicyProvider.class), 
+            conf));
+      SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
+    }
+
     //init ipc server
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
         conf.get("dfs.datanode.ipc.address"));
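The same three steps (check the hadoop.security.authorization switch, reflectively instantiate the configured PolicyProvider, install it via SecurityUtil.setPolicy) are repeated below for the NameNode, JobTracker and TaskTracker. Setting the corresponding configuration programmatically, as the TestCLI change near the end of this message does, looks roughly like the sketch below; in a deployment these values would normally come from the site configuration files rather than code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HDFSPolicyProvider;
    import org.apache.hadoop.security.authorize.PolicyProvider;
    import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

    public class EnableServiceAuthorization {
      public static Configuration newSecuredConf() {
        Configuration conf = new Configuration();
        // Turn on service-level authorization (hadoop.security.authorization).
        conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
        // Select the policy provider the daemons instantiate via reflection.
        conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                      HDFSPolicyProvider.class, PolicyProvider.class);
        return conf;
      }
    }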

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Dec 10 23:21:13 2008
@@ -25,6 +25,7 @@
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.permission.*;
+import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
@@ -41,10 +42,17 @@
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.ipc.*;
 import org.apache.hadoop.conf.*;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ConfiguredPolicy;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 
 import java.io.*;
 import java.net.*;
@@ -86,7 +94,8 @@
  * state, for example partial blocksMap etc.
  **********************************************************/
 public class NameNode implements ClientProtocol, DatanodeProtocol,
-                                 NamenodeProtocol, FSConstants {
+                                 NamenodeProtocol, FSConstants,
+                                 RefreshAuthorizationPolicyProtocol {
   public long getProtocolVersion(String protocol, 
                                  long clientVersion) throws IOException { 
     if (protocol.equals(ClientProtocol.class.getName())) {
@@ -95,6 +104,8 @@
       return DatanodeProtocol.versionID;
     } else if (protocol.equals(NamenodeProtocol.class.getName())){
       return NamenodeProtocol.versionID;
+    } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
+      return RefreshAuthorizationPolicyProtocol.versionID;
     } else {
       throw new IOException("Unknown protocol to name node: " + protocol);
     }
@@ -116,7 +127,9 @@
   private Thread emptier;
   /** only used for testing purposes  */
   private boolean stopRequested = false;
-
+  /** Is service level authorization enabled? */
+  private boolean serviceAuthEnabled = false;
+  
   /** Format a new filesystem.  Destroys any filesystem that may already
    * exist at this location.  **/
   public static void format(Configuration conf) throws IOException {
@@ -155,6 +168,19 @@
   private void initialize(Configuration conf) throws IOException {
     InetSocketAddress socAddr = NameNode.getAddress(conf);
     int handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
+    
+    // set service-level authorization security policy
+    if (serviceAuthEnabled = 
+          conf.getBoolean(
+            ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
+      PolicyProvider policyProvider = 
+        (PolicyProvider)(ReflectionUtils.newInstance(
+            conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
+                HDFSPolicyProvider.class, PolicyProvider.class), 
+            conf));
+      SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
+    }
+
     // create rpc server 
     this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                                 handlerCount, false, conf);
@@ -841,6 +867,15 @@
     return false;
   }
 
+  @Override
+  public void refreshServiceAcl() throws IOException {
+    if (!serviceAuthEnabled) {
+      throw new AuthorizationException("Service Level Authorization not enabled!");
+    }
+
+    SecurityUtil.getPolicy().refresh();
+  }
+
   private static void printUsage() {
     System.err.println(
       "Usage: java NameNode [" +

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java Wed Dec 10 23:21:13 2008
@@ -21,6 +21,8 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import javax.security.auth.login.LoginException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;
@@ -29,6 +31,7 @@
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
@@ -36,6 +39,9 @@
 import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -387,6 +393,7 @@
       "\t[" + ClearQuotaCommand.USAGE +"]\n" +
       "\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
       "\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
+      "\t[-refreshServiceAcl]\n" +
       "\t[-help [cmd]]\n";
 
     String report ="-report: \tReports basic filesystem information and statistics.\n";
@@ -429,6 +436,9 @@
       "\t\t\t3. Blocks currrently being replicated\n" +
       "\t\t\t4. Blocks waiting to be deleted\n";
 
+    String refreshServiceAcl = "-refreshServiceAcl: Reload the service-level authorization policy file\n" +
+      "\t\tNamenode will reload the authorization policy file.\n";
+    
     String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
       "\t\tis specified.\n";
 
@@ -452,6 +462,8 @@
       System.out.println(SetSpaceQuotaCommand.DESCRIPTION);
     } else if (ClearSpaceQuotaCommand.matches(cmd)) {
       System.out.println(ClearSpaceQuotaCommand.DESCRIPTION);
+    } else if ("refresh-auth-policy".equals(cmd)) {
+      System.out.println(refreshServiceAcl);
     } else if ("help".equals(cmd)) {
       System.out.println(help);
     } else {
@@ -466,6 +478,7 @@
       System.out.println(ClearQuotaCommand.DESCRIPTION);
       System.out.println(SetSpaceQuotaCommand.DESCRIPTION);
       System.out.println(ClearSpaceQuotaCommand.DESCRIPTION);
+      System.out.println(refreshServiceAcl);
       System.out.println(help);
       System.out.println();
       ToolRunner.printGenericCommandUsage(System.out);
@@ -549,6 +562,42 @@
     return 0;
   }
 
+  private static UnixUserGroupInformation getUGI(Configuration conf) 
+  throws IOException {
+    UnixUserGroupInformation ugi = null;
+    try {
+      ugi = UnixUserGroupInformation.login(conf, true);
+    } catch (LoginException e) {
+      throw (IOException)(new IOException(
+          "Failed to get the current user's information.").initCause(e));
+    }
+    return ugi;
+  }
+
+  /**
+   * Refresh the authorization policy on the {@link NameNode}.
+   * @return exitcode 0 on success, non-zero on failure
+   * @throws IOException
+   */
+  public int refreshServiceAcl() throws IOException {
+    // Get the current configuration
+    Configuration conf = getConf();
+    
+    // Create the client
+    RefreshAuthorizationPolicyProtocol refreshProtocol = 
+      (RefreshAuthorizationPolicyProtocol) 
+      RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, 
+                   RefreshAuthorizationPolicyProtocol.versionID, 
+                   NameNode.getAddress(conf), getUGI(conf), conf,
+                   NetUtils.getSocketFactory(conf, 
+                                             RefreshAuthorizationPolicyProtocol.class));
+    
+    // Refresh the authorization policy in-effect
+    refreshProtocol.refreshServiceAcl();
+    
+    return 0;
+  }
+  
   /**
    * Displays format of commands.
    * @param cmd The command that is being executed.
@@ -571,7 +620,7 @@
                          + " [-upgradeProgress status | details | force]");
     } else if ("-metasave".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
-                         + " [-metasave filename]");
+          + " [-metasave filename]");
     } else if (SetQuotaCommand.matches(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [" + SetQuotaCommand.USAGE+"]");
@@ -584,6 +633,9 @@
     } else if (ClearSpaceQuotaCommand.matches(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " ["+ClearSpaceQuotaCommand.USAGE+"]");
+    } else if ("-refreshServiceAcl".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-refreshServiceAcl]");
     } else {
       System.err.println("Usage: java DFSAdmin");
       System.err.println("           [-report]");
@@ -592,6 +644,7 @@
       System.err.println("           [-finalizeUpgrade]");
       System.err.println("           [-upgradeProgress status | details | force]");
       System.err.println("           [-metasave filename]");
+      System.err.println("           [-refreshServiceAcl]");
       System.err.println("           ["+SetQuotaCommand.USAGE+"]");
       System.err.println("           ["+ClearQuotaCommand.USAGE+"]");
       System.err.println("           ["+SetSpaceQuotaCommand.USAGE+"]");
@@ -652,6 +705,11 @@
         printUsage(cmd);
         return exitCode;
       }
+    } else if ("-refreshServiceAcl".equals(cmd)) {
+      if (argv.length != 1) {
+        printUsage(cmd);
+        return exitCode;
+      }
     }
     
     // initialize DFSAdmin
@@ -688,6 +746,8 @@
         exitCode = new ClearSpaceQuotaCommand(argv, i, fs).runAll();
       } else if (SetSpaceQuotaCommand.matches(cmd)) {
         exitCode = new SetSpaceQuotaCommand(argv, i, fs).runAll();
+      } else if ("-refreshServiceAcl".equals(cmd)) {
+        exitCode = refreshServiceAcl();
       } else if ("-help".equals(cmd)) {
         if (i < argv.length) {
           printHelp(argv[i]);

Modified: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobTracker.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobTracker.java (original)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobTracker.java Wed Dec 10 23:21:13 2008
@@ -66,7 +66,13 @@
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.net.ScriptBasedMapping;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ConfiguredPolicy;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.util.HostsFileReader;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -78,7 +84,7 @@
  *
  *******************************************************/
 public class JobTracker implements MRConstants, InterTrackerProtocol,
-    JobSubmissionProtocol, TaskTrackerManager {
+    JobSubmissionProtocol, TaskTrackerManager, RefreshAuthorizationPolicyProtocol {
 
   static long TASKTRACKER_EXPIRY_INTERVAL = 10 * 60 * 1000;
   static long RETIRE_JOB_INTERVAL;
@@ -186,10 +192,13 @@
       return InterTrackerProtocol.versionID;
     } else if (protocol.equals(JobSubmissionProtocol.class.getName())){
       return JobSubmissionProtocol.versionID;
+    } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
+      return RefreshAuthorizationPolicyProtocol.versionID;
     } else {
       throw new IOException("Unknown protocol to job tracker: " + protocol);
     }
   }
+  
   /**
    * A thread to timeout tasks that have been assigned to task trackers,
    * but that haven't reported back yet.
@@ -1335,10 +1344,22 @@
           JobQueueTaskScheduler.class, TaskScheduler.class);
     taskScheduler = (TaskScheduler) ReflectionUtils.newInstance(schedulerClass, conf);
                                            
-    // Set ports, start RPC servers, etc.
+    // Set ports, start RPC servers, setup security policy etc.
     InetSocketAddress addr = getAddress(conf);
     this.localMachine = addr.getHostName();
     this.port = addr.getPort();
+    
+    // Set service-level authorization security policy
+    if (conf.getBoolean(
+          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
+      PolicyProvider policyProvider = 
+        (PolicyProvider)(ReflectionUtils.newInstance(
+            conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
+                MapReducePolicyProvider.class, PolicyProvider.class), 
+            conf));
+      SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
+    }
+    
     int handlerCount = conf.getInt("mapred.job.tracker.handler.count", 10);
     this.interTrackerServer = RPC.getServer(this, addr.getHostName(), addr.getPort(), handlerCount, false, conf);
     if (LOG.isDebugEnabled()) {
@@ -3163,4 +3184,12 @@
     return conf.getInt("mapred.jobtracker.maxtasks.per.job", -1);
   }
   
+  @Override
+  public void refreshServiceAcl() throws IOException {
+    if (!conf.getBoolean(
+            ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
+      throw new AuthorizationException("Service Level Authorization not enabled!");
+    }
+    SecurityUtil.getPolicy().refresh();
+  }
 }

Added: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/MapReducePolicyProvider.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/MapReducePolicyProvider.java?rev=725603&view=auto
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/MapReducePolicyProvider.java (added)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/MapReducePolicyProvider.java Wed Dec 10 23:21:13 2008
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.security.authorize.Service;
+
+/**
+ * {@link PolicyProvider} for Map-Reduce protocols.
+ */
+public class MapReducePolicyProvider extends PolicyProvider {
+  private static final Service[] mapReduceServices = 
+    new Service[] {
+      new Service("security.inter.tracker.protocol.acl", 
+                  InterTrackerProtocol.class),
+      new Service("security.job.submission.protocol.acl",
+                  JobSubmissionProtocol.class),
+      new Service("security.task.umbilical.protocol.acl", 
+                  TaskUmbilicalProtocol.class),
+      new Service("security.refresh.policy.protocol.acl", 
+                  RefreshAuthorizationPolicyProtocol.class),
+  };
+  
+  @Override
+  public Service[] getServices() {
+    return mapReduceServices;
+  }
+
+}

Modified: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/QueueManager.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/QueueManager.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/QueueManager.java (original)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/QueueManager.java Wed Dec 10 23:21:13 2008
@@ -28,6 +28,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.SecurityUtil.AccessControlList;
 
 /**
  * Class that exposes information about queues maintained by the Hadoop
@@ -51,12 +52,10 @@
   // Prefix in configuration for queue related keys
   private static final String QUEUE_CONF_PROPERTY_NAME_PREFIX 
                                                         = "mapred.queue.";
-  // Indicates an ACL string that represents access to all users
-  private static final String ALL_ALLOWED_ACL_VALUE = "*";
   // Configured queues
   private Set<String> queueNames;
   // Map of a queue and ACL property name with an ACL
-  private HashMap<String, ACL> aclsMap;
+  private HashMap<String, AccessControlList> aclsMap;
   // Map of a queue name to any generic object that represents 
   // scheduler information 
   private HashMap<String, Object> schedulerInfoObjects;
@@ -92,69 +91,6 @@
   }
   
   /**
-   * Class representing an access control that is configured.
-   */
-  private static class ACL {
-    
-    // Set of users who are granted access.
-    private Set<String> users;
-    // Set of groups which are granted access
-    private Set<String> groups;
-    // Whether all users are granted access.
-    private boolean allAllowed;
-    
-    /**
-     * Construct a new ACL from a String representation of the same.
-     * 
-     * The String is a a comma separated list of users and groups.
-     * The user list comes first and is separated by a space followed 
-     * by the group list. For e.g. "user1,user2 group1,group2"
-     * 
-     * @param aclString String representation of the ACL
-     */
-    ACL (String aclString) {
-      users = new TreeSet<String>();
-      groups = new TreeSet<String>();
-      if (aclString.equals(ALL_ALLOWED_ACL_VALUE)) {
-        allAllowed = true;
-      } else {
-        String[] userGroupStrings = aclString.split(" ", 2);
-        
-        if (userGroupStrings.length >= 1) {
-          String[] usersStr = userGroupStrings[0].split(",");
-          if (usersStr.length >= 1) {
-            addToSet(users, usersStr);
-          }
-        }
-        
-        if (userGroupStrings.length == 2) {
-          String[] groupsStr = userGroupStrings[1].split(",");
-          if (groupsStr.length >= 1) {
-            addToSet(groups, groupsStr);
-          }
-        }
-      }
-    }
-    
-    boolean allUsersAllowed() {
-      return allAllowed;
-    }
-    
-    boolean isUserAllowed(String user) {
-      return users.contains(user);
-    }
-    
-    boolean isAnyGroupAllowed(String[] otherGroups) {
-      for (String g : otherGroups) {
-        if (groups.contains(g)) {
-          return true;
-        }
-      }
-      return false;
-    }
-  }
-  
-  /**
    * Construct a new QueueManager using configuration specified in the passed
    * in {@link org.apache.hadoop.conf.Configuration} object.
    * 
@@ -162,7 +98,7 @@
    */
   public QueueManager(Configuration conf) {
     queueNames = new TreeSet<String>();
-    aclsMap = new HashMap<String, ACL>();
+    aclsMap = new HashMap<String, AccessControlList>();
     schedulerInfoObjects = new HashMap<String, Object>();
     initialize(conf);
   }
@@ -237,13 +173,30 @@
       }
     }
     
-    ACL acl = aclsMap.get(toFullPropertyName(queueName, oper.getAclName()));
+    AccessControlList acl = aclsMap.get(toFullPropertyName(queueName, oper.getAclName()));
     if (acl == null) {
       return false;
     }
-    return ((acl.allUsersAllowed()) ||
-              (acl.isUserAllowed(ugi.getUserName())) ||
-              (acl.isAnyGroupAllowed(ugi.getGroupNames())));    
+    
+    // Check the ACL list
+    boolean allowed = acl.allAllowed();
+    if (!allowed) {
+      // Check the allowed users list
+      if (acl.getUsers().contains(ugi.getUserName())) {
+        allowed = true;
+      } else {
+        // Check the allowed groups list
+        Set<String> allowedGroups = acl.getGroups();
+        for (String group : ugi.getGroupNames()) {
+          if (allowedGroups.contains(group)) {
+            allowed = true;
+            break;
+          }
+        }
+      }
+    }
+    
+    return allowed;    
   }
   
   /**
@@ -302,7 +255,7 @@
       for (QueueOperation oper : QueueOperation.values()) {
         String key = toFullPropertyName(queue, oper.getAclName());
         String aclString = conf.get(key, "*");
-        aclsMap.put(key, new ACL(aclString));
+        aclsMap.put(key, new AccessControlList(aclString));
       }
     }
   }
@@ -317,8 +270,7 @@
       set.add(elem);
     }
   }
-
-
+  
   synchronized JobQueueInfo[] getJobQueueInfos() {
     ArrayList<JobQueueInfo> queueInfoList = new ArrayList<JobQueueInfo>();
     for(String queue : queueNames) {

Modified: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/TaskTracker.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/TaskTracker.java (original)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/TaskTracker.java Wed Dec 10 23:21:13 2008
@@ -75,6 +75,10 @@
 import org.apache.hadoop.metrics.Updater;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authorize.ConfiguredPolicy;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.MemoryCalculatorPlugin;
 import org.apache.hadoop.util.ProcfsBasedProcessTree;
@@ -480,6 +484,17 @@
     
     this.jvmManager = new JvmManager(this);
 
+    // Set service-level authorization security policy
+    if (this.fConf.getBoolean(
+          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
+      PolicyProvider policyProvider = 
+        (PolicyProvider)(ReflectionUtils.newInstance(
+            this.fConf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
+                MapReducePolicyProvider.class, PolicyProvider.class), 
+            this.fConf));
+      SecurityUtil.setPolicy(new ConfiguredPolicy(this.fConf, policyProvider));
+    }
+    
     // RPC initialization
     int max = maxCurrentMapTasks > maxCurrentReduceTasks ? 
                        maxCurrentMapTasks : maxCurrentReduceTasks;

Added: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/tools/MRAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/tools/MRAdmin.java?rev=725603&view=auto
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/tools/MRAdmin.java (added)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/tools/MRAdmin.java Wed Dec 10 23:21:13 2008
@@ -0,0 +1,193 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred.tools;
+
+import java.io.IOException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.mapred.JobTracker;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * Administrative access to Hadoop Map-Reduce.
+ *
+ * Currently it only provides the ability to connect to the {@link JobTracker}
+ * and refresh the service-level authorization policy.
+ */
+public class MRAdmin extends Configured implements Tool {
+
+  public MRAdmin() {
+    super();
+  }
+
+  public MRAdmin(Configuration conf) {
+    super(conf);
+  }
+
+  private static void printHelp(String cmd) {
+    String summary = "hadoop mradmin is the command to execute Map-Reduce administrative commands.\n" +
+    "The full syntax is: \n\n" +
+    "hadoop mradmin [-refreshServiceAcl] [-help [cmd]]\n"; 
+
+    String refreshServiceAcl = "-refreshServiceAcl: Reload the service-level authorization policy file\n" +
+      "\t\tJobtracker will reload the authorization policy file.\n";
+
+    String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
+      "\t\tis specified.\n";
+
+    if ("refresh-auth-policy".equals(cmd)) {
+      System.out.println(refreshServiceAcl);
+    } else if ("help".equals(cmd)) {
+      System.out.println(help);
+    } else {
+      System.out.println(summary);
+      System.out.println(refreshServiceAcl);
+      System.out.println(help);
+      System.out.println();
+      ToolRunner.printGenericCommandUsage(System.out);
+    }
+
+  }
+  
+  /**
+   * Displays format of commands.
+   * @param cmd The command that is being executed.
+   */
+  private static void printUsage(String cmd) {
+    if ("-refreshServiceAcl".equals(cmd)) {
+      System.err.println("Usage: java MRAdmin"
+                         + " [-refreshServiceAcl]");
+    } else {
+      System.err.println("Usage: java MRAdmin");
+      System.err.println("           [-refreshServiceAcl]");
+      System.err.println("           [-help [cmd]]");
+      System.err.println();
+      ToolRunner.printGenericCommandUsage(System.err);
+    }
+  }
+  
+  private static UnixUserGroupInformation getUGI(Configuration conf) 
+  throws IOException {
+    UnixUserGroupInformation ugi = null;
+    try {
+      ugi = UnixUserGroupInformation.login(conf, true);
+    } catch (LoginException e) {
+      throw (IOException)(new IOException(
+          "Failed to get the current user's information.").initCause(e));
+    }
+    return ugi;
+  }
+
+  private int refreshAuthorizationPolicy() throws IOException {
+    // Get the current configuration
+    Configuration conf = getConf();
+    
+    // Create the client
+    RefreshAuthorizationPolicyProtocol refreshProtocol = 
+      (RefreshAuthorizationPolicyProtocol) 
+      RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, 
+                   RefreshAuthorizationPolicyProtocol.versionID, 
+                   JobTracker.getAddress(conf), getUGI(conf), conf,
+                   NetUtils.getSocketFactory(conf, 
+                                             RefreshAuthorizationPolicyProtocol.class));
+    
+    // Refresh the authorization policy in-effect
+    refreshProtocol.refreshServiceAcl();
+    
+    return 0;
+  }
+  
+
+  @Override
+  public int run(String[] args) throws Exception {
+    if (args.length < 1) {
+      printUsage("");
+      return -1;
+    }
+
+    int exitCode = -1;
+    int i = 0;
+    String cmd = args[i++];
+
+    //
+    // verify that we have enough command line parameters
+    //
+    if ("-refreshServiceAcl".equals(cmd)) {
+      if (args.length != 1) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    }
+    
+    exitCode = 0;
+    try {
+      if ("-refreshServiceAcl".equals(cmd)) {
+        exitCode = refreshAuthorizationPolicy();
+      } else if ("-help".equals(cmd)) {
+        if (i < args.length) {
+          printUsage(args[i]);
+        } else {
+          printHelp("");
+        }
+      } else {
+        exitCode = -1;
+        System.err.println(cmd.substring(1) + ": Unknown command");
+        printUsage("");
+      }
+
+    } catch (IllegalArgumentException arge) {
+      exitCode = -1;
+      System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
+      printUsage(cmd);
+    } catch (RemoteException e) {
+      //
+      // This is an error returned by the hadoop server. Print
+      // out the first line of the error message, ignore the stack trace.
+      exitCode = -1;
+      try {
+        String[] content;
+        content = e.getLocalizedMessage().split("\n");
+        System.err.println(cmd.substring(1) + ": "
+                           + content[0]);
+      } catch (Exception ex) {
+        System.err.println(cmd.substring(1) + ": "
+                           + ex.getLocalizedMessage());
+      }
+    } catch (Exception e) {
+      exitCode = -1;
+      System.err.println(cmd.substring(1) + ": "
+                         + e.getLocalizedMessage());
+    } 
+    return exitCode;
+  }
+
+  public static void main(String[] args) throws Exception {
+    int result = ToolRunner.run(new MRAdmin(), args);
+    System.exit(result);
+  }
+
+}
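For completeness, the new tool can also be driven from Java via ToolRunner rather than the hadoop mradmin script; a sketch, assuming the Configuration on the classpath already points at a running JobTracker (DFSAdmin's -refreshServiceAcl is invoked the same way against the NameNode).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.tools.MRAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class RefreshMapReduceAcls {
      public static void main(String[] args) throws Exception {
        // Assumed to resolve mapred.job.tracker to a running JobTracker.
        Configuration conf = new Configuration();
        // Equivalent to: hadoop mradmin -refreshServiceAcl
        int exitCode = ToolRunner.run(conf, new MRAdmin(),
                                      new String[] {"-refreshServiceAcl"});
        System.exit(exitCode);
      }
    }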

Added: hadoop/core/trunk/src/test/hadoop-policy.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hadoop-policy.xml?rev=725603&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/hadoop-policy.xml (added)
+++ hadoop/core/trunk/src/test/hadoop-policy.xml Wed Dec 10 23:21:13 2008
@@ -0,0 +1,97 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code 
+    via the DistributedFileSystem. 
+    The ACL is a comma-separated list of user and group names. The user and 
+    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol 
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and 
+    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to 
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and 
+    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and 
+    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and 
+    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.tracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterTrackerProtocol, used by the tasktrackers to 
+    communicate with the jobtracker.
+    The ACL is a comma-separated list of user and group names. The user and 
+    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.submission.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for JobSubmissionProtocol, used by job clients to 
+    communicate with the jobtracker for job submission, querying job status etc.
+    The ACL is a comma-separated list of user and group names. The user and 
+    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.task.umbilical.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce 
+    tasks to communicate with the parent tasktracker. 
+    The ACL is a comma-separated list of user and group names. The user and 
+    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>${user.name}</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the 
+    dfsadmin and mradmin commands to refresh the security policy in-effect. 
+    The ACL is a comma-separated list of user and group names. The user and 
+    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+</configuration>

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/cli/TestCLI.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/cli/TestCLI.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/cli/TestCLI.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/cli/TestCLI.java Wed Dec 10 23:21:13 2008
@@ -38,6 +38,11 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.security.authorize.HadoopPolicyProvider;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.util.StringUtils;
 import org.xml.sax.Attributes;
 import org.xml.sax.SAXException;
@@ -73,9 +78,11 @@
   static ComparatorData comparatorData = null;
   
   private static Configuration conf = null;
-  private static MiniDFSCluster cluster = null;
+  private static MiniDFSCluster dfsCluster = null;
   private static DistributedFileSystem dfs = null;
+  private static MiniMRCluster mrCluster = null;
   private static String namenode = null;
+  private static String jobtracker = null;
   private static String clitestDataDir = null;
   private static String username = null;
   
@@ -109,19 +116,31 @@
     // Start up the mini dfs cluster
     boolean success = false;
     conf = new Configuration();
-    cluster = new MiniDFSCluster(conf, 1, true, null);
+    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
+                  HadoopPolicyProvider.class, PolicyProvider.class);
+    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
+                    true);
+
+    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
     namenode = conf.get("fs.default.name", "file:///");
     clitestDataDir = new File(TEST_CACHE_DATA_DIR).
       toURI().toString().replace(' ', '+');
     username = System.getProperty("user.name");
 
-    FileSystem fs = cluster.getFileSystem();
+    FileSystem fs = dfsCluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
                fs instanceof DistributedFileSystem);
     dfs = (DistributedFileSystem) fs;
+    
+     // Start up mini mr cluster
+    JobConf mrConf = new JobConf(conf);
+    mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1, 
+                           null, null, mrConf);
+    jobtracker = mrCluster.createJobConf().get("mapred.job.tracker", "local");
+
     success = true;
 
-    assertTrue("Error setting up Mini DFS cluster", success);
+    assertTrue("Error setting up Mini DFS & MR clusters", success);
   }
   
   /**
@@ -129,12 +148,14 @@
    */
   public void tearDown() throws Exception {
     boolean success = false;
+    mrCluster.shutdown();
+    
     dfs.close();
-    cluster.shutdown();
+    dfsCluster.shutdown();
     success = true;
     Thread.sleep(2000);
 
-    assertTrue("Error tearing down Mini DFS cluster", success);
+    assertTrue("Error tearing down Mini DFS & MR clusters", success);
     
     displayResults();
   }
@@ -147,6 +168,7 @@
   private String expandCommand(final String cmd) {
     String expCmd = cmd;
     expCmd = expCmd.replaceAll("NAMENODE", namenode);
+    expCmd = expCmd.replaceAll("JOBTRACKER", jobtracker);
     expCmd = expCmd.replaceAll("CLITEST_DATA", clitestDataDir);
     expCmd = expCmd.replaceAll("USERNAME", username);
     
@@ -173,30 +195,30 @@
         LOG.info("");
 
         ArrayList<TestCmd> testCommands = td.getTestCommands();
-        for (int j = 0; j < testCommands.size(); j++) {
+        for (TestCmd cmd : testCommands) {
           LOG.info("              Test Commands: [" + 
-              expandCommand(testCommands.get(j).getCmd()) + "]");
+                   expandCommand(cmd.getCmd()) + "]");
         }
 
         LOG.info("");
         ArrayList<TestCmd> cleanupCommands = td.getCleanupCommands();
-        for (int j = 0; j < cleanupCommands.size(); j++) {
+        for (TestCmd cmd : cleanupCommands) {
           LOG.info("           Cleanup Commands: [" +
-              expandCommand(cleanupCommands.get(j).getCmd()) + "]");
+                   expandCommand(cmd.getCmd()) + "]");
         }
 
         LOG.info("");
         ArrayList<ComparatorData> compdata = td.getComparatorData();
-        for (int j = 0; j < compdata.size(); j++) {
-          boolean resultBoolean = compdata.get(j).getTestResult();
+        for (ComparatorData cd : compdata) {
+          boolean resultBoolean = cd.getTestResult();
           LOG.info("                 Comparator: [" + 
-              compdata.get(j).getComparatorType() + "]");
+                   cd.getComparatorType() + "]");
           LOG.info("         Comparision result:   [" + 
-              (resultBoolean ? "pass" : "fail") + "]");
+                   (resultBoolean ? "pass" : "fail") + "]");
           LOG.info("            Expected output:   [" + 
-              compdata.get(j).getExpectedOutput() + "]");
+                   cd.getExpectedOutput() + "]");
           LOG.info("              Actual output:   [" + 
-              compdata.get(j).getActualOutput() + "]");
+                   cd.getActualOutput() + "]");
         }
         LOG.info("");
       }
@@ -319,9 +341,9 @@
    
       // Execute the test commands
       ArrayList<TestCmd> testCommands = testdata.getTestCommands();
-      for (int i = 0; i < testCommands.size(); i++) {
+      for (TestCmd cmd : testCommands) {
       try {
-        CommandExecutor.executeCommand(testCommands.get(i), namenode);
+        CommandExecutor.executeCommand(cmd, namenode, jobtracker);
       } catch (Exception e) {
         fail(StringUtils.stringifyException(e));
       }
@@ -330,28 +352,27 @@
       boolean overallTCResult = true;
       // Run comparators
       ArrayList<ComparatorData> compdata = testdata.getComparatorData();
-      for (int i = 0; i < compdata.size(); i++) {
-        final String comptype = compdata.get(i).getComparatorType();
+      for (ComparatorData cd : compdata) {
+        final String comptype = cd.getComparatorType();
         
         boolean compareOutput = false;
         
         if (! comptype.equalsIgnoreCase("none")) {
-          compareOutput = compareTestOutput(compdata.get(i));
+          compareOutput = compareTestOutput(cd);
           overallTCResult &= compareOutput;
         }
         
-        compdata.get(i).setExitCode(CommandExecutor.getLastExitCode());
-        compdata.get(i).setActualOutput(
-          CommandExecutor.getLastCommandOutput());
-        compdata.get(i).setTestResult(compareOutput);
+        cd.setExitCode(CommandExecutor.getLastExitCode());
+        cd.setActualOutput(CommandExecutor.getLastCommandOutput());
+        cd.setTestResult(compareOutput);
       }
       testdata.setTestResult(overallTCResult);
       
       // Execute the cleanup commands
       ArrayList<TestCmd> cleanupCommands = testdata.getCleanupCommands();
-      for (int i = 0; i < cleanupCommands.size(); i++) {
+      for (TestCmd cmd : cleanupCommands) {
       try { 
-        CommandExecutor.executeCommand(cleanupCommands.get(i), namenode);
+        CommandExecutor.executeCommand(cmd, namenode, jobtracker);
       } catch (Exception e) {
         fail(StringUtils.stringifyException(e));
       }
@@ -410,12 +431,18 @@
         } else if (cleanupCommands != null) {
           cleanupCommands.add(new TestCmd(charString, CommandType.FS));
         }
-      } else if (qName.equals("admin-command")) {
+      } else if (qName.equals("dfs-admin-command")) {
           if (testCommands != null) {
-              testCommands.add(new TestCmd(charString,CommandType.ADMIN));
+              testCommands.add(new TestCmd(charString,CommandType.DFSADMIN));
             } else if (cleanupCommands != null) {
-              cleanupCommands.add(new TestCmd(charString, CommandType.ADMIN));
+              cleanupCommands.add(new TestCmd(charString, CommandType.DFSADMIN));
             } 
+      } else if (qName.equals("mr-admin-command")) {
+        if (testCommands != null) {
+            testCommands.add(new TestCmd(charString,CommandType.MRADMIN));
+          } else if (cleanupCommands != null) {
+            cleanupCommands.add(new TestCmd(charString, CommandType.MRADMIN));
+          } 
       } else if (qName.equals("comparators")) {
         td.setComparatorData(testComparators);
       } else if (qName.equals("comparator")) {

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/cli/testConf.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/cli/testConf.xml?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/cli/testConf.xml (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/cli/testConf.xml Wed Dec 10 23:21:13 2008
@@ -3171,10 +3171,10 @@
       <test-commands>
         <command>-fs NAMENODE -mkdir /test </command>
         <command>-fs NAMENODE -touchz /test/file1 </command>
-        <admin-command>-fs NAMENODE -setQuota 1 /test/file1 </admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 1 /test/file1 </dfs-admin-command>
       </test-commands>
       <cleanup-commands>
-      	<admin-command>-fs NAMENODE -setQuota 5 /test </admin-command>
+      	<dfs-admin-command>-fs NAMENODE -setQuota 5 /test </dfs-admin-command>
         <!-- Same directory will be used in the next test -->
       </cleanup-commands>
       <comparators>
@@ -3188,7 +3188,7 @@
     <test> <!--Tested -->
       <description>verifying error messages for quota commands - setting quota on non-existing file</description>
       <test-commands>
-        <admin-command>-fs NAMENODE -setSpaceQuota 1g /test1 </admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1g /test1 </dfs-admin-command>
       </test-commands>
       <cleanup-commands>
              <!-- Same directory will be used in the next test -->   
@@ -3204,7 +3204,7 @@
     <test> <!--Tested -->
       <description>verifying error messages for quota commands - exceeding quota</description>
       <test-commands>
-        <admin-command>-fs NAMENODE -setQuota 3 /test </admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 3 /test </dfs-admin-command>
         <command>-fs NAMENODE -touchz /test/file0 </command>
         <command>-fs NAMENODE -mkdir /test/test1 </command>
       </test-commands>
@@ -3222,7 +3222,7 @@
     <test> <!--Tested -->
       <description>verifying error messages for quota commands - setting not valid quota</description>
       <test-commands>
-        <admin-command>-fs NAMENODE -setQuota 0 /test </admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 0 /test </dfs-admin-command>
       </test-commands>
       <cleanup-commands>
              <!-- Same directory will be used in the next test -->   
@@ -3238,7 +3238,7 @@
     <test> <!--Tested -->
       <description>verifying error messages for quota commands - setting not valid space quota</description>
       <test-commands>
-        <admin-command>-fs NAMENODE -setSpaceQuota a5 /test </admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota a5 /test </dfs-admin-command>
       </test-commands>
       <cleanup-commands>
              <!-- Same directory will be used in the next test -->   
@@ -3254,7 +3254,7 @@
     <test> <!--Tested -->
       <description>verifying error messages for quota commands - clearQuota on non existing file</description>
       <test-commands>
-        <admin-command>-fs NAMENODE -clrQuota /test1 </admin-command>
+        <dfs-admin-command>-fs NAMENODE -clrQuota /test1 </dfs-admin-command>
       </test-commands>
       <cleanup-commands>
       	<command>-fs NAMENODE -rmr /test </command>
@@ -3266,5 +3266,74 @@
         </comparator>
       </comparators>
     </test>
+    
+    <test> <!--Tested -->
+      <description>refreshServiceAcl: refreshing security authorization policy for namenode</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -refreshServiceAcl </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!--Tested -->
+      <description>refreshServiceAcl: verifying error message while refreshing security authorization policy for namenode</description>
+      <test-commands>
+        <!-- hadoop-policy.xml for tests has 
+             security.refresh.policy.protocol.acl = ${user.name} -->
+        <dfs-admin-command>-fs NAMENODE -Dhadoop.job.ugi=blah,blah -refreshServiceAcl </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>access denied</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!--Tested -->
+      <description>refreshServiceAcl: refreshing security authorization policy for jobtracker</description>
+      <test-commands>
+        <mr-admin-command>-jt JOBTRACKER -refreshServiceAcl </mr-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!--Tested -->
+      <description>refreshServiceAcl: verifying error message while refreshing security authorization policy for jobtracker</description>
+      <test-commands>
+        <!-- hadoop-policy.xml for tests has 
+             security.refresh.policy.protocol.acl = ${user.name} -->
+        <mr-admin-command>-jt JOBTRACKER -Dhadoop.job.ugi=blah,blah -refreshServiceAcl </mr-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>access denied</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
   </tests>
 </configuration>

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/cli/util/CLITestData.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/cli/util/CLITestData.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/cli/util/CLITestData.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/cli/util/CLITestData.java Wed Dec 10 23:21:13 2008
@@ -37,13 +37,13 @@
 
   /**
    * Class to define Test Command. includes type of the command and command itself
-   * Valid types FS and Admin (for dfsadmin commands)
-   *
+   * Valid types: FS, DFSADMIN and MRADMIN.
    */
   static public class TestCmd {
     public enum CommandType {
         FS,
-        ADMIN
+        DFSADMIN,
+        MRADMIN
     }
     private final CommandType type;
     private final String cmd;

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/cli/util/CommandExecutor.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/cli/util/CommandExecutor.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/cli/util/CommandExecutor.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/cli/util/CommandExecutor.java Wed Dec 10 23:21:13 2008
@@ -28,6 +28,7 @@
 import org.apache.hadoop.cli.util.CLITestData.TestCmd.CommandType;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.mapred.tools.MRAdmin;
 import org.apache.hadoop.util.ToolRunner;
 
 /**
@@ -40,8 +41,8 @@
   private static Exception lastException = null;
   private static String cmdExecuted = null;
   
-  private static String[] getFSCommandAsArgs(final String cmd, 
-		  final String namenode) {
+  private static String[] getCommandAsArgs(final String cmd, final String masterKey,
+		                                       final String master) {
     StringTokenizer tokenizer = new StringTokenizer(cmd, " ");
     String[] args = new String[tokenizer.countTokens()];
     
@@ -49,7 +50,7 @@
     while (tokenizer.hasMoreTokens()) {
       args[i] = tokenizer.nextToken();
 
-      args[i] = args[i].replaceAll("NAMENODE", namenode);
+      args[i] = args[i].replaceAll(masterKey, master);
       args[i] = args[i].replaceAll("CLITEST_DATA", 
         new File(TestCLI.TEST_CACHE_DATA_DIR).
         toURI().toString().replace(' ', '+'));
@@ -61,12 +62,16 @@
     return args;
   }
   
-  public static int executeCommand(final TestCmd cmd, final String namenode) throws Exception {
+  public static int executeCommand(final TestCmd cmd, 
+                                   final String namenode, final String jobtracker) 
+  throws Exception {
     switch(cmd.getType()) {
-    case ADMIN:
-      return CommandExecutor.executeDFSAdminCommand(cmd.getCmd(),namenode);
+    case DFSADMIN:
+      return CommandExecutor.executeDFSAdminCommand(cmd.getCmd(), namenode);
+    case MRADMIN:
+      return CommandExecutor.executeMRAdminCommand(cmd.getCmd(), jobtracker);
     case FS:
-      return CommandExecutor.executeFSCommand(cmd.getCmd(),namenode);
+      return CommandExecutor.executeFSCommand(cmd.getCmd(), namenode);
     default:
       throw new Exception("Unknow type of Test command:"+ cmd.getType()); 
     }
@@ -83,7 +88,7 @@
       System.setErr(new PrintStream(bao));
       
       DFSAdmin shell = new DFSAdmin();
-      String[] args = getFSCommandAsArgs(cmd, namenode);
+      String[] args = getCommandAsArgs(cmd, "NAMENODE", namenode);
       cmdExecuted = cmd;
      
       try {
@@ -102,6 +107,37 @@
       return exitCode;
   }
   
+  public static int executeMRAdminCommand(final String cmd, 
+                                          final String jobtracker) {
+    exitCode = 0;
+    
+    ByteArrayOutputStream bao = new ByteArrayOutputStream();
+    PrintStream origOut = System.out;
+    PrintStream origErr = System.err;
+    
+    System.setOut(new PrintStream(bao));
+    System.setErr(new PrintStream(bao));
+    
+    MRAdmin mradmin = new MRAdmin();
+    String[] args = getCommandAsArgs(cmd, "JOBTRACKER", jobtracker);
+    cmdExecuted = cmd;
+   
+    try {
+      ToolRunner.run(mradmin, args);
+    } catch (Exception e) {
+      e.printStackTrace();
+      lastException = e;
+      exitCode = -1;
+    } finally {
+      System.setOut(origOut);
+      System.setErr(origErr);
+    }
+    
+    commandOutput = bao.toString();
+    
+    return exitCode;
+  }
+
   public static int executeFSCommand(final String cmd, final String namenode) {
     exitCode = 0;
     
@@ -113,7 +149,7 @@
     System.setErr(new PrintStream(bao));
     
     FsShell shell = new FsShell();
-    String[] args = getFSCommandAsArgs(cmd, namenode);
+    String[] args = getCommandAsArgs(cmd, "NAMENODE", namenode);
     cmdExecuted = cmd;
     
     try {

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Dec 10 23:21:13 2008
@@ -233,7 +233,7 @@
                         long[] simulatedCapacities) throws IOException {
     this.conf = conf;
     try {
-      UserGroupInformation.setCurrentUGI(UnixUserGroupInformation.login(conf));
+      UserGroupInformation.setCurrentUser(UnixUserGroupInformation.login(conf));
     } catch (LoginException e) {
       IOException ioe = new IOException();
       ioe.initCause(e);

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed Dec 10 23:21:13 2008
@@ -87,7 +87,7 @@
   NNThroughputBenchmark(Configuration conf) throws IOException, LoginException {
     config = conf;
     ugi = UnixUserGroupInformation.login(config);
-    UserGroupInformation.setCurrentUGI(ugi);
+    UserGroupInformation.setCurrentUser(ugi);
 
     // We do not need many handlers, since each thread simulates a handler
     // by calling name-node methods directly
@@ -337,7 +337,7 @@
     }
 
     public void run() {
-      UserGroupInformation.setCurrentUGI(ugi);
+      UserGroupInformation.setCurrentUser(ugi);
       localNumOpsExecuted = 0;
       localCumulativeTime = 0;
       arg1 = statsOp.getExecutionArgument(daemonId);

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestIPC.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestIPC.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestIPC.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestIPC.java Wed Dec 10 23:21:13 2008
@@ -59,7 +59,9 @@
       this.sleep = sleep;
     }
 
-    public Writable call(Writable param, long receivedTime) throws IOException {
+    @Override
+    public Writable call(Class<?> protocol, Writable param, long receiveTime)
+        throws IOException {
       if (sleep) {
         try {
           Thread.sleep(RANDOM.nextInt(2*PING_INTERVAL));      // sleep a bit

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java Wed Dec 10 23:21:13 2008
@@ -71,8 +71,8 @@
     }
 
     @Override
-    public Writable call(final Writable param, final long receivedTime) 
-                                               throws IOException {
+    public Writable call(Class<?> protocol, Writable param, long receiveTime)
+        throws IOException {
       if (sleep) {
         try {
           Thread.sleep(RANDOM.nextInt(20)); // sleep a bit

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestRPC.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestRPC.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestRPC.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/ipc/TestRPC.java Wed Dec 10 23:21:13 2008
@@ -34,6 +34,12 @@
 import org.apache.hadoop.io.Writable;
 
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ConfiguredPolicy;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 
 /** Unit tests for RPC. */
 public class TestRPC extends TestCase {
@@ -319,6 +325,64 @@
     }
   }
   
+  private static final String ACL_CONFIG = "test.protocol.acl";
+  
+  private static class TestPolicyProvider extends PolicyProvider {
+
+    @Override
+    public Service[] getServices() {
+      return new Service[] { new Service(ACL_CONFIG, TestProtocol.class) };
+    }
+    
+  }
+  
+  private void doRPCs(Configuration conf, boolean expectFailure) throws Exception {
+    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, new TestPolicyProvider()));
+    
+    Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, 5, true, conf);
+
+    TestProtocol proxy = null;
+
+    server.start();
+
+    InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    
+    try {
+      proxy = (TestProtocol)RPC.getProxy(
+          TestProtocol.class, TestProtocol.versionID, addr, conf);
+      proxy.ping();
+
+      if (expectFailure) {
+        fail("Expect RPC.getProxy to fail with AuthorizationException!");
+      }
+    } catch (RemoteException e) {
+      if (expectFailure) {
+        assertTrue(e.unwrapRemoteException() instanceof AuthorizationException);
+      } else {
+        throw e;
+      }
+    } finally {
+      server.stop();
+      if (proxy != null) {
+        RPC.stopProxy(proxy);
+      }
+    }
+  }
+  
+  public void testAuthorization() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(
+        ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
+    
+    // Expect to succeed
+    conf.set(ACL_CONFIG, "*");
+    doRPCs(conf, false);
+    
+    // Reset authorization to expect failure
+    conf.set(ACL_CONFIG, "invalid invalid");
+    doRPCs(conf, true);
+  }
+  
   public static void main(String[] args) throws Exception {
 
     new TestRPC("test").testCalls();

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java?rev=725603&r1=725602&r2=725603&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java Wed Dec 10 23:21:13 2008
@@ -170,7 +170,7 @@
     }
   }
 
-  static void runPI(MiniMRCluster mr, JobConf jobconf) throws IOException {
+  public static void runPI(MiniMRCluster mr, JobConf jobconf) throws IOException {
     LOG.info("runPI");
     double estimate = org.apache.hadoop.examples.PiEstimator.estimate(
         NUM_MAPS, NUM_SAMPLES, jobconf).doubleValue();
@@ -179,7 +179,8 @@
     checkTaskDirectories(mr, new String[]{}, new String[]{});
   }
 
-  static void runWordCount(MiniMRCluster mr, JobConf jobConf) throws IOException {
+  public static void runWordCount(MiniMRCluster mr, JobConf jobConf) 
+  throws IOException {
     LOG.info("runWordCount");
     // Run a word count example
     // Keeping tasks that match this pattern

Added: hadoop/core/trunk/src/test/org/apache/hadoop/security/TestAccessControlList.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/security/TestAccessControlList.java?rev=725603&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/security/TestAccessControlList.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/security/TestAccessControlList.java Wed Dec 10 23:21:13 2008
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import java.util.Iterator;
+import java.util.Set;
+
+import org.apache.hadoop.security.SecurityUtil.AccessControlList;
+
+import junit.framework.TestCase;
+
+public class TestAccessControlList extends TestCase {
+  
+  public void testWildCardAccessControlList() throws Exception {
+    AccessControlList acl;
+    
+    acl = new AccessControlList("*");
+    assertTrue(acl.allAllowed());
+    
+    acl = new AccessControlList("  * ");
+    assertTrue(acl.allAllowed());
+    
+    acl = new AccessControlList(" *");
+    assertTrue(acl.allAllowed());
+    
+    acl = new AccessControlList("*  ");
+    assertTrue(acl.allAllowed());
+  }
+  
+  public void testAccessControlList() throws Exception {
+    AccessControlList acl;
+    Set<String> users;
+    Set<String> groups;
+    
+    acl = new AccessControlList("drwho tardis");
+    users = acl.getUsers();
+    assertEquals(users.size(), 1);
+    assertEquals(users.iterator().next(), "drwho");
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 1);
+    assertEquals(groups.iterator().next(), "tardis");
+    
+    acl = new AccessControlList("drwho");
+    users = acl.getUsers();
+    assertEquals(users.size(), 1);
+    assertEquals(users.iterator().next(), "drwho");
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 0);
+    
+    acl = new AccessControlList("drwho ");
+    users = acl.getUsers();
+    assertEquals(users.size(), 1);
+    assertEquals(users.iterator().next(), "drwho");
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 0);
+    
+    acl = new AccessControlList(" tardis");
+    users = acl.getUsers();
+    assertEquals(users.size(), 0);
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 1);
+    assertEquals(groups.iterator().next(), "tardis");
+
+    Iterator<String> iter;
+    acl = new AccessControlList("drwho,joe tardis,users");
+    users = acl.getUsers();
+    assertEquals(users.size(), 2);
+    iter = users.iterator();
+    assertEquals(iter.next(), "drwho");
+    assertEquals(iter.next(), "joe");
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 2);
+    iter = groups.iterator();
+    assertEquals(iter.next(), "tardis");
+    assertEquals(iter.next(), "users");
+    
+    acl = new AccessControlList("drwho,joe tardis, users");
+    users = acl.getUsers();
+    assertEquals(users.size(), 2);
+    iter = users.iterator();
+    assertEquals(iter.next(), "drwho");
+    assertEquals(iter.next(), "joe");
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 2);
+    iter = groups.iterator();
+    assertEquals(iter.next(), "tardis");
+    assertEquals(iter.next(), "users");
+  }
+}

Added: hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java?rev=725603&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java Wed Dec 10 23:21:13 2008
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.authorize;
+
+import org.apache.hadoop.hdfs.HDFSPolicyProvider;
+import org.apache.hadoop.mapred.MapReducePolicyProvider;
+
+public class HadoopPolicyProvider extends PolicyProvider {
+
+  @Override
+  public Service[] getServices() {
+    Service[] hdfsServices = new HDFSPolicyProvider().getServices();
+    Service[] mrServices = new MapReducePolicyProvider().getServices();
+    
+    Service[] hadoopServices = 
+      new Service[hdfsServices.length + mrServices.length];
+    System.arraycopy(hdfsServices, 0, hadoopServices, 0, hdfsServices.length);
+    System.arraycopy(mrServices, 0, hadoopServices, hdfsServices.length, 
+                     mrServices.length);
+
+    return hadoopServices;
+  }
+
+}

Added: hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/TestConfiguredPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/TestConfiguredPolicy.java?rev=725603&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/TestConfiguredPolicy.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/TestConfiguredPolicy.java Wed Dec 10 23:21:13 2008
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.authorize;
+
+import java.security.Permission;
+
+import javax.security.auth.Subject;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.SecurityUtil.AccessControlList;
+
+import junit.framework.TestCase;
+
+public class TestConfiguredPolicy extends TestCase {
+  private static final String USER1 = "drwho";
+  private static final String USER2 = "joe";
+  private static final String[] GROUPS1 = new String[]{"tardis"};
+  private static final String[] GROUPS2 = new String[]{"users"};
+  
+  private static final String KEY_1 = "test.policy.1";
+  private static final String KEY_2 = "test.policy.2";
+  
+  public static class Protocol1 {
+    int i;
+  }
+  public static class Protocol2 {
+    int j;
+  }
+  
+  private static class TestPolicyProvider extends PolicyProvider {
+    @Override
+    public Service[] getServices() {
+      return new Service[] {
+          new Service(KEY_1, Protocol1.class),
+          new Service(KEY_2, Protocol2.class),
+          };
+    }
+  }
+  
+  public void testConfiguredPolicy() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(KEY_1, AccessControlList.WILDCARD_ACL_VALUE);
+    conf.set(KEY_2, USER1 + " " + GROUPS1[0]);
+    
+    ConfiguredPolicy policy = new ConfiguredPolicy(conf, new TestPolicyProvider());
+    SecurityUtil.setPolicy(policy);
+    
+    Subject user1 = 
+      SecurityUtil.getSubject(new UnixUserGroupInformation(USER1, GROUPS1));
+
+    // Should succeed
+    ServiceAuthorizationManager.authorize(user1, Protocol1.class);
+    
+    // Should fail
+    Subject user2 = 
+      SecurityUtil.getSubject(new UnixUserGroupInformation(USER2, GROUPS2));
+    boolean failed = false;
+    try {
+      ServiceAuthorizationManager.authorize(user2, Protocol2.class);
+    } catch (AuthorizationException ae) {
+      failed = true;
+    }
+    assertTrue(failed);
+  }
+}

Added: hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java?rev=725603&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java Wed Dec 10 23:21:13 2008
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.authorize;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.HDFSPolicyProvider;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.mapred.TestMiniMRWithDFS;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
+
+import junit.framework.TestCase;
+
+public class TestServiceLevelAuthorization extends TestCase {
+  public void testServiceLevelAuthorization() throws Exception {
+    MiniDFSCluster dfs = null;
+    MiniMRCluster mr = null;
+    FileSystem fileSys = null;
+    try {
+      final int slaves = 4;
+
+      // Turn on service-level authorization
+      Configuration conf = new Configuration();
+      conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
+                    HadoopPolicyProvider.class, PolicyProvider.class);
+      conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
+                      true);
+      
+      // Start the mini clusters
+      dfs = new MiniDFSCluster(conf, slaves, true, null);
+      fileSys = dfs.getFileSystem();
+      JobConf mrConf = new JobConf(conf);
+      mr = new MiniMRCluster(slaves, fileSys.getUri().toString(), 1, 
+                             null, null, mrConf);
+
+      // Run examples
+      TestMiniMRWithDFS.runPI(mr, mr.createJobConf(mrConf));
+      TestMiniMRWithDFS.runWordCount(mr, mr.createJobConf(mrConf));
+    } finally {
+      if (dfs != null) { dfs.shutdown(); }
+      if (mr != null) { mr.shutdown(); }
+    }
+  }
+  
+  private static final String DUMMY_ACL = "nouser nogroup";
+  private static final String UNKNOWN_USER = "dev,null";
+  
+  private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
+    FileWriter fos = new FileWriter(policyFile);
+    PolicyProvider policyProvider = new HDFSPolicyProvider();
+    fos.write("<configuration>\n");
+    for (Service service : policyProvider.getServices()) {
+      String key = service.getServiceKey();
+      String value = "*";
+      if (key.equals("security.refresh.policy.protocol.acl")) {
+        value = DUMMY_ACL;
+      }
+      fos.write("<property><name>"+ key + "</name><value>" + value + 
+                "</value></property>\n");
+      System.err.println("<property><name>"+ key + "</name><value>" + value + 
+          "</value></property>\n");
+    }
+    fos.write("</configuration>\n");
+    fos.close();
+  }
+  
+  private void refreshPolicy(Configuration conf)  throws IOException {
+    DFSAdmin dfsAdmin = new DFSAdmin(conf);
+    dfsAdmin.refreshServiceAcl();
+  }
+  
+  public void testRefresh() throws Exception {
+    MiniDFSCluster dfs = null;
+    try {
+      final int slaves = 4;
+
+      // Turn on service-level authorization
+      Configuration conf = new Configuration();
+      conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
+                    HDFSPolicyProvider.class, PolicyProvider.class);
+      conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
+                      true);
+      
+      // Start the mini dfs cluster
+      dfs = new MiniDFSCluster(conf, slaves, true, null);
+
+      // Refresh the service level authorization policy
+      refreshPolicy(conf);
+      
+      // Simulate an 'edit' of hadoop-policy.xml
+      String confDir = System.getProperty("test.build.extraconf", 
+                                          "build/test/extraconf");
+      File policyFile = new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE);
+      String policyFileCopy = ConfiguredPolicy.HADOOP_POLICY_FILE + ".orig";
+      FileUtil.copy(policyFile, FileSystem.getLocal(conf),   // first save original 
+                    new Path(confDir, policyFileCopy), false, conf);
+      rewriteHadoopPolicyFile(                               // rewrite the file
+          new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE));
+      
+      // Refresh the service level authorization policy
+      refreshPolicy(conf);
+      
+      // Refresh the service level authorization policy once again, 
+      // this time it should fail!
+      try {
+        // Note: hadoop-policy.xml for tests has 
+        // security.refresh.policy.protocol.acl = ${user.name}
+        conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, UNKNOWN_USER);
+        refreshPolicy(conf);
+        fail("Refresh of NameNode's policy file cannot be successful!");
+      } catch (RemoteException re) {
+        System.out.println("Good, refresh worked... refresh failed with: " + 
+                           StringUtils.stringifyException(re.unwrapRemoteException()));
+      } finally {
+        // Reset to original hadoop-policy.xml
+        FileUtil.fullyDelete(new File(confDir, 
+            ConfiguredPolicy.HADOOP_POLICY_FILE));
+        FileUtil.replaceFile(new File(confDir, policyFileCopy), new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE));
+      }
+    } finally {
+      if (dfs != null) { dfs.shutdown(); }
+    }
+  }
+
+}


