hadoop-mapreduce-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From tomwh...@apache.org
Subject svn commit: r1002905 - in /hadoop/mapreduce/trunk: CHANGES.txt src/java/org/apache/hadoop/mapred/JobTracker.java src/java/org/apache/hadoop/mapred/TaskTracker.java src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
Date Thu, 30 Sep 2010 00:13:13 GMT
Author: tomwhite
Date: Thu Sep 30 00:13:13 2010
New Revision: 1002905

URL: http://svn.apache.org/viewvc?rev=1002905&view=rev
Log:
MAPREDUCE-2067.  Distinct minicluster services (e.g. NN and JT) overwrite each other's service
policies.  Contributed by Aaron T. Myers.

Modified:
    hadoop/mapreduce/trunk/CHANGES.txt
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java

Modified: hadoop/mapreduce/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/CHANGES.txt?rev=1002905&r1=1002904&r2=1002905&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/CHANGES.txt (original)
+++ hadoop/mapreduce/trunk/CHANGES.txt Thu Sep 30 00:13:13 2010
@@ -310,6 +310,9 @@ Trunk (unreleased changes)
     MAPREDUCE-1989. Fixes error message in gridmix when user resolver is set
     and no user list is given. (Ravi Gummadi via amareshwari)
 
+    MAPREDUCE-2067.  Distinct minicluster services (e.g. NN and JT) overwrite
+    each other's service policies.  (Aaron T. Myers via tomwhite)
+
 Release 0.21.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java?rev=1002905&r1=1002904&r2=1002905&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java Thu Sep 30 00:13:13 2010
@@ -58,6 +58,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -1451,12 +1452,6 @@ public class JobTracker implements MRCon
       = conf.getClass(JT_TASK_SCHEDULER,
           JobQueueTaskScheduler.class, TaskScheduler.class);
     taskScheduler = (TaskScheduler) ReflectionUtils.newInstance(schedulerClass, conf);
-                                           
-    // Set service-level authorization security policy
-    if (conf.getBoolean(
-          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
-      ServiceAuthorizationManager.refresh(conf, new MapReducePolicyProvider());
-    }
     
     int handlerCount = conf.getInt(JT_IPC_HANDLER_COUNT, 10);
     this.interTrackerServer = RPC.getServer(ClientProtocol.class,
@@ -1464,6 +1459,13 @@ public class JobTracker implements MRCon
                                             addr.getHostName(), 
                                             addr.getPort(), handlerCount, 
                                             false, conf, secretManager);
+
+    // Set service-level authorization security policy
+    if (conf.getBoolean(
+        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
+      this.interTrackerServer.refreshServiceAcl(conf, new MapReducePolicyProvider());
+    }
+
     if (LOG.isDebugEnabled()) {
       Properties p = System.getProperties();
       for (Iterator it = p.keySet().iterator(); it.hasNext();) {
@@ -4353,10 +4355,10 @@ public class JobTracker implements MRCon
   @Override
   public void refreshServiceAcl() throws IOException {
     if (!conf.getBoolean(
-            ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
+        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
       throw new AuthorizationException("Service Level Authorization not enabled!");
     }
-    ServiceAuthorizationManager.refresh(conf, new MapReducePolicyProvider());
+    this.interTrackerServer.refreshServiceAcl(conf, new MapReducePolicyProvider());
   }
 
   @Override

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java?rev=1002905&r1=1002904&r2=1002905&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java Thu Sep 30 00:13:13 2010
@@ -57,6 +57,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -100,7 +101,6 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.mapreduce.util.ConfigUtil;
 import org.apache.hadoop.mapreduce.util.MemoryCalculatorPlugin;
@@ -642,24 +642,25 @@ public class TaskTracker 
     
     this.jvmManager = new JvmManager(this);
 
+    // RPC initialization
+    int max = maxMapSlots > maxReduceSlots ?
+                       maxMapSlots : maxReduceSlots;
+    //set the num handlers to max*2 since canCommit may wait for the duration
+    //of a heartbeat RPC
+    this.taskReportServer = RPC.getServer(this.getClass(), this, bindAddress,
+        tmpPort, 2 * max, false, this.fConf, this.jobTokenSecretManager);
+
     // Set service-level authorization security policy
     if (this.fConf.getBoolean(
-          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
+        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
       PolicyProvider policyProvider = 
         (PolicyProvider)(ReflectionUtils.newInstance(
             this.fConf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                 MapReducePolicyProvider.class, PolicyProvider.class), 
             this.fConf));
-      ServiceAuthorizationManager.refresh(fConf, policyProvider);
+      this.taskReportServer.refreshServiceAcl(fConf, policyProvider);
     }
-    
-    // RPC initialization
-    int max = maxMapSlots > maxReduceSlots ? 
-                       maxMapSlots : maxReduceSlots;
-    //set the num handlers to max*2 since canCommit may wait for the duration
-    //of a heartbeat RPC
-    this.taskReportServer = RPC.getServer(this.getClass(), this, bindAddress,
-        tmpPort, 2 * max, false, this.fConf, this.jobTokenSecretManager);
+
     this.taskReportServer.start();
 
     // get the assigned address

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java?rev=1002905&r1=1002904&r2=1002905&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java Thu Sep 30 00:13:13 2010
@@ -21,6 +21,7 @@ import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -28,6 +29,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.mapred.JobConf;
@@ -55,10 +57,35 @@ public class TestServiceLevelAuthorizati
       
       // Start the mini clusters
       dfs = new MiniDFSCluster(conf, slaves, true, null);
+
+      // Ensure that the protocols authorized on the name node are only the HDFS protocols.
+      Set<Class<?>> protocolsWithAcls = NameNodeAdapter.getRpcServer(dfs.getNameNode())
+          .getServiceAuthorizationManager().getProtocolsWithAcls();
+      Service[] hdfsServices = new HDFSPolicyProvider().getServices();
+      for (Service service : hdfsServices) {
+        if (!protocolsWithAcls.contains(service.getProtocol()))
+          fail("service authorization manager has no entry for protocol " + service.getProtocol());
+      }
+      if (hdfsServices.length != protocolsWithAcls.size())
+        fail("there should be an entry for every HDFS service in the protocols with ACLs map");
+
       fileSys = dfs.getFileSystem();
       JobConf mrConf = new JobConf(conf);
       mr = new MiniMRCluster(slaves, fileSys.getUri().toString(), 1, 
                              null, null, mrConf);
+
+      // Ensure that the protocols configured for the name node did not change
+      // when the MR cluster was started.
+      protocolsWithAcls = NameNodeAdapter.getRpcServer(dfs.getNameNode())
+          .getServiceAuthorizationManager().getProtocolsWithAcls();
+      hdfsServices = new HDFSPolicyProvider().getServices();
+      for (Service service : hdfsServices) {
+        if (!protocolsWithAcls.contains(service.getProtocol()))
+          fail("service authorization manager has no entry for protocol " + service.getProtocol());
+      }
+      if (hdfsServices.length != protocolsWithAcls.size())
+        fail("there should be an entry for every HDFS service in the protocols with ACLs map");
+
       // make cleanup inline sothat validation of existence of these directories
       // can be done
       mr.setInlineCleanupThreads();



Mime
View raw message