hadoop-yarn-commits mailing list archives

From w...@apache.org
Subject svn commit: r1524865 [8/8] - in /hadoop/common/branches/HDFS-4949/hadoop-yarn-project: ./ hadoop-yarn/bin/ hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protoc...
Date Thu, 19 Sep 2013 23:42:29 GMT
Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java?rev=1524865&r1=1524864&r2=1524865&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java Thu Sep 19 23:42:10 2013
@@ -51,13 +51,15 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
-import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.After;
 import org.junit.Before;
@@ -414,7 +416,14 @@ public class TestFifoScheduler {
     
     LOG.info("--- END: testFifoScheduler ---");
   }
-  
+
+  @Test
+  public void testConcurrentAccessOnApplications() throws Exception {
+    FifoScheduler fs = new FifoScheduler();
+    TestCapacityScheduler.verifyConcurrentAccessOnApplications(
+        fs.applications, FiCaSchedulerApp.class);
+  }
+
   private void checkApplicationResourceUsage(int expected, 
       Application application) {
     Assert.assertEquals(expected, application.getUsedResources().getMemory());
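For readers unfamiliar with the pattern, the new test delegates to a shared helper in TestCapacityScheduler. Below is a standalone sketch of the kind of check such a helper might perform; it is illustrative only, does not reproduce the actual verifyConcurrentAccessOnApplications implementation, and all class and variable names in it are hypothetical:

  import java.util.Map;
  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.atomic.AtomicBoolean;

  public class ConcurrentMapAccessSketch {
    public static void main(String[] args) throws Exception {
      // A scheduler-style applications map; a plain HashMap would typically
      // fail under this load with a ConcurrentModificationException.
      final Map<Integer, String> applications =
          new ConcurrentHashMap<Integer, String>();
      final AtomicBoolean failed = new AtomicBoolean(false);

      // Writer thread: continuously adds and removes applications.
      Thread writer = new Thread(new Runnable() {
        public void run() {
          for (int i = 0; i < 100000; i++) {
            applications.put(i, "app-" + i);
            applications.remove(i - 10);
          }
        }
      });

      // Reader thread: iterates over the map while the writer mutates it.
      Thread reader = new Thread(new Runnable() {
        public void run() {
          try {
            for (int i = 0; i < 1000; i++) {
              for (Map.Entry<Integer, String> e : applications.entrySet()) {
                e.getValue();
              }
            }
          } catch (RuntimeException e) {
            failed.set(true); // e.g. ConcurrentModificationException
          }
        }
      });

      writer.start();
      reader.start();
      writer.join();
      reader.join();

      if (failed.get()) {
        throw new AssertionError("applications map is not safe for concurrent access");
      }
      System.out.println("concurrent access verified");
    }
  }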

Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java?rev=1524865&r1=1524864&r2=1524865&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java Thu Sep 19 23:42:10 2013
@@ -38,6 +38,8 @@ import org.apache.hadoop.yarn.api.protoc
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -46,6 +48,8 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MyContainerManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
 import org.junit.Test;
@@ -80,6 +84,7 @@ public class TestAMRMTokens {
    * 
    * @throws Exception
    */
+  @SuppressWarnings("unchecked")
   @Test
   public void testTokenExpiry() throws Exception {
 
@@ -134,6 +139,20 @@ public class TestAMRMTokens {
       finishAMRequest.setTrackingUrl("url");
       rmClient.finishApplicationMaster(finishAMRequest);
 
+      // Send RMAppAttemptEventType.CONTAINER_FINISHED to transition the
+      // RMAppAttempt from the Finishing state to the Finished state. Both the
+      // AMRMToken and the ClientToAMToken will be removed.
+      ContainerStatus containerStatus =
+          BuilderUtils.newContainerStatus(attempt.getMasterContainer().getId(),
+              ContainerState.COMPLETE,
+              "AM Container Finished", 0);
+      rm.getRMContext()
+          .getDispatcher()
+          .getEventHandler()
+          .handle(
+              new RMAppAttemptContainerFinishedEvent(applicationAttemptId,
+                  containerStatus));
+
       // Now simulate trying to allocate. RPC call itself should throw auth
       // exception.
       rpc.stopProxy(rmClient, conf); // To avoid using cached client

Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java?rev=1524865&r1=1524864&r2=1524865&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java Thu Sep 19 23:42:10 2013
@@ -115,7 +115,6 @@ public class TestClientToAMTokens {
     private final byte[] secretKey;
     private InetSocketAddress address;
     private boolean pinged = false;
-    private ClientToAMTokenSecretManager secretManager;
     
     public CustomAM(ApplicationAttemptId appId, byte[] secretKey) {
       super("CustomAM");
@@ -132,12 +131,14 @@ public class TestClientToAMTokens {
     protected void serviceStart() throws Exception {
       Configuration conf = getConfig();
 
-      secretManager = new ClientToAMTokenSecretManager(this.appAttemptId, secretKey);
       Server server;
       try {
         server =
-            new RPC.Builder(conf).setProtocol(CustomProtocol.class)
-              .setNumHandlers(1).setSecretManager(secretManager)
+            new RPC.Builder(conf)
+              .setProtocol(CustomProtocol.class)
+              .setNumHandlers(1)
+              .setSecretManager(
+                new ClientToAMTokenSecretManager(this.appAttemptId, secretKey))
               .setInstance(this).build();
       } catch (Exception e) {
         throw new YarnRuntimeException(e);
@@ -146,14 +147,10 @@ public class TestClientToAMTokens {
       this.address = NetUtils.getConnectAddress(server);
       super.serviceStart();
     }
-    
-    public ClientToAMTokenSecretManager getClientToAMTokenSecretManager() {
-      return this.secretManager;
-    }
   }
 
   @Test
-  public void testClientToAMs() throws Exception {
+  public void testClientToAMTokens() throws Exception {
 
     final Configuration conf = new Configuration();
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
@@ -204,7 +201,7 @@ public class TestClientToAMTokens {
     GetApplicationReportResponse reportResponse =
         rm.getClientRMService().getApplicationReport(request);
     ApplicationReport appReport = reportResponse.getApplicationReport();
-    org.apache.hadoop.yarn.api.records.Token clientToAMToken =
+    org.apache.hadoop.yarn.api.records.Token originalClientToAMToken =
         appReport.getClientToAMToken();
 
     ApplicationAttemptId appAttempt = app.getCurrentAppAttempt().getAppAttemptId();
@@ -259,17 +256,47 @@ public class TestClientToAMTokens {
       Assert.assertFalse(am.pinged);
     }
 
-    // Verify denial for a malicious user
-    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me");
     Token<ClientToAMTokenIdentifier> token =
-        ConverterUtils.convertFromYarn(clientToAMToken, am.address);
+        ConverterUtils.convertFromYarn(originalClientToAMToken, am.address);
+
+    // Verify denial for a malicious user with tampered ID
+    verifyTokenWithTamperedID(conf, am, token);
+
+    // Verify denial for a malicious user with tampered user-name
+    verifyTokenWithTamperedUserName(conf, am, token);
 
+    // Now for an authenticated user
+    verifyValidToken(conf, am, token);
+  }
+
+  private void verifyTokenWithTamperedID(final Configuration conf,
+      final CustomAM am, Token<ClientToAMTokenIdentifier> token)
+      throws IOException {
     // Malicious user, messes with appId
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me");
     ClientToAMTokenIdentifier maliciousID =
         new ClientToAMTokenIdentifier(BuilderUtils.newApplicationAttemptId(
-          BuilderUtils.newApplicationId(app.getApplicationId()
-            .getClusterTimestamp(), 42), 43));
+          BuilderUtils.newApplicationId(am.appAttemptId.getApplicationId()
+            .getClusterTimestamp(), 42), 43), UserGroupInformation
+          .getCurrentUser().getShortUserName());
+
+    verifyTamperedToken(conf, am, token, ugi, maliciousID);
+  }
+
+  private void verifyTokenWithTamperedUserName(final Configuration conf,
+      final CustomAM am, Token<ClientToAMTokenIdentifier> token)
+      throws IOException {
+    // Malicious user, messes with appId
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me");
+    ClientToAMTokenIdentifier maliciousID =
+        new ClientToAMTokenIdentifier(am.appAttemptId, "evilOrc");
 
+    verifyTamperedToken(conf, am, token, ugi, maliciousID);
+  }
+
+  private void verifyTamperedToken(final Configuration conf, final CustomAM am,
+      Token<ClientToAMTokenIdentifier> token, UserGroupInformation ugi,
+      ClientToAMTokenIdentifier maliciousID) {
     Token<ClientToAMTokenIdentifier> maliciousToken =
         new Token<ClientToAMTokenIdentifier>(maliciousID.getBytes(),
           token.getPassword(), token.getKind(),
@@ -309,8 +336,12 @@ public class TestClientToAMTokens {
               + "Mismatched response."));
       Assert.assertFalse(am.pinged);
     }
+  }
 
-    // Now for an authenticated user
+  private void verifyValidToken(final Configuration conf, final CustomAM am,
+      Token<ClientToAMTokenIdentifier> token) throws IOException,
+      InterruptedException {
+    UserGroupInformation ugi;
     ugi = UserGroupInformation.createRemoteUser("me");
     ugi.addToken(token);
 
@@ -326,5 +357,4 @@ public class TestClientToAMTokens {
       }
     });
   }
-
 }
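Both tampered variants are rejected for the same underlying reason: the token password is derived from the token identifier bytes with the per-attempt secret key, so changing the identifier (attempt id or user name) invalidates the password. A rough standalone illustration of that relationship follows; it is not the actual ClientToAMTokenSecretManager code, and the key, identifier strings, and algorithm choice are illustrative assumptions:

  import java.util.Arrays;
  import javax.crypto.Mac;
  import javax.crypto.spec.SecretKeySpec;

  public class TokenTamperSketch {
    public static void main(String[] args) throws Exception {
      byte[] masterKey = "not-a-real-master-key".getBytes("UTF-8");
      byte[] identifier = "appattempt_1_0001_000001/realUser".getBytes("UTF-8");
      byte[] password = hmac(masterKey, identifier);

      // A client that tampers with the identifier (different attempt id or
      // user name) cannot produce a matching password without the master key,
      // so the server-side secret manager rejects the connection.
      byte[] tamperedIdentifier = "appattempt_1_0042_000043/evilOrc".getBytes("UTF-8");
      byte[] recomputed = hmac(masterKey, tamperedIdentifier);
      System.out.println("passwords match: "
          + Arrays.equals(password, recomputed)); // prints false
    }

    private static byte[] hmac(byte[] key, byte[] data) throws Exception {
      Mac mac = Mac.getInstance("HmacSHA1");
      mac.init(new SecretKeySpec(key, "HmacSHA1"));
      return mac.doFinal(data);
    }
  }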

Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java?rev=1524865&r1=1524864&r2=1524865&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java Thu Sep 19 23:42:10 2013
@@ -25,8 +25,10 @@ import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.Collections;
@@ -48,9 +50,12 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
+import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -141,6 +146,13 @@ public class TestDelegationTokenRenewer 
     Renewer.reset();
     delegationTokenRenewer = new DelegationTokenRenewer();
     delegationTokenRenewer.init(conf);
+    RMContext mockContext = mock(RMContext.class);
+    ClientRMService mockClientRMService = mock(ClientRMService.class);
+    when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
+    InetSocketAddress sockAddr =
+        InetSocketAddress.createUnresolved("localhost", 1234);
+    when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
+    delegationTokenRenewer.setRMContext(mockContext);
     delegationTokenRenewer.start();
   }
   
@@ -454,6 +466,13 @@ public class TestDelegationTokenRenewer 
         YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS,
         1000l);
     localDtr.init(lconf);
+    RMContext mockContext = mock(RMContext.class);
+    ClientRMService mockClientRMService = mock(ClientRMService.class);
+    when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
+    InetSocketAddress sockAddr =
+        InetSocketAddress.createUnresolved("localhost", 1234);
+    when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
+    localDtr.setRMContext(mockContext);
     localDtr.start();
     
     MyFS dfs = (MyFS)FileSystem.get(lconf);
@@ -511,6 +530,13 @@ public class TestDelegationTokenRenewer 
         YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS,
         1000l);
     localDtr.init(lconf);
+    RMContext mockContext = mock(RMContext.class);
+    ClientRMService mockClientRMService = mock(ClientRMService.class);
+    when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
+    InetSocketAddress sockAddr =
+        InetSocketAddress.createUnresolved("localhost", 1234);
+    when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
+    localDtr.setRMContext(mockContext);
     localDtr.start();
     
     MyFS dfs = (MyFS)FileSystem.get(lconf);
@@ -550,7 +576,7 @@ public class TestDelegationTokenRenewer 
     } catch (InvalidToken ite) {}
   }
   
-  @Test(timeout=2000)
+  @Test(timeout=20000)
   public void testConncurrentAddApplication()
       throws IOException, InterruptedException, BrokenBarrierException {
     final CyclicBarrier startBarrier = new CyclicBarrier(2);
@@ -579,6 +605,13 @@ public class TestDelegationTokenRenewer 
     // fire up the renewer
     final DelegationTokenRenewer dtr = new DelegationTokenRenewer();
     dtr.init(conf);
+    RMContext mockContext = mock(RMContext.class);
+    ClientRMService mockClientRMService = mock(ClientRMService.class);
+    when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
+    InetSocketAddress sockAddr =
+        InetSocketAddress.createUnresolved("localhost", 1234);
+    when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
+    dtr.setRMContext(mockContext);
     dtr.start();
     
     // submit a job that blocks during renewal
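The same RMContext/ClientRMService stub is repeated before each start() call above. A hypothetical helper (not part of this patch) that would consolidate it, using only the Mockito calls already imported in this test, might look like:

  private static RMContext mockRMContextWithClientService() {
    // Stub a ClientRMService that reports a fixed, unresolved bind address.
    ClientRMService mockClientRMService = mock(ClientRMService.class);
    when(mockClientRMService.getBindAddress()).thenReturn(
        InetSocketAddress.createUnresolved("localhost", 1234));
    // Hand it out through a mocked RMContext, as the renewer expects.
    RMContext mockContext = mock(RMContext.class);
    when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
    return mockContext;
  }

Each test would then only need, for example, dtr.setRMContext(mockRMContextWithClientService()); before dtr.start().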

Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java?rev=1524865&r1=1524864&r2=1524865&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java Thu Sep 19 23:42:10 2013
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.webapp.Web
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
@@ -109,6 +110,16 @@ public class TestRMWebServices extends J
         .contextPath("jersey-guice-filter").servletPath("/").build());
   }
 
+  @BeforeClass
+  public static void initClusterMetrics() {
+    ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
+    clusterMetrics.incrDecommisionedNMs();
+    clusterMetrics.incrNumActiveNodes();
+    clusterMetrics.incrNumLostNMs();
+    clusterMetrics.incrNumRebootedNMs();
+    clusterMetrics.incrNumUnhealthyNMs();
+  }
+
   @Test
   public void testInfoXML() throws JSONException, Exception {
     WebResource r = resource();
@@ -426,7 +437,8 @@ public class TestRMWebServices extends J
         "totalNodes doesn't match",
         clusterMetrics.getNumActiveNMs() + clusterMetrics.getNumLostNMs()
             + clusterMetrics.getNumDecommisionedNMs()
-            + clusterMetrics.getNumRebootedNMs(), totalNodes);
+            + clusterMetrics.getNumRebootedNMs()
+            + clusterMetrics.getUnhealthyNMs(), totalNodes);
     assertEquals("lostNodes doesn't match", clusterMetrics.getNumLostNMs(),
         lostNodes);
     assertEquals("unhealthyNodes doesn't match",

Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java?rev=1524865&r1=1524864&r2=1524865&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java Thu Sep 19 23:42:10 2013
@@ -68,6 +68,7 @@ import com.sun.jersey.api.client.ClientR
 import com.sun.jersey.api.client.ClientResponse.Status;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.core.util.MultivaluedMapImpl;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
@@ -240,6 +241,122 @@ public class TestRMWebServicesApps exten
   }
 
   @Test
+  public void testAppsQueryStates() throws JSONException, Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    rm.submitApp(1024);
+    RMApp killedApp = rm.submitApp(1024);
+    rm.killApp(killedApp.getApplicationId());
+
+    amNodeManager.nodeHeartbeat(true);
+
+    WebResource r = resource();
+    MultivaluedMapImpl params = new MultivaluedMapImpl();
+    params.add("states", RMAppState.ACCEPTED.toString());
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("apps").queryParams(params)
+        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject apps = json.getJSONObject("apps");
+    assertEquals("incorrect number of elements", 1, apps.length());
+    JSONArray array = apps.getJSONArray("app");
+    assertEquals("incorrect number of elements", 1, array.length());
+    assertEquals("state not equal to ACCEPTED", "ACCEPTED", array
+        .getJSONObject(0).getString("state"));
+
+    r = resource();
+    params = new MultivaluedMapImpl();
+    params.add("states", RMAppState.ACCEPTED.toString());
+    params.add("states", RMAppState.KILLED.toString());
+    response = r.path("ws").path("v1").path("cluster")
+        .path("apps").queryParams(params)
+        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    apps = json.getJSONObject("apps");
+    assertEquals("incorrect number of elements", 1, apps.length());
+    array = apps.getJSONArray("app");
+    assertEquals("incorrect number of elements", 2, array.length());
+    assertTrue("both app states of ACCEPTED and KILLED are not present", 
+        (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
+        array.getJSONObject(1).getString("state").equals("KILLED")) ||
+        (array.getJSONObject(0).getString("state").equals("KILLED") &&
+        array.getJSONObject(1).getString("state").equals("ACCEPTED")));
+
+    rm.stop();
+  }
+
+  @Test
+  public void testAppsQueryStatesComma() throws JSONException, Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    rm.submitApp(1024);
+    RMApp killedApp = rm.submitApp(1024);
+    rm.killApp(killedApp.getApplicationId());
+
+    amNodeManager.nodeHeartbeat(true);
+
+    WebResource r = resource();
+    MultivaluedMapImpl params = new MultivaluedMapImpl();
+    params.add("states", RMAppState.ACCEPTED.toString());
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("apps").queryParams(params)
+        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject apps = json.getJSONObject("apps");
+    assertEquals("incorrect number of elements", 1, apps.length());
+    JSONArray array = apps.getJSONArray("app");
+    assertEquals("incorrect number of elements", 1, array.length());
+    assertEquals("state not equal to ACCEPTED", "ACCEPTED", array
+        .getJSONObject(0).getString("state"));
+
+    r = resource();
+    params = new MultivaluedMapImpl();
+    params.add("states", RMAppState.ACCEPTED.toString() + ","
+        + RMAppState.KILLED.toString());
+    response = r.path("ws").path("v1").path("cluster")
+        .path("apps").queryParams(params)
+        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    apps = json.getJSONObject("apps");
+    assertEquals("incorrect number of elements", 1, apps.length());
+    array = apps.getJSONArray("app");
+    assertEquals("incorrect number of elements", 2, array.length());
+    assertTrue("both app states of ACCEPTED and KILLED are not present", 
+        (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
+        array.getJSONObject(1).getString("state").equals("KILLED")) ||
+        (array.getJSONObject(0).getString("state").equals("KILLED") &&
+        array.getJSONObject(1).getString("state").equals("ACCEPTED")));
+    
+    rm.stop();
+  }
+
+  @Test
+  public void testAppsQueryStatesNone() throws JSONException, Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    rm.submitApp(1024);
+    amNodeManager.nodeHeartbeat(true);
+    WebResource r = resource();
+
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("apps").queryParam("states", RMAppState.RUNNING.toString())
+        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    assertEquals("apps is not null", JSONObject.NULL, json.get("apps"));
+    rm.stop();
+  }
+
+  @Test
   public void testAppsQueryStateNone() throws JSONException, Exception {
     rm.start();
     MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
@@ -258,6 +375,43 @@ public class TestRMWebServicesApps exten
   }
 
   @Test
+  public void testAppsQueryStatesInvalid() throws JSONException, Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    rm.submitApp(1024);
+    amNodeManager.nodeHeartbeat(true);
+    WebResource r = resource();
+
+    try {
+      r.path("ws").path("v1").path("cluster").path("apps")
+          .queryParam("states", "INVALID_test")
+          .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
+      fail("should have thrown exception on invalid state query");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject msg = response.getEntity(JSONObject.class);
+      JSONObject exception = msg.getJSONObject("RemoteException");
+      assertEquals("incorrect number of elements", 3, exception.length());
+      String message = exception.getString("message");
+      String type = exception.getString("exception");
+      String classname = exception.getString("javaClassName");
+      WebServicesTestUtils.checkStringContains(
+          "exception message",
+          "Invalid application-state INVALID_test",
+          message);
+      WebServicesTestUtils.checkStringMatch("exception type",
+          "BadRequestException", type);
+      WebServicesTestUtils.checkStringMatch("exception classname",
+          "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
+
+    } finally {
+      rm.stop();
+    }
+  }
+
+  @Test
   public void testAppsQueryStateInvalid() throws JSONException, Exception {
     rm.start();
     MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
@@ -280,15 +434,14 @@ public class TestRMWebServicesApps exten
       String message = exception.getString("message");
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
-      WebServicesTestUtils
-          .checkStringContains(
-              "exception message",
-              "org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState.INVALID_test",
-              message);
+      WebServicesTestUtils.checkStringContains(
+          "exception message",
+          "Invalid application-state INVALID_test",
+          message);
       WebServicesTestUtils.checkStringMatch("exception type",
-          "IllegalArgumentException", type);
+          "BadRequestException", type);
       WebServicesTestUtils.checkStringMatch("exception classname",
-          "java.lang.IllegalArgumentException", classname);
+          "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
 
     } finally {
       rm.stop();

Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java?rev=1524865&r1=1524864&r2=1524865&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java Thu Sep 19 23:42:10 2013
@@ -53,6 +53,21 @@ import org.apache.hadoop.yarn.server.nod
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
 
+/**
+ * Embedded Yarn minicluster for testcases that need to interact with a cluster.
+ * <p/>
+ * In a real cluster, resource request matching is done using the hostname, and
+ * by default Yarn minicluster works in the exact same way as a real cluster.
+ * <p/>
+ * If a testcase needs to use multiple nodes and exercise resource request
+ * matching to a specific node, then the property
+ * {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} should be
+ * set to <code>true</code> in the configuration used to initialize the minicluster.
+ * <p/>
+ * With this property set to <code>true</code>, the matching will be done using
+ * the <code>hostname:port</code> of the NodeManagers. In that case, the AM must
+ * make its resource requests using <code>hostname:port</code> as the location.
+ */
 public class MiniYARNCluster extends CompositeService {
 
   private static final Log LOG = LogFactory.getLog(MiniYARNCluster.class);
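A minimal usage sketch of the behaviour described in the new class comment, assuming the four-argument MiniYARNCluster constructor (test name, number of NodeManagers, local dirs per NM, log dirs per NM); the test name and the container-request details are placeholders:

  import org.apache.hadoop.yarn.conf.YarnConfiguration;
  import org.apache.hadoop.yarn.server.MiniYARNCluster;

  public class MiniClusterPortMatchingSketch {
    public static void main(String[] args) {
      YarnConfiguration conf = new YarnConfiguration();
      // Enable hostname:port matching so resource requests can target one of
      // the multiple NodeManagers started below.
      conf.setBoolean(
          YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);

      MiniYARNCluster cluster =
          new MiniYARNCluster("port-matching-test", 2, 1, 1);
      cluster.init(conf);
      cluster.start();
      try {
        // ... submit an application whose AM requests containers on the
        // "<hostname>:<port>" of a specific NodeManager ...
      } finally {
        cluster.stop();
      }
    }
  }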

Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java?rev=1524865&r1=1524864&r2=1524865&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java Thu Sep 19 23:42:10 2013
@@ -87,8 +87,9 @@ public class WebAppProxy extends Abstrac
   @Override
   protected void serviceStart() throws Exception {
     try {
-      proxyServer = new HttpServer("proxy", bindAddress, port,
-          port == 0, getConfig(), acl);
+      proxyServer = new HttpServer.Builder().setName("proxy")
+          .setBindAddress(bindAddress).setPort(port).setFindPort(port == 0)
+          .setConf(getConfig()).setACL(acl).build();
       proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME, 
           ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
       proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher);

Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm?rev=1524865&r1=1524864&r2=1524865&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm Thu Sep 19 23:42:10 2013
@@ -25,8 +25,7 @@ Hadoop MapReduce Next Generation - Fair 
 * {Purpose} 
 
   This document describes the <<<FairScheduler>>>, a pluggable scheduler for Hadoop
-  which provides a way to share large clusters. <<NOTE:>> The Fair Scheduler
-  implementation is currently under development and should be considered experimental.
+  that allows YARN applications to share resources in large clusters fairly.
 
 * {Introduction}
 
@@ -47,7 +46,7 @@ Hadoop MapReduce Next Generation - Fair 
 
   The scheduler organizes apps further into "queues", and shares resources
   fairly between these queues. By default, all users share a single queue,
-  called “default”. If an app specifically lists a queue in a container resource
+  named “default”. If an app specifically lists a queue in a container resource
   request, the request is submitted to that queue. It is also possible to assign
   queues based on the user name included with the request through
   configuration. Within each queue, a scheduling policy is used to share
@@ -85,7 +84,7 @@ Hadoop MapReduce Next Generation - Fair 
   their parents in the fair scheduler configuration file.
   
   A queue's name starts with the names of its parents, with periods as
-  separators. So a queue named "queue1" under the root named, would be referred
+  separators. So a queue named "queue1" under the root queue, would be referred
   to as "root.queue1", and a queue named "queue2" under a queue named "parent1"
   would be referred to as "root.parent1.queue2". When referring to queues, the
   root part of the name is optional, so queue1 could be referred to as just
@@ -118,22 +117,23 @@ Hadoop MapReduce Next Generation - Fair 
 
   Customizing the Fair Scheduler typically involves altering two files. First, 
   scheduler-wide options can be set by adding configuration properties in the 
-  fair-scheduler.xml file in your existing configuration directory. Second, in 
+  yarn-site.xml file in your existing configuration directory. Second, in 
   most cases users will want to create a manifest file listing which queues 
   exist and their respective weights and capacities. The location of this file 
-  is flexible - but it must be declared in fair-scheduler.xml. 
+  is flexible - but it must be declared in yarn-site.xml.
 
  * <<<yarn.scheduler.fair.allocation.file>>>
 
    * Path to allocation file. An allocation file is an XML manifest describing
      queues and their properties, in addition to certain policy defaults. This file
      must be in XML format as described in the next section.
+     Defaults to fair-scheduler.xml in configuration directory.
 
  * <<<yarn.scheduler.fair.user-as-default-queue>>>
 
     * Whether to use the username associated with the allocation as the default 
       queue name, in the event that a queue name is not specified. If this is set 
-      to "false" or unset, all jobs have a shared default queue, called "default".
+      to "false" or unset, all jobs have a shared default queue, named "default".
       Defaults to true.
 
  * <<<yarn.scheduler.fair.preemption>>>
@@ -158,7 +158,7 @@ Hadoop MapReduce Next Generation - Fair 
     * If assignmultiple is true, the maximum amount of containers that can be
       assigned in one heartbeat. Defaults to -1, which sets no limit.
 
- * <<<locality.threshold.node>>>
+ * <<<yarn.scheduler.fair.locality.threshold.node>>>
 
     * For applications that request containers on particular nodes, the number of
       scheduling opportunities since the last container assignment to wait before
@@ -167,7 +167,7 @@ Hadoop MapReduce Next Generation - Fair 
       opportunities to pass up. The default value of -1.0 means don't pass up any
       scheduling opportunities.
 
- * <<<locality.threshold.rack>>>
+ * <<<yarn.scheduler.fair.locality.threshold.rack>>>
 
     * For applications that request containers on particular racks, the number of
       scheduling opportunities since the last container assignment to wait before
@@ -178,14 +178,15 @@ Hadoop MapReduce Next Generation - Fair 
 
 Allocation file format
 
-  The allocation file must be in XML format. The format contains four types of
+  The allocation file must be in XML format. The format contains five types of
   elements:
 
  * <<Queue elements>>, which represent queues. Each may contain the following
      properties:
 
    * minResources: minimum resources the queue is entitled to, in the form
-     "X mb, Y vcores". If a queue's minimum share is not satisfied, it will be
+     "X mb, Y vcores". For the single-resource fairness policy, the vcores
+     value is ignored. If a queue's minimum share is not satisfied, it will be
      offered available resources before any other queue under the same parent.
      Under the single-resource fairness policy, a queue
      is considered unsatisfied if its memory usage is below its minimum memory
@@ -199,7 +200,8 @@ Allocation file format
      may be using those resources.
 
    * maxResources: maximum resources a queue is allowed, in the form
-     "X mb, Y vcores". A queue will never be assigned a container that would
+     "X mb, Y vcores". For the single-resource fairness policy, the vcores
+     value is ignored. A queue will never be assigned a container that would
      put its aggregate usage over this limit.
 
    * maxRunningApps: limit the number of apps from the queue to run at once
@@ -234,19 +236,23 @@ Allocation file format
    its fair share before it will try to preempt containers to take resources from
    other queues.
 
+ * <<A defaultQueueSchedulingPolicy element>>, which sets the default scheduling
+   policy for queues; overridden by the schedulingPolicy element in each queue
+   if specified. Defaults to "fair".
+
   An example allocation file is given here:
 
 ---
 <?xml version="1.0"?>
 <allocations>
   <queue name="sample_queue">
-    <minResources>10000 mb</minResources>
-    <maxResources>90000 mb</maxResources>
+    <minResources>10000 mb,0vcores</minResources>
+    <maxResources>90000 mb,0vcores</maxResources>
     <maxRunningApps>50</maxRunningApps>
     <weight>2.0</weight>
     <schedulingPolicy>fair</schedulingPolicy>
     <queue name="sample_sub_queue">
-      <minResources>5000 mb</minResources>
+      <minResources>5000 mb,0vcores</minResources>
     </queue>
   </queue>
   <user name="sample_user">
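As a quick illustration of where the scheduler-wide properties described above live, the following sketch sets them programmatically on a YarnConfiguration instead of in yarn-site.xml; the property names are the ones documented in this section, while the values and the allocation-file path are arbitrary examples:

  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class FairSchedulerConfigSketch {
    public static void main(String[] args) {
      // Scheduler-wide options normally live in yarn-site.xml; they are set
      // here only to show the documented property names.
      YarnConfiguration conf = new YarnConfiguration();
      conf.set("yarn.scheduler.fair.allocation.file",
          "/etc/hadoop/conf/fair-scheduler.xml"); // hypothetical path
      conf.setBoolean("yarn.scheduler.fair.user-as-default-queue", true);
      conf.setBoolean("yarn.scheduler.fair.preemption", false);
      conf.setFloat("yarn.scheduler.fair.locality.threshold.node", 0.5f);
      conf.setFloat("yarn.scheduler.fair.locality.threshold.rack", 0.5f);
      System.out.println(conf.get("yarn.scheduler.fair.allocation.file"));
    }
  }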

Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm?rev=1524865&r1=1524864&r2=1524865&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm Thu Sep 19 23:42:10 2013
@@ -1107,10 +1107,11 @@ ResourceManager REST API's.
 
 ** Query Parameters Supported
 
-  Multiple paramters can be specified.  The started and finished times have a begin and end parameter to allow you to specify ranges.  For example, one could request all applications that started between 1:00am and 2:00pm on 12/19/2011 with startedTimeBegin=1324256400&startedTimeEnd=1324303200. If the Begin parameter is not specfied, it defaults to 0, and if the End parameter is not specified, it defaults to infinity.
+  Multiple parameters can be specified.  The started and finished times have a begin and end parameter to allow you to specify ranges.  For example, one could request all applications that started between 1:00am and 2:00pm on 12/19/2011 with startedTimeBegin=1324256400&startedTimeEnd=1324303200. If the Begin parameter is not specified, it defaults to 0, and if the End parameter is not specified, it defaults to infinity.
 
 ------
-  * state - state of the application 
+  * state [deprecated] - state of the application
+  * states - applications matching the given application states, specified as a comma-separated list.
   * finalStatus - the final status of the application - reported by the application itself
   * user - user name
   * queue - queue name
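A small client-side sketch of the new multi-state query, using plain HttpURLConnection; the ResourceManager host and port are placeholders, while the /ws/v1/cluster/apps path, the states parameter, and the apps/app JSON layout match what the accompanying TestRMWebServicesApps changes exercise:

  import java.io.BufferedReader;
  import java.io.InputStreamReader;
  import java.net.HttpURLConnection;
  import java.net.URL;

  public class RmAppsByStateSketch {
    public static void main(String[] args) throws Exception {
      // Hypothetical ResourceManager web address.
      URL url = new URL(
          "http://rm-host:8088/ws/v1/cluster/apps?states=ACCEPTED,KILLED");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestProperty("Accept", "application/json");

      BufferedReader in = new BufferedReader(
          new InputStreamReader(conn.getInputStream(), "UTF-8"));
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // JSON body: {"apps":{"app":[ ... ]}}
      }
      in.close();
      conn.disconnect();
    }
  }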


