Return-Path: X-Original-To: apmail-hadoop-yarn-commits-archive@minotaur.apache.org Delivered-To: apmail-hadoop-yarn-commits-archive@minotaur.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 8D1D2100D1 for ; Thu, 17 Oct 2013 05:34:57 +0000 (UTC) Received: (qmail 97749 invoked by uid 500); 17 Oct 2013 05:34:37 -0000 Delivered-To: apmail-hadoop-yarn-commits-archive@hadoop.apache.org Received: (qmail 97616 invoked by uid 500); 17 Oct 2013 05:34:32 -0000 Mailing-List: contact yarn-commits-help@hadoop.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: yarn-commits@hadoop.apache.org Delivered-To: mailing list yarn-commits@hadoop.apache.org Received: (qmail 97360 invoked by uid 99); 17 Oct 2013 05:34:19 -0000 Received: from athena.apache.org (HELO athena.apache.org) (140.211.11.136) by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 17 Oct 2013 05:34:19 +0000 X-ASF-Spam-Status: No, hits=-2000.0 required=5.0 tests=ALL_TRUSTED,T_FRT_OPPORTUN1 X-Spam-Check-By: apache.org Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4) by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 17 Oct 2013 05:34:15 +0000 Received: from eris.apache.org (localhost [127.0.0.1]) by eris.apache.org (Postfix) with ESMTP id A78A72388CA6; Thu, 17 Oct 2013 05:33:14 +0000 (UTC) Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: svn commit: r1532967 [8/9] - in /hadoop/common/branches/HDFS-4949/hadoop-yarn-project: ./ hadoop-yarn/bin/ hadoop-yarn/conf/ hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ hadoop-yarn/hadoop-yarn-api/src/main/java... Date: Thu, 17 Oct 2013 05:33:06 -0000 To: yarn-commits@hadoop.apache.org From: wang@apache.org X-Mailer: svnmailer-1.0.9 Message-Id: <20131017053314.A78A72388CA6@eris.apache.org> X-Virus-Checked: Checked by ClamAV on apache.org Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java Thu Oct 17 05:32:42 2013 @@ -151,7 +151,8 @@ public class TestLeafQueue { // Define top-level queues conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {newRoot}); conf.setMaximumCapacity(CapacitySchedulerConfiguration.ROOT, 100); - conf.setAcl(CapacitySchedulerConfiguration.ROOT, QueueACL.SUBMIT_APPLICATIONS, " "); + conf.setAcl(CapacitySchedulerConfiguration.ROOT, + QueueACL.SUBMIT_APPLICATIONS, " "); final String Q_newRoot = CapacitySchedulerConfiguration.ROOT + "." 
+ newRoot; conf.setQueues(Q_newRoot, new String[] {A, B, C, D, E}); @@ -282,8 +283,9 @@ public class TestLeafQueue { // Setup some nodes String host_0 = "127.0.0.1"; - FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB); - + FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, + 8*GB); + final int numNodes = 1; Resource clusterResource = Resources.createResource(numNodes * (8*GB), numNodes * 16); @@ -293,13 +295,15 @@ public class TestLeafQueue { Priority priority = TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 3, true, - priority, recordFactory)), null, null); + priority, recordFactory))); // Start testing... // Only 1 container a.assignContainers(clusterResource, node_0); - assertEquals(6*GB, a.getMetrics().getAvailableMB()); + assertEquals( + (int)(node_0.getTotalResource().getMemory() * a.getCapacity()) - (1*GB), + a.getMetrics().getAvailableMB()); } @Test @@ -404,8 +408,9 @@ public class TestLeafQueue { // Setup some nodes String host_0 = "127.0.0.1"; - FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB); - + FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, + 8*GB); + final int numNodes = 1; Resource clusterResource = Resources.createResource(numNodes * (8*GB), numNodes * 16); @@ -415,11 +420,11 @@ public class TestLeafQueue { Priority priority = TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 3, true, - priority, recordFactory)), null, null); + priority, recordFactory))); app_1.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, true, - priority, recordFactory)), null, null); + priority, recordFactory))); // Start testing... @@ -492,12 +497,14 @@ public class TestLeafQueue { a.completedContainer(clusterResource, app_1, node_0, rmContainer, null, RMContainerEventType.KILL, null); } + assertEquals(0*GB, a.getUsedResources().getMemory()); assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(0*GB, a.getMetrics().getAllocatedMB()); - assertEquals(1*GB, a.getMetrics().getAvailableMB()); + assertEquals((int)(a.getCapacity() * node_0.getTotalResource().getMemory()), + a.getMetrics().getAvailableMB()); } @Test @@ -548,11 +555,11 @@ public class TestLeafQueue { Priority priority = TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 2*GB, 1, true, - priority, recordFactory)), null, null); + priority, recordFactory))); app_1.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, true, - priority, recordFactory)), null, null); + priority, recordFactory))); /** * Start testing... 
@@ -573,7 +580,7 @@ public class TestLeafQueue { // Pre MAPREDUCE-3732 this test should fail without this block too // app_2.updateResourceRequests(Collections.singletonList( // TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 1, priority, -// recordFactory))); +// recordFactory))); // 1 container to user_0 a.assignContainers(clusterResource, node_0); @@ -641,11 +648,11 @@ public class TestLeafQueue { Priority priority = TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 2*GB, 1, true, - priority, recordFactory)), null, null); + priority, recordFactory))); app_1.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, true, - priority, recordFactory)), null, null); + priority, recordFactory))); /** * Start testing... @@ -680,7 +687,7 @@ public class TestLeafQueue { a.setMaxCapacity(.1f); app_2.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, true, - priority, recordFactory)), null, null); + priority, recordFactory))); assertEquals(2, a.getActiveUsersManager().getNumActiveUsers()); // No more to user_0 since he is already over user-limit @@ -697,7 +704,7 @@ public class TestLeafQueue { LOG.info("here"); app_1.updateResourceRequests(Collections.singletonList( // unset TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 0, true, - priority, recordFactory)), null, null); + priority, recordFactory))); assertEquals(1, a.getActiveUsersManager().getNumActiveUsers()); a.assignContainers(clusterResource, node_1); assertEquals(1*GB, app_2.getHeadroom().getMemory()); // hit queue max-cap @@ -758,11 +765,11 @@ public class TestLeafQueue { Priority priority = TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 10, true, - priority, recordFactory)), null, null); + priority, recordFactory))); app_1.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 10, true, - priority, recordFactory)), null, null); + priority, recordFactory))); /** * Start testing... @@ -792,11 +799,11 @@ public class TestLeafQueue { app_2.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 3*GB, 1, true, - priority, recordFactory)), null, null); + priority, recordFactory))); app_3.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, true, - priority, recordFactory)), null, null); + priority, recordFactory))); // Now allocations should goto app_2 since // user_0 is at limit inspite of high user-limit-factor @@ -920,11 +927,11 @@ public class TestLeafQueue { Priority priority = TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, true, - priority, recordFactory)), null, null); + priority, recordFactory))); app_1.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 1, true, - priority, recordFactory)), null, null); + priority, recordFactory))); // Start testing... 
@@ -1024,7 +1031,7 @@ public class TestLeafQueue { Priority priority = TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 2*GB, 1, true, - priority, recordFactory)), null, null); + priority, recordFactory))); // Setup app_1 to request a 4GB container on host_0 and // another 4GB container anywhere. @@ -1036,7 +1043,7 @@ public class TestLeafQueue { true, priority, recordFactory)); appRequests_1.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 2, true, priority, recordFactory)); - app_1.updateResourceRequests(appRequests_1, null, null); + app_1.updateResourceRequests(appRequests_1); // Start testing... @@ -1131,11 +1138,11 @@ public class TestLeafQueue { Priority priority = TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, true, - priority, recordFactory)), null, null); + priority, recordFactory))); app_1.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 1, true, - priority, recordFactory)), null, null); + priority, recordFactory))); // Start testing... @@ -1260,7 +1267,7 @@ public class TestLeafQueue { app_0_requests_0.add( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 3, // one extra true, priority, recordFactory)); - app_0.updateResourceRequests(app_0_requests_0, null, null); + app_0.updateResourceRequests(app_0_requests_0); // Start testing... CSAssignment assignment = null; @@ -1325,7 +1332,7 @@ public class TestLeafQueue { app_0_requests_0.add( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, // one extra true, priority, recordFactory)); - app_0.updateResourceRequests(app_0_requests_0, null, null); + app_0.updateResourceRequests(app_0_requests_0); assertEquals(2, app_0.getTotalRequiredResources(priority)); String host_3 = "127.0.0.4"; // on rack_1 @@ -1416,7 +1423,7 @@ public class TestLeafQueue { TestUtils.createResourceRequest(ResourceRequest.ANY, 2*GB, 1, true, priority_2, recordFactory)); - app_0.updateResourceRequests(app_0_requests_0, null, null); + app_0.updateResourceRequests(app_0_requests_0); // Start testing... @@ -1531,7 +1538,7 @@ public class TestLeafQueue { app_0_requests_0.add( TestUtils.createResourceRequest(rack_1, 1*GB, 1, true, priority, recordFactory)); - app_0.updateResourceRequests(app_0_requests_0, null, null); + app_0.updateResourceRequests(app_0_requests_0); // Start testing... 
@@ -1540,7 +1547,7 @@ public class TestLeafQueue { app_0_requests_0.add( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, // only one true, priority, recordFactory)); - app_0.updateResourceRequests(app_0_requests_0, null, null); + app_0.updateResourceRequests(app_0_requests_0); // NODE_LOCAL - node_0_1 a.assignContainers(clusterResource, node_0_0); @@ -1563,7 +1570,7 @@ public class TestLeafQueue { app_0_requests_0.add( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, // only one true, priority, recordFactory)); - app_0.updateResourceRequests(app_0_requests_0, null, null); + app_0.updateResourceRequests(app_0_requests_0); // No allocation on node_0_1 even though it's node/rack local since // required(rack_1) == 0 @@ -1808,8 +1815,8 @@ public class TestLeafQueue { app_0_requests_0.add( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, // only one false, priority, recordFactory)); - app_0.updateResourceRequests(app_0_requests_0, - Collections.singletonList(host_0_0), null); + app_0.updateResourceRequests(app_0_requests_0); + app_0.updateBlacklist(Collections.singletonList(host_0_0), null); app_0_requests_0.clear(); // @@ -1849,8 +1856,8 @@ public class TestLeafQueue { app_0_requests_0.add( TestUtils.createResourceRequest(rack_1, 1*GB, 1, true, priority, recordFactory)); - app_0.updateResourceRequests(app_0_requests_0, - Collections.singletonList(host_1_1), null); + app_0.updateResourceRequests(app_0_requests_0); + app_0.updateBlacklist(Collections.singletonList(host_1_1), null); app_0_requests_0.clear(); // resourceName: @@ -1876,7 +1883,8 @@ public class TestLeafQueue { assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should be 0 // Now, remove node_1_1 from blacklist, but add rack_1 to blacklist - app_0.updateResourceRequests(app_0_requests_0, + app_0.updateResourceRequests(app_0_requests_0); + app_0.updateBlacklist( Collections.singletonList(rack_1), Collections.singletonList(host_1_1)); app_0_requests_0.clear(); @@ -1903,8 +1911,8 @@ public class TestLeafQueue { assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should be 0 // Now remove rack_1 from blacklist - app_0.updateResourceRequests(app_0_requests_0, - null, Collections.singletonList(rack_1)); + app_0.updateResourceRequests(app_0_requests_0); + app_0.updateBlacklist(null, Collections.singletonList(rack_1)); app_0_requests_0.clear(); // resourceName: @@ -1936,7 +1944,7 @@ public class TestLeafQueue { app_0_requests_0.add( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, // only one false, priority, recordFactory)); - app_0.updateResourceRequests(app_0_requests_0, null, null); + app_0.updateResourceRequests(app_0_requests_0); app_0_requests_0.clear(); // resourceName: Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- 
hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java Thu Oct 17 05:32:42 2013 @@ -25,11 +25,25 @@ import org.apache.hadoop.yarn.api.record import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue; +import org.apache.hadoop.yarn.util.Clock; import org.junit.Test; import org.mockito.Mockito; public class TestFSSchedulerApp { + private class MockClock implements Clock { + private long time = 0; + @Override + public long getTime() { + return time; + } + + public void tick(int seconds) { + time = time + seconds * 1000; + } + + } + private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { ApplicationId appIdImpl = ApplicationId.newInstance(0, appId); ApplicationAttemptId attId = @@ -94,6 +108,63 @@ public class TestFSSchedulerApp { } @Test + public void testDelaySchedulingForContinuousScheduling() + throws InterruptedException { + Queue queue = Mockito.mock(Queue.class); + Priority prio = Mockito.mock(Priority.class); + Mockito.when(prio.getPriority()).thenReturn(1); + + MockClock clock = new MockClock(); + + long nodeLocalityDelayMs = 5 * 1000L; // 5 seconds + long rackLocalityDelayMs = 6 * 1000L; // 6 seconds + + ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1); + FSSchedulerApp schedulerApp = + new FSSchedulerApp(applicationAttemptId, "user1", queue, + null, null); + AppSchedulable appSchedulable = Mockito.mock(AppSchedulable.class); + long startTime = clock.getTime(); + Mockito.when(appSchedulable.getStartTime()).thenReturn(startTime); + schedulerApp.setAppSchedulable(appSchedulable); + + // Default level should be node-local + assertEquals(NodeType.NODE_LOCAL, + schedulerApp.getAllowedLocalityLevelByTime(prio, + nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime())); + + // after 4 seconds should remain node local + clock.tick(4); + assertEquals(NodeType.NODE_LOCAL, + schedulerApp.getAllowedLocalityLevelByTime(prio, + nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime())); + + // after 6 seconds should switch to rack local + clock.tick(2); + assertEquals(NodeType.RACK_LOCAL, + schedulerApp.getAllowedLocalityLevelByTime(prio, + nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime())); + + // manually set back to node local + schedulerApp.resetAllowedLocalityLevel(prio, NodeType.NODE_LOCAL); + schedulerApp.resetSchedulingOpportunities(prio, clock.getTime()); + assertEquals(NodeType.NODE_LOCAL, + schedulerApp.getAllowedLocalityLevelByTime(prio, + nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime())); + + // Now escalate again to rack-local, then to off-switch + clock.tick(6); + assertEquals(NodeType.RACK_LOCAL, + schedulerApp.getAllowedLocalityLevelByTime(prio, + nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime())); + + clock.tick(7); + assertEquals(NodeType.OFF_SWITCH, + schedulerApp.getAllowedLocalityLevelByTime(prio, + nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime())); + } + + @Test /** * Ensure that when negative paramaters are given (signaling delay scheduling * no tin use), the least 
restrictive locality level is returned. Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java Thu Oct 17 05:32:42 2013 @@ -23,6 +23,10 @@ import static org.junit.Assert.assertNot import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.File; import java.io.FileWriter; @@ -32,6 +36,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -41,6 +46,7 @@ import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -53,6 +59,7 @@ import org.apache.hadoop.yarn.api.record import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -60,6 +67,7 @@ import org.apache.hadoop.yarn.factories. 
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; @@ -72,6 +80,7 @@ import org.apache.hadoop.yarn.server.res import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent; @@ -81,12 +90,15 @@ import org.apache.hadoop.yarn.server.res import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; import org.xml.sax.SAXException; public class TestFairScheduler { @@ -288,6 +300,14 @@ public class TestFairScheduler { conf.setBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT, true); conf.setDouble(FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE, .5); conf.setDouble(FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK, .7); + conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, + true); + conf.setInt(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS, + 10); + conf.setInt(FairSchedulerConfiguration.LOCALITY_DELAY_RACK_MS, + 5000); + conf.setInt(FairSchedulerConfiguration.LOCALITY_DELAY_NODE_MS, + 5000); conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512); conf.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, @@ -298,6 +318,11 @@ public class TestFairScheduler { Assert.assertEquals(true, scheduler.sizeBasedWeight); Assert.assertEquals(.5, scheduler.nodeLocalityThreshold, .01); Assert.assertEquals(.7, scheduler.rackLocalityThreshold, .01); + Assert.assertTrue("The continuous scheduling should be enabled", + scheduler.continuousSchedulingEnabled); + Assert.assertEquals(10, scheduler.continuousSchedulingSleepMs); + Assert.assertEquals(5000, scheduler.nodeLocalityDelayMs); + Assert.assertEquals(5000, scheduler.rackLocalityDelayMs); Assert.assertEquals(1024, scheduler.getMaximumResourceCapability().getMemory()); Assert.assertEquals(512, scheduler.getMinimumResourceCapability().getMemory()); Assert.assertEquals(128, @@ -406,9 +431,9 @@ public class TestFairScheduler { Collection queues = 
queueManager.getLeafQueues(); assertEquals(3, queues.size()); - FSLeafQueue queue1 = queueManager.getLeafQueue("default"); - FSLeafQueue queue2 = queueManager.getLeafQueue("parent.queue2"); - FSLeafQueue queue3 = queueManager.getLeafQueue("parent.queue3"); + FSLeafQueue queue1 = queueManager.getLeafQueue("default", true); + FSLeafQueue queue2 = queueManager.getLeafQueue("parent.queue2", true); + FSLeafQueue queue3 = queueManager.getLeafQueue("parent.queue3", true); assertEquals(capacity / 2, queue1.getFairShare().getMemory()); assertEquals(capacity / 2, queue1.getMetrics().getFairShareMB()); assertEquals(capacity / 4, queue2.getFairShare().getMemory()); @@ -420,25 +445,63 @@ public class TestFairScheduler { @Test public void testHierarchicalQueuesSimilarParents() { QueueManager queueManager = scheduler.getQueueManager(); - FSLeafQueue leafQueue = queueManager.getLeafQueue("parent.child"); + FSLeafQueue leafQueue = queueManager.getLeafQueue("parent.child", true); Assert.assertEquals(2, queueManager.getLeafQueues().size()); Assert.assertNotNull(leafQueue); Assert.assertEquals("root.parent.child", leafQueue.getName()); - FSLeafQueue leafQueue2 = queueManager.getLeafQueue("parent"); + FSLeafQueue leafQueue2 = queueManager.getLeafQueue("parent", true); Assert.assertNull(leafQueue2); Assert.assertEquals(2, queueManager.getLeafQueues().size()); - FSLeafQueue leafQueue3 = queueManager.getLeafQueue("parent.child.grandchild"); + FSLeafQueue leafQueue3 = queueManager.getLeafQueue("parent.child.grandchild", true); Assert.assertNull(leafQueue3); Assert.assertEquals(2, queueManager.getLeafQueues().size()); - FSLeafQueue leafQueue4 = queueManager.getLeafQueue("parent.sister"); + FSLeafQueue leafQueue4 = queueManager.getLeafQueue("parent.sister", true); Assert.assertNotNull(leafQueue4); Assert.assertEquals("root.parent.sister", leafQueue4.getName()); Assert.assertEquals(3, queueManager.getLeafQueues().size()); } + @Test + public void testSchedulerRootQueueMetrics() throws InterruptedException { + + // Add a node + RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + + // Queue 1 requests full capacity of node + createSchedulingRequest(1024, "queue1", "user1", 1); + scheduler.update(); + NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1); + scheduler.handle(updateEvent); + + // Now queue 2 requests likewise + createSchedulingRequest(1024, "queue2", "user1", 1); + scheduler.update(); + scheduler.handle(updateEvent); + + // Make sure reserved memory gets updated correctly + assertEquals(1024, scheduler.rootMetrics.getReservedMB()); + + // Now another node checks in with capacity + RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); + NodeUpdateSchedulerEvent updateEvent2 = new NodeUpdateSchedulerEvent(node2); + scheduler.handle(nodeEvent2); + scheduler.handle(updateEvent2); + + + // The old reservation should still be there... + assertEquals(1024, scheduler.rootMetrics.getReservedMB()); + + // ... but it should disappear when we update the first node. 
+ scheduler.handle(updateEvent); + assertEquals(0, scheduler.rootMetrics.getReservedMB()); + } + @Test (timeout = 5000) public void testSimpleContainerAllocation() { // Add a node @@ -541,24 +604,33 @@ public class TestFairScheduler { Configuration conf = createConfiguration(); conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "true"); scheduler.reinitialize(conf, resourceManager.getRMContext()); + RMContext rmContext = resourceManager.getRMContext(); + Map appsMap = rmContext.getRMApps(); + ApplicationAttemptId appAttemptId = createAppAttemptId(1, 1); + RMApp rmApp = new RMAppImpl(appAttemptId.getApplicationId(), rmContext, conf, + null, null, null, ApplicationSubmissionContext.newInstance(null, null, + null, null, null, false, false, 0, null, null), null, null, 0, null); + appsMap.put(appAttemptId.getApplicationId(), rmApp); + AppAddedSchedulerEvent appAddedEvent = new AppAddedSchedulerEvent( - createAppAttemptId(1, 1), "default", "user1"); + appAttemptId, "default", "user1"); scheduler.handle(appAddedEvent); - assertEquals(1, scheduler.getQueueManager().getLeafQueue("user1") + assertEquals(1, scheduler.getQueueManager().getLeafQueue("user1", true) .getAppSchedulables().size()); - assertEquals(0, scheduler.getQueueManager().getLeafQueue("default") + assertEquals(0, scheduler.getQueueManager().getLeafQueue("default", true) .getAppSchedulables().size()); + assertEquals("root.user1", rmApp.getQueue()); conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "false"); scheduler.reinitialize(conf, resourceManager.getRMContext()); AppAddedSchedulerEvent appAddedEvent2 = new AppAddedSchedulerEvent( createAppAttemptId(2, 1), "default", "user2"); scheduler.handle(appAddedEvent2); - assertEquals(1, scheduler.getQueueManager().getLeafQueue("user1") + assertEquals(1, scheduler.getQueueManager().getLeafQueue("user1", true) .getAppSchedulables().size()); - assertEquals(1, scheduler.getQueueManager().getLeafQueue("default") + assertEquals(1, scheduler.getQueueManager().getLeafQueue("default", true) .getAppSchedulables().size()); - assertEquals(0, scheduler.getQueueManager().getLeafQueue("user2") + assertEquals(0, scheduler.getQueueManager().getLeafQueue("user2", true) .getAppSchedulables().size()); } @@ -704,7 +776,7 @@ public class TestFairScheduler { assertEquals(2, scheduler.getQueueManager().getLeafQueues().size()); // That queue should have one app - assertEquals(1, scheduler.getQueueManager().getLeafQueue("user1") + assertEquals(1, scheduler.getQueueManager().getLeafQueue("user1", true) .getAppSchedulables().size()); AppRemovedSchedulerEvent appRemovedEvent1 = new AppRemovedSchedulerEvent( @@ -714,7 +786,7 @@ public class TestFairScheduler { scheduler.handle(appRemovedEvent1); // Queue should have no apps - assertEquals(0, scheduler.getQueueManager().getLeafQueue("user1") + assertEquals(0, scheduler.getQueueManager().getLeafQueue("user1", true) .getAppSchedulables().size()); } @@ -851,15 +923,71 @@ public class TestFairScheduler { Collection leafQueues = queueManager.getLeafQueues(); Assert.assertEquals(4, leafQueues.size()); - Assert.assertNotNull(queueManager.getLeafQueue("queueA")); - Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueC")); - Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueD")); - Assert.assertNotNull(queueManager.getLeafQueue("default")); + Assert.assertNotNull(queueManager.getLeafQueue("queueA", false)); + Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueC", false)); + Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueD", 
false)); + Assert.assertNotNull(queueManager.getLeafQueue("default", false)); // Make sure querying for queues didn't create any new ones: Assert.assertEquals(4, leafQueues.size()); } @Test + public void testConfigureRootQueue() throws Exception { + Configuration conf = createConfiguration(); + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println("fair"); + out.println(""); + out.println(" drf"); + out.println(" "); + out.println(" 1024mb,1vcores"); + out.println(" "); + out.println(" "); + out.println(" 1024mb,4vcores"); + out.println(" "); + out.println(""); + out.println(""); + out.close(); + + QueueManager queueManager = scheduler.getQueueManager(); + queueManager.initialize(); + + FSQueue root = queueManager.getRootQueue(); + assertTrue(root.getPolicy() instanceof DominantResourceFairnessPolicy); + + assertNotNull(queueManager.getLeafQueue("child1", false)); + assertNotNull(queueManager.getLeafQueue("child2", false)); + } + + /** + * Verify that you can't place queues at the same level as the root queue in + * the allocations file. + */ + @Test (expected = AllocationConfigurationException.class) + public void testQueueAlongsideRoot() throws Exception { + Configuration conf = createConfiguration(); + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.close(); + + QueueManager queueManager = scheduler.getQueueManager(); + queueManager.initialize(); + } + + @Test public void testBackwardsCompatibleAllocationFileParsing() throws Exception { Configuration conf = createConfiguration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); @@ -1355,9 +1483,9 @@ public class TestFairScheduler { scheduler.update(); FSLeafQueue schedC = - scheduler.getQueueManager().getLeafQueue("queueC"); + scheduler.getQueueManager().getLeafQueue("queueC", true); FSLeafQueue schedD = - scheduler.getQueueManager().getLeafQueue("queueD"); + scheduler.getQueueManager().getLeafQueue("queueD", true); assertTrue(Resources.equals( Resources.none(), scheduler.resToPreempt(schedC, clock.getTime()))); @@ -1538,6 +1666,7 @@ public class TestFairScheduler { out.println(""); out.println(""); out.println("norealuserhasthisname"); + out.println("norealuserhasthisname"); out.println(""); out.println(""); out.close(); @@ -1619,7 +1748,7 @@ public class TestFairScheduler { FSSchedulerApp app1 = scheduler.applications.get(attId1); FSSchedulerApp app2 = scheduler.applications.get(attId2); - FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1"); + FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true); queue1.setPolicy(new FifoPolicy()); scheduler.update(); @@ -1647,7 +1776,7 @@ public class TestFairScheduler { public void testMaxAssign() throws AllocationConfigurationException { // set required scheduler configs scheduler.assignMultiple = true; - scheduler.getQueueManager().getLeafQueue("root.default") + scheduler.getQueueManager().getLeafQueue("root.default", true) .setPolicy(SchedulingPolicy.getDefault()); RMNode node = @@ -1724,7 +1853,7 @@ public class TestFairScheduler { FSSchedulerApp app3 = 
scheduler.applications.get(attId3); FSSchedulerApp app4 = scheduler.applications.get(attId4); - scheduler.getQueueManager().getLeafQueue(fifoQueue) + scheduler.getQueueManager().getLeafQueue(fifoQueue, true) .setPolicy(SchedulingPolicy.parse("fifo")); scheduler.update(); @@ -1766,6 +1895,7 @@ public class TestFairScheduler { out.println(""); out.println(""); out.println("userallow"); + out.println("userallow"); out.println(""); out.println(""); out.close(); @@ -2205,4 +2335,91 @@ public class TestFairScheduler { fs.applications, FSSchedulerApp.class); } + @Test (timeout = 5000) + public void testContinuousScheduling() throws Exception { + // set continuous scheduling enabled + FairScheduler fs = new FairScheduler(); + Configuration conf = createConfiguration(); + conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, + true); + fs.reinitialize(conf, resourceManager.getRMContext()); + Assert.assertTrue("Continuous scheduling should be enabled.", + fs.isContinuousSchedulingEnabled()); + + // Add one node + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, + "127.0.0.1"); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + fs.handle(nodeEvent1); + + // available resource + Assert.assertEquals(fs.getClusterCapacity().getMemory(), 8 * 1024); + Assert.assertEquals(fs.getClusterCapacity().getVirtualCores(), 8); + + // send application request + ApplicationAttemptId appAttemptId = + createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++); + fs.addApplication(appAttemptId, "queue11", "user11"); + List ask = new ArrayList(); + ResourceRequest request = + createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true); + ask.add(request); + fs.allocate(appAttemptId, ask, new ArrayList(), null, null); + + // waiting for continuous_scheduler_sleep_time + // at least one pass + Thread.sleep(fs.getConf().getContinuousSchedulingSleepMs() + 500); + + FSSchedulerApp app = fs.applications.get(appAttemptId); + // Wait until app gets resources. 
+ while (app.getCurrentConsumption().equals(Resources.none())) { } + + // check consumption + Assert.assertEquals(1024, app.getCurrentConsumption().getMemory()); + Assert.assertEquals(1, app.getCurrentConsumption().getVirtualCores()); + } + + + @Test + public void testDontAllowUndeclaredPools() throws Exception{ + Configuration conf = createConfiguration(); + conf.setBoolean(FairSchedulerConfiguration.ALLOW_UNDECLARED_POOLS, false); + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.close(); + + QueueManager queueManager = scheduler.getQueueManager(); + queueManager.initialize(); + + FSLeafQueue jerryQueue = queueManager.getLeafQueue("jerry", false); + FSLeafQueue defaultQueue = queueManager.getLeafQueue("default", false); + + // Should get put into jerry + createSchedulingRequest(1024, "jerry", "someuser"); + assertEquals(1, jerryQueue.getAppSchedulables().size()); + + // Should get forced into default + createSchedulingRequest(1024, "newqueue", "someuser"); + assertEquals(1, jerryQueue.getAppSchedulables().size()); + assertEquals(1, defaultQueue.getAppSchedulables().size()); + + // Would get put into someuser because of user-as-default-queue, but should + // be forced into default + createSchedulingRequest(1024, "default", "someuser"); + assertEquals(1, jerryQueue.getAppSchedulables().size()); + assertEquals(2, defaultQueue.getAppSchedulables().size()); + + // Should get put into jerry because of user-as-default-queue + createSchedulingRequest(1024, "default", "jerry"); + assertEquals(2, jerryQueue.getAppSchedulables().size()); + assertEquals(2, defaultQueue.getAppSchedulables().size()); + } } Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java Thu Oct 17 05:32:42 2013 @@ -20,6 +20,11 @@ package org.apache.hadoop.yarn.server.re import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration.parseResourceConfigValue; import static org.junit.Assert.assertEquals; +import java.io.File; + +import junit.framework.Assert; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Test; @@ -55,4 +60,15 @@ public class TestFairSchedulerConfigurat public void testGibberish() throws Exception { 
parseResourceConfigValue("1o24vc0res"); } + + @Test + public void testGetAllocationFileFromClasspath() { + FairSchedulerConfiguration conf = new FairSchedulerConfiguration( + new Configuration()); + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, + "test-fair-scheduler.xml"); + File allocationFile = conf.getAllocationFile(); + Assert.assertEquals("test-fair-scheduler.xml", allocationFile.getName()); + Assert.assertTrue(allocationFile.exists()); + } } Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java Thu Oct 17 05:32:42 2013 @@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.re import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import junit.framework.Assert; @@ -43,6 +44,7 @@ import org.apache.hadoop.yarn.factories. 
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.Application; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; @@ -67,7 +69,8 @@ import org.junit.Test; public class TestFifoScheduler { private static final Log LOG = LogFactory.getLog(TestFifoScheduler.class); - + private final int GB = 1024; + private ResourceManager resourceManager = null; private static final RecordFactory recordFactory = @@ -93,8 +96,7 @@ public class TestFifoScheduler { YarnException { return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager( hostName, containerManagerPort, nmHttpPort, rackName, capability, - resourceManager.getResourceTrackerService(), resourceManager - .getRMContext()); + resourceManager); } private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { @@ -424,6 +426,40 @@ public class TestFifoScheduler { fs.applications, FiCaSchedulerApp.class); } + @SuppressWarnings("resource") + @Test + public void testBlackListNodes() throws Exception { + Configuration conf = new Configuration(); + conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, + ResourceScheduler.class); + MockRM rm = new MockRM(conf); + rm.start(); + FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler(); + + String host = "127.0.0.1"; + RMNode node = + MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, host); + fs.handle(new NodeAddedSchedulerEvent(node)); + + ApplicationId appId = BuilderUtils.newApplicationId(100, 1); + ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId( + appId, 1); + SchedulerEvent event = new AppAddedSchedulerEvent(appAttemptId, "default", + "user"); + fs.handle(event); + + // Verify the blacklist can be updated independent of requesting containers + fs.allocate(appAttemptId, Collections.emptyList(), + Collections.emptyList(), + Collections.singletonList(host), null); + Assert.assertTrue(fs.getApplication(appAttemptId).isBlacklisted(host)); + fs.allocate(appAttemptId, Collections.emptyList(), + Collections.emptyList(), null, + Collections.singletonList(host)); + Assert.assertFalse(fs.getApplication(appAttemptId).isBlacklisted(host)); + rm.stop(); + } + private void checkApplicationResourceUsage(int expected, Application application) { Assert.assertEquals(expected, application.getUsedResources().getMemory()); Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java (original) +++ 
hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java Thu Oct 17 05:32:42 2013 @@ -167,7 +167,7 @@ public class TestClientToAMTokens { MockRM rm = new MockRMWithCustomAMLauncher(conf, containerManager) { protected ClientRMService createClientRMService() { return new ClientRMService(this.rmContext, scheduler, - this.rmAppManager, this.applicationACLsManager, + this.rmAppManager, this.applicationACLsManager, this.queueACLsManager, this.rmDTSecretManager); }; @@ -194,16 +194,6 @@ public class TestClientToAMTokens { nm1.nodeHeartbeat(true); dispatcher.await(); - // Get the app-report. - GetApplicationReportRequest request = - Records.newRecord(GetApplicationReportRequest.class); - request.setApplicationId(app.getApplicationId()); - GetApplicationReportResponse reportResponse = - rm.getClientRMService().getApplicationReport(request); - ApplicationReport appReport = reportResponse.getApplicationReport(); - org.apache.hadoop.yarn.api.records.Token originalClientToAMToken = - appReport.getClientToAMToken(); - ApplicationAttemptId appAttempt = app.getCurrentAppAttempt().getAppAttemptId(); final MockAM mockAM = new MockAM(rm.getRMContext(), rm.getApplicationMasterService(), @@ -224,7 +214,17 @@ public class TestClientToAMTokens { return response; } }); - + + // Get the app-report. + GetApplicationReportRequest request = + Records.newRecord(GetApplicationReportRequest.class); + request.setApplicationId(app.getApplicationId()); + GetApplicationReportResponse reportResponse = + rm.getClientRMService().getApplicationReport(request); + ApplicationReport appReport = reportResponse.getApplicationReport(); + org.apache.hadoop.yarn.api.records.Token originalClientToAMToken = + appReport.getClientToAMToken(); + // ClientToAMToken master key should have been received on register // application master response. Assert.assertNotNull(response.getClientToAMTokenMasterKey()); Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java Thu Oct 17 05:32:42 2013 @@ -49,7 +49,7 @@ public class TestNodesPage { // Number of Actual Table Headers for NodesPage.NodesBlock might change in // future. In that case this value should be adjusted to the new value. 
final int numberOfThInMetricsTable = 13; - final int numberOfActualTableHeaders = 9; + final int numberOfActualTableHeaders = 10; private Injector injector; Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java Thu Oct 17 05:32:42 2013 @@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.server.res import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.util.YarnVersionInfo; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; @@ -84,6 +85,7 @@ public class TestRMWebServices extends J bind(RMContext.class).toInstance(rm.getRMContext()); bind(ApplicationACLsManager.class).toInstance( rm.getApplicationACLsManager()); + bind(QueueACLsManager.class).toInstance(rm.getQueueACLsManager()); serve("/*").with(GuiceContainer.class); } }); @@ -295,10 +297,10 @@ public class TestRMWebServices extends J String hadoopVersion, String resourceManagerVersionBuiltOn, String resourceManagerBuildVersion, String resourceManagerVersion) { - assertEquals("clusterId doesn't match: ", ResourceManager.clusterTimeStamp, - clusterid); - assertEquals("startedOn doesn't match: ", ResourceManager.clusterTimeStamp, - startedon); + assertEquals("clusterId doesn't match: ", + ResourceManager.getClusterTimeStamp(), clusterid); + assertEquals("startedOn doesn't match: ", + ResourceManager.getClusterTimeStamp(), startedon); assertTrue("stated doesn't match: " + state, state.matches(STATE.INITED.toString())); Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java (original) 
+++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java Thu Oct 17 05:32:42 2013 @@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configurat import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; @@ -47,6 +48,7 @@ import org.apache.hadoop.yarn.server.res import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.WebServicesTestUtils; @@ -93,6 +95,7 @@ public class TestRMWebServicesApps exten bind(RMContext.class).toInstance(rm.getRMContext()); bind(ApplicationACLsManager.class).toInstance( rm.getApplicationACLsManager()); + bind(QueueACLsManager.class).toInstance(rm.getQueueACLsManager()); serve("/*").with(GuiceContainer.class); } }); @@ -227,7 +230,8 @@ public class TestRMWebServicesApps exten WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("cluster") - .path("apps").queryParam("state", RMAppState.ACCEPTED.toString()) + .path("apps") + .queryParam("state", YarnApplicationState.ACCEPTED.toString()) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); JSONObject json = response.getEntity(JSONObject.class); @@ -252,7 +256,7 @@ public class TestRMWebServicesApps exten WebResource r = resource(); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add("states", RMAppState.ACCEPTED.toString()); + params.add("states", YarnApplicationState.ACCEPTED.toString()); ClientResponse response = r.path("ws").path("v1").path("cluster") .path("apps").queryParams(params) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); @@ -268,8 +272,8 @@ public class TestRMWebServicesApps exten r = resource(); params = new MultivaluedMapImpl(); - params.add("states", RMAppState.ACCEPTED.toString()); - params.add("states", RMAppState.KILLED.toString()); + params.add("states", YarnApplicationState.ACCEPTED.toString()); + params.add("states", YarnApplicationState.KILLED.toString()); response = r.path("ws").path("v1").path("cluster") .path("apps").queryParams(params) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); @@ -301,7 +305,7 @@ public class TestRMWebServicesApps exten WebResource r = resource(); MultivaluedMapImpl params = new MultivaluedMapImpl(); - params.add("states", RMAppState.ACCEPTED.toString()); + params.add("states", YarnApplicationState.ACCEPTED.toString()); ClientResponse response = r.path("ws").path("v1").path("cluster") .path("apps").queryParams(params) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); @@ -317,8 +321,8 @@ public class TestRMWebServicesApps exten r = resource(); params = new 
MultivaluedMapImpl(); - params.add("states", RMAppState.ACCEPTED.toString() + "," - + RMAppState.KILLED.toString()); + params.add("states", YarnApplicationState.ACCEPTED.toString() + "," + + YarnApplicationState.KILLED.toString()); response = r.path("ws").path("v1").path("cluster") .path("apps").queryParams(params) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); @@ -347,7 +351,8 @@ public class TestRMWebServicesApps exten WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("cluster") - .path("apps").queryParam("states", RMAppState.RUNNING.toString()) + .path("apps") + .queryParam("states", YarnApplicationState.RUNNING.toString()) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); JSONObject json = response.getEntity(JSONObject.class); @@ -365,7 +370,8 @@ public class TestRMWebServicesApps exten WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("cluster") - .path("apps").queryParam("state", RMAppState.RUNNING.toString()) + .path("apps") + .queryParam("state", YarnApplicationState.RUNNING.toString()) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); JSONObject json = response.getEntity(JSONObject.class); @@ -976,6 +982,169 @@ public class TestRMWebServicesApps exten } @Test + public void testAppStatistics() throws JSONException, Exception { + try { + rm.start(); + MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 4096); + Thread.sleep(1); + RMApp app1 = rm.submitApp(1024, "", UserGroupInformation.getCurrentUser() + .getShortUserName(), null, false, null, 2, null, "MAPREDUCE"); + amNodeManager.nodeHeartbeat(true); + // finish App + MockAM am = rm + .sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId()); + am.registerAppAttempt(); + am.unregisterAppAttempt(); + amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), + 1, ContainerState.COMPLETE); + + rm.submitApp(1024, "", UserGroupInformation.getCurrentUser() + .getShortUserName(), null, false, null, 2, null, "MAPREDUCE"); + rm.submitApp(1024, "", UserGroupInformation.getCurrentUser() + .getShortUserName(), null, false, null, 2, null, "OTHER"); + + // zero type, zero state + WebResource r = resource(); + ClientResponse response = r.path("ws").path("v1").path("cluster") + .path("appstatistics") + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + JSONObject json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + JSONObject appsStatInfo = json.getJSONObject("appStatInfo"); + assertEquals("incorrect number of elements", 1, appsStatInfo.length()); + JSONArray statItems = appsStatInfo.getJSONArray("statItem"); + assertEquals("incorrect number of elements", + YarnApplicationState.values().length, statItems.length()); + for (int i = 0; i < YarnApplicationState.values().length; ++i) { + assertEquals("*", statItems.getJSONObject(0).getString("type")); + if (statItems.getJSONObject(0).getString("state").equals("ACCEPTED")) { + assertEquals("2", statItems.getJSONObject(0).getString("count")); + } else if ( + statItems.getJSONObject(0).getString("state").equals("FINISHED")) { + assertEquals("1", statItems.getJSONObject(0).getString("count")); + } else { + assertEquals("0", statItems.getJSONObject(0).getString("count")); + } + } + + // zero type, one 
state + r = resource(); + response = r.path("ws").path("v1").path("cluster") + .path("appstatistics") + .queryParam("states", YarnApplicationState.ACCEPTED.toString()) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + appsStatInfo = json.getJSONObject("appStatInfo"); + assertEquals("incorrect number of elements", 1, appsStatInfo.length()); + statItems = appsStatInfo.getJSONArray("statItem"); + assertEquals("incorrect number of elements", 1, statItems.length()); + assertEquals("ACCEPTED", statItems.getJSONObject(0).getString("state")); + assertEquals("*", statItems.getJSONObject(0).getString("type")); + assertEquals("2", statItems.getJSONObject(0).getString("count")); + + // one type, zero state + r = resource(); + response = r.path("ws").path("v1").path("cluster") + .path("appstatistics") + .queryParam("applicationTypes", "MAPREDUCE") + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + appsStatInfo = json.getJSONObject("appStatInfo"); + assertEquals("incorrect number of elements", 1, appsStatInfo.length()); + statItems = appsStatInfo.getJSONArray("statItem"); + assertEquals("incorrect number of elements", + YarnApplicationState.values().length, statItems.length()); + for (int i = 0; i < YarnApplicationState.values().length; ++i) { + assertEquals("mapreduce", statItems.getJSONObject(0).getString("type")); + if (statItems.getJSONObject(0).getString("state").equals("ACCEPTED")) { + assertEquals("1", statItems.getJSONObject(0).getString("count")); + } else if ( + statItems.getJSONObject(0).getString("state").equals("FINISHED")) { + assertEquals("1", statItems.getJSONObject(0).getString("count")); + } else { + assertEquals("0", statItems.getJSONObject(0).getString("count")); + } + } + + // two types, zero state + r = resource(); + response = r.path("ws").path("v1").path("cluster") + .path("appstatistics") + .queryParam("applicationTypes", "MAPREDUCE,OTHER") + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + JSONObject exception = json.getJSONObject("RemoteException"); + assertEquals("incorrect number of elements", 3, exception.length()); + String message = exception.getString("message"); + String type = exception.getString("exception"); + String className = exception.getString("javaClassName"); + WebServicesTestUtils.checkStringContains("exception message", + "we temporarily support at most one applicationType", message); + WebServicesTestUtils.checkStringEqual("exception type", + "BadRequestException", type); + WebServicesTestUtils.checkStringEqual("exception className", + "org.apache.hadoop.yarn.webapp.BadRequestException", className); + + // one type, two states + r = resource(); + response = r.path("ws").path("v1").path("cluster") + .path("appstatistics") + .queryParam("states", YarnApplicationState.FINISHED.toString() + + "," + YarnApplicationState.ACCEPTED.toString()) + .queryParam("applicationTypes", "MAPREDUCE") + 
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + appsStatInfo = json.getJSONObject("appStatInfo"); + assertEquals("incorrect number of elements", 1, appsStatInfo.length()); + statItems = appsStatInfo.getJSONArray("statItem"); + assertEquals("incorrect number of elements", 2, statItems.length()); + JSONObject statItem1 = statItems.getJSONObject(0); + JSONObject statItem2 = statItems.getJSONObject(1); + assertTrue((statItem1.getString("state").equals("ACCEPTED") && + statItem2.getString("state").equals("FINISHED")) || + (statItem2.getString("state").equals("ACCEPTED") && + statItem1.getString("state").equals("FINISHED"))); + assertEquals("mapreduce", statItem1.getString("type")); + assertEquals("1", statItem1.getString("count")); + assertEquals("mapreduce", statItem2.getString("type")); + assertEquals("1", statItem2.getString("count")); + + // invalid state + r = resource(); + response = r.path("ws").path("v1").path("cluster") + .path("appstatistics").queryParam("states", "wrong_state") + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + exception = json.getJSONObject("RemoteException"); + assertEquals("incorrect number of elements", 3, exception.length()); + message = exception.getString("message"); + type = exception.getString("exception"); + className = exception.getString("javaClassName"); + WebServicesTestUtils.checkStringContains("exception message", + "Invalid application-state wrong_state", message); + WebServicesTestUtils.checkStringEqual("exception type", + "BadRequestException", type); + WebServicesTestUtils.checkStringEqual("exception className", + "org.apache.hadoop.yarn.webapp.BadRequestException", className); + } finally { + rm.stop(); + } + } + + @Test public void testSingleApp() throws JSONException, Exception { rm.start(); MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048); @@ -1181,8 +1350,8 @@ public class TestRMWebServicesApps exten trackingUI); WebServicesTestUtils.checkStringMatch("diagnostics", app.getDiagnostics() .toString(), diagnostics); - assertEquals("clusterId doesn't match", ResourceManager.clusterTimeStamp, - clusterId); + assertEquals("clusterId doesn't match", + ResourceManager.getClusterTimeStamp(), clusterId); assertEquals("startedTime doesn't match", app.getStartTime(), startedTime); assertEquals("finishedTime doesn't match", app.getFinishTime(), finishedTime); Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- 
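
The testAppStatistics test added above exercises the new appstatistics resource under ws/v1/cluster. As a rough sketch of how a client might hit the same resource: the RM address below is hypothetical, the response is parsed from its string form to avoid extra JSON providers, the field names appStatInfo, statItem, type, state and count follow the test's assertions, and the service currently rejects more than one applicationType per request.

    import javax.ws.rs.core.MediaType;

    import org.codehaus.jettison.json.JSONArray;
    import org.codehaus.jettison.json.JSONObject;

    import com.sun.jersey.api.client.Client;
    import com.sun.jersey.api.client.ClientResponse;
    import com.sun.jersey.api.client.WebResource;

    public class AppStatisticsClientSketch {
      public static void main(String[] args) throws Exception {
        WebResource r = Client.create().resource("http://rmhost:8088"); // hypothetical RM address
        ClientResponse response = r.path("ws").path("v1").path("cluster")
            .path("appstatistics")
            .queryParam("states", "ACCEPTED,FINISHED")
            .queryParam("applicationTypes", "MAPREDUCE") // at most one type is accepted
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        // Parse the raw entity so no Jettison message-body reader is required.
        JSONObject json = new JSONObject(response.getEntity(String.class));
        JSONArray statItems =
            json.getJSONObject("appStatInfo").getJSONArray("statItem");
        for (int i = 0; i < statItems.length(); i++) {
          JSONObject item = statItems.getJSONObject(i);
          System.out.println(item.getString("type") + "/"
              + item.getString("state") + " = " + item.getString("count"));
        }
      }
    }
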
hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java Thu Oct 17 05:32:42 2013 @@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.server.res import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; @@ -74,7 +75,6 @@ public class TestRMWebServicesCapacitySc float absoluteMaxCapacity; float absoluteUsedCapacity; int numApplications; - String usedResources; String queueName; String state; } @@ -107,6 +107,7 @@ public class TestRMWebServicesCapacitySc bind(RMContext.class).toInstance(rm.getRMContext()); bind(ApplicationACLsManager.class).toInstance( rm.getApplicationACLsManager()); + bind(QueueACLsManager.class).toInstance(rm.getQueueACLsManager()); serve("/*").with(GuiceContainer.class); } }); @@ -281,8 +282,6 @@ public class TestRMWebServicesCapacitySc WebServicesTestUtils.getXmlFloat(qElem, "absoluteUsedCapacity"); qi.numApplications = WebServicesTestUtils.getXmlInt(qElem, "numApplications"); - qi.usedResources = - WebServicesTestUtils.getXmlString(qElem, "usedResources"); qi.queueName = WebServicesTestUtils.getXmlString(qElem, "queueName"); qi.state = WebServicesTestUtils.getXmlString(qElem, "state"); verifySubQueueGeneric(q, qi, parentAbsCapacity, parentAbsMaxCapacity); @@ -358,10 +357,10 @@ public class TestRMWebServicesCapacitySc private void verifySubQueue(JSONObject info, String q, float parentAbsCapacity, float parentAbsMaxCapacity) throws JSONException, Exception { - int numExpectedElements = 12; + int numExpectedElements = 11; boolean isParentQueue = true; if (!info.has("queues")) { - numExpectedElements = 22; + numExpectedElements = 21; isParentQueue = false; } assertEquals("incorrect number of elements", numExpectedElements, info.length()); @@ -374,7 +373,6 @@ public class TestRMWebServicesCapacitySc qi.absoluteMaxCapacity = (float) info.getDouble("absoluteMaxCapacity"); qi.absoluteUsedCapacity = (float) info.getDouble("absoluteUsedCapacity"); qi.numApplications = info.getInt("numApplications"); - qi.usedResources = info.getString("usedResources"); qi.queueName = info.getString("queueName"); qi.state = info.getString("state"); @@ -429,8 +427,6 @@ public class TestRMWebServicesCapacitySc assertEquals("absoluteUsedCapacity doesn't match", 0, info.absoluteUsedCapacity, 1e-3f); assertEquals("numApplications doesn't match", 0, info.numApplications); - assertTrue("usedResources doesn't match ", - info.usedResources.matches("")); assertTrue("queueName doesn't match, got: " + info.queueName + " expected: " + q, qshortName.matches(info.queueName)); assertTrue("state doesn't match", Modified: 
hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java Thu Oct 17 05:32:42 2013 @@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.server.res import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.codehaus.jettison.json.JSONException; @@ -62,6 +63,7 @@ public class TestRMWebServicesFairSchedu bind(RMContext.class).toInstance(rm.getRMContext()); bind(ApplicationACLsManager.class).toInstance( rm.getApplicationACLsManager()); + bind(QueueACLsManager.class).toInstance(rm.getQueueACLsManager()); serve("/*").with(GuiceContainer.class); } }); Modified: hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java?rev=1532967&r1=1532966&r2=1532967&view=diff ============================================================================== --- hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java (original) +++ hadoop/common/branches/HDFS-4949/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java Thu Oct 17 05:32:42 2013 @@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.server.res import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; +import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import 
org.apache.hadoop.yarn.webapp.WebServicesTestUtils; @@ -86,6 +87,7 @@ public class TestRMWebServicesNodes exte bind(RMContext.class).toInstance(rm.getRMContext()); bind(ApplicationACLsManager.class).toInstance( rm.getApplicationACLsManager()); + bind(QueueACLsManager.class).toInstance(rm.getQueueACLsManager()); serve("/*").with(GuiceContainer.class); } }); @@ -653,13 +655,14 @@ public class TestRMWebServicesNodes exte WebServicesTestUtils.getXmlString(element, "healthReport"), WebServicesTestUtils.getXmlInt(element, "numContainers"), WebServicesTestUtils.getXmlLong(element, "usedMemoryMB"), - WebServicesTestUtils.getXmlLong(element, "availMemoryMB")); + WebServicesTestUtils.getXmlLong(element, "availMemoryMB"), + WebServicesTestUtils.getXmlString(element, "version")); } } public void verifyNodeInfo(JSONObject nodeInfo, MockNM nm) throws JSONException, Exception { - assertEquals("incorrect number of elements", 10, nodeInfo.length()); + assertEquals("incorrect number of elements", 11, nodeInfo.length()); verifyNodeInfoGeneric(nm, nodeInfo.getString("state"), nodeInfo.getString("rack"), @@ -667,14 +670,15 @@ public class TestRMWebServicesNodes exte nodeInfo.getString("nodeHTTPAddress"), nodeInfo.getLong("lastHealthUpdate"), nodeInfo.getString("healthReport"), nodeInfo.getInt("numContainers"), - nodeInfo.getLong("usedMemoryMB"), nodeInfo.getLong("availMemoryMB")); + nodeInfo.getLong("usedMemoryMB"), nodeInfo.getLong("availMemoryMB"), + nodeInfo.getString("version")); } public void verifyNodeInfoGeneric(MockNM nm, String state, String rack, String id, String nodeHostName, String nodeHTTPAddress, long lastHealthUpdate, String healthReport, - int numContainers, long usedMemoryMB, long availMemoryMB) + int numContainers, long usedMemoryMB, long availMemoryMB, String version) throws JSONException, Exception { RMNode node = rm.getRMContext().getRMNodes().get(nm.getNodeId()); @@ -693,6 +697,8 @@ public class TestRMWebServicesNodes exte + nm.getHttpPort(); WebServicesTestUtils.checkStringMatch("nodeHTTPAddress", expectedHttpAddress, nodeHTTPAddress); + WebServicesTestUtils.checkStringMatch("version", + node.getNodeManagerVersion(), version); long expectedHealthUpdate = node.getLastHealthReportTime(); assertEquals("lastHealthUpdate doesn't match, got: " + lastHealthUpdate
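
The node-related assertions above now expect an eleventh element, version, carrying the NodeManager version that RMNode#getNodeManagerVersion() reports. A short sketch of reading that field from the cluster nodes listing, under the same assumptions as the previous sketch (hypothetical RM address, string-based JSON parsing); the nodes/node wrapper keys follow the RM's usual JSON layout for this resource.

    import javax.ws.rs.core.MediaType;

    import org.codehaus.jettison.json.JSONArray;
    import org.codehaus.jettison.json.JSONObject;

    import com.sun.jersey.api.client.Client;
    import com.sun.jersey.api.client.ClientResponse;

    public class NodeVersionSketch {
      public static void main(String[] args) throws Exception {
        ClientResponse response = Client.create()
            .resource("http://rmhost:8088") // hypothetical RM address
            .path("ws").path("v1").path("cluster").path("nodes")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        JSONObject json = new JSONObject(response.getEntity(String.class));
        JSONArray nodes = json.getJSONObject("nodes").getJSONArray("node");
        for (int i = 0; i < nodes.length(); i++) {
          JSONObject nodeInfo = nodes.getJSONObject(i);
          // "version" is the new element checked by verifyNodeInfoGeneric above.
          System.out.println(nodeInfo.getString("nodeHTTPAddress")
              + " NM version " + nodeInfo.getString("version")
              + ", available " + nodeInfo.getLong("availMemoryMB") + " MB");
        }
      }
    }
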