From: wangda@apache.org
To: common-commits@hadoop.apache.org
Date: Thu, 13 Apr 2017 18:14:05 -0000
Message-Id: <88a95e6b5866474c8142ed8a4084d6a8@git.apache.org>
Subject: [1/2] hadoop git commit: YARN-6040. Introduce api independent PendingAsk to replace usage of ResourceRequest within Scheduler classes. (wangda)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 107f685f6 -> 408d23477


http://git-wip-us.apache.org/repos/asf/hadoop/blob/408d2347/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SchedulingPlacementSet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SchedulingPlacementSet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SchedulingPlacementSet.java
index 3cf5fa2..3e0620e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SchedulingPlacementSet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SchedulingPlacementSet.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import java.util.Collection;
@@ -70,22 +72,38 @@ public interface SchedulingPlacementSet {
   Map<String, ResourceRequest> getResourceRequests();

   /**
-   * Get ResourceRequest by given schedulerKey and resourceName
+   * Get pending ask for given resourceName. If there's no such pendingAsk,
+   * returns {@link PendingAsk#ZERO}
+   *
+   * @param resourceName resourceName
+   * @return PendingAsk
+   */
+  PendingAsk getPendingAsk(String resourceName);
+
+  /**
+   * Get #pending-allocations for given resourceName. If there's no such
+   * pendingAsk, returns 0
+   *
    * @param resourceName resourceName
-   * @return ResourceRequest
+   * @return #pending-allocations
    */
-  ResourceRequest getResourceRequest(String resourceName);
+  int getOutstandingAsksCount(String resourceName);

   /**
    * Notify container allocated.
    * @param schedulerKey SchedulerRequestKey for this ResourceRequest
    * @param type Type of the allocation
    * @param node Which node this container allocated on
-   * @param request Which resource request to allocate
    * @return list of ResourceRequests deducted
    */
   List<ResourceRequest> allocate(SchedulerRequestKey schedulerKey,
-      NodeType type, SchedulerNode node, ResourceRequest request);
+      NodeType type, SchedulerNode node);
+
+  /**
+   * Returns list of accepted resourceNames.
+   * @return Iterator of accepted resourceNames
+   */
+  Iterator<String> getAcceptedResouceNames();

   /**
    * We can still have pending requirement for a given NodeType and node
@@ -94,4 +112,47 @@ public interface SchedulingPlacementSet {
    * @return true if we has pending requirement
    */
   boolean canAllocate(NodeType type, SchedulerNode node);
+
+  /**
+   * Can delay to give locality?
+   * TODO (wangda): This should be moved out of SchedulingPlacementSet
+   * and should belong to specific delay scheduling policy impl.
+   *
+   * @param resourceName resourceName
+   * @return can/cannot
+   */
+  boolean canDelayTo(String resourceName);
+
+  /**
+   * Does this {@link SchedulingPlacementSet} accept resources on nodePartition?
+   *
+   * @param nodePartition nodePartition
+   * @param schedulingMode schedulingMode
+   * @return accepted/not
+   */
+  boolean acceptNodePartition(String nodePartition,
+      SchedulingMode schedulingMode);
+
+  /**
+   * It is possible that one request can accept multiple node partition,
+   * So this method returns primary node partition for pending resource /
+   * headroom calculation.
+   *
+   * @return primary requested node partition
+   */
+  String getPrimaryRequestedNodePartition();
+
+  /**
+   * @return number of unique location asks with #pending greater than 0,
+   * (like /rack1, host1, etc.).
+   *
+   * TODO (wangda): This should be moved out of SchedulingPlacementSet
+   * and should belong to specific delay scheduling policy impl.
+   */
+  int getUniqueLocationAsks();
+
+  /**
+   * Print human-readable requests to LOG debug.
+   */
+  void showRequests();
 }
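PendingAsk itself lands in a separate file of this patch and its definition is not part of this message. Judging only from how the rest of this diff uses it -- the PendingAsk#ZERO sentinel plus the getPerAllocationResource() and getCount() accessors -- a minimal sketch of its shape would be the following; treat it as an illustration, not the committed source.

    // Hypothetical sketch of the PendingAsk value type, inferred from usage in
    // this diff; the real class is
    // org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk.
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.resource.Resources;

    public class PendingAsk {
      // Resource asked per allocation, e.g. <2 GB, 1 vcore> per container.
      private final Resource perAllocationResource;
      // Number of allocations still pending (#pending-allocations).
      private final int count;

      // Sentinel returned when no pending ask exists for a resourceName.
      public static final PendingAsk ZERO = new PendingAsk(Resources.none(), 0);

      public PendingAsk(Resource perAllocationResource, int count) {
        this.perAllocationResource = perAllocationResource;
        this.count = count;
      }

      public Resource getPerAllocationResource() {
        return perAllocationResource;
      }

      public int getCount() {
        return count;
      }
    }

The pair (per-allocation resource, count) is the same information the schedulers previously read off a ResourceRequest's capability and numContainers, without depending on the API record type itself.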
http://git-wip-us.apache.org/repos/asf/hadoop/blob/408d2347/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
index 89440dc..778d2a6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEv
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
+import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
@@ -702,12 +703,14 @@ public class TestAbstractYarnScheduler extends ParameterizedSchedulerTestBase {
     // The core part of this test
     // The killed containers' ResourceRequests are recovered back to the
     // original app-attempt, not the new one
-    for (ResourceRequest request : firstSchedulerAppAttempt
-        .getAppSchedulingInfo().getAllResourceRequests()) {
-      if (request.getPriority().getPriority() == 0) {
-        Assert.assertEquals(0, request.getNumContainers());
-      } else if (request.getPriority().getPriority() == ALLOCATED_CONTAINER_PRIORITY) {
-        Assert.assertEquals(1, request.getNumContainers());
+    for (SchedulerRequestKey key : firstSchedulerAppAttempt.getSchedulerKeys()) {
+      if (key.getPriority().getPriority() == 0) {
+        Assert.assertEquals(0,
+            firstSchedulerAppAttempt.getOutstandingAsksCount(key));
+      } else if (key.getPriority().getPriority() ==
+          ALLOCATED_CONTAINER_PRIORITY) {
+        Assert.assertEquals(1,
+            firstSchedulerAppAttempt.getOutstandingAsksCount(key));
       }
     }
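The hunk above is the pattern repeated through the rest of this commit's test changes: the count moves off the ResourceRequest (request.getNumContainers()) and onto the attempt, keyed by SchedulerRequestKey. A condensed sketch of the idiom, assuming a SchedulerApplicationAttempt receiver as in the test; the helper class itself is hypothetical.

    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
    import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;

    final class OutstandingAsks {
      private OutstandingAsks() {}

      // Sums what the per-key counters report; this is the information the old
      // code gathered by iterating getAllResourceRequests() and summing
      // request.getNumContainers().
      static int total(SchedulerApplicationAttempt attempt) {
        int total = 0;
        for (SchedulerRequestKey key : attempt.getSchedulerKeys()) {
          total += attempt.getOutstandingAsksCount(key);
        }
        return total;
      }
    }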
http://git-wip-us.apache.org/repos/asf/hadoop/blob/408d2347/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
index 468e760..bb29889 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
@@ -141,7 +141,7 @@ public class TestAppSchedulingInfo {
     // iterate to verify no ConcurrentModificationException
     for (SchedulerRequestKey schedulerKey : info.getSchedulerKeys()) {
-      info.allocate(NodeType.OFF_SWITCH, null, schedulerKey, req1, null);
+      info.allocate(NodeType.OFF_SWITCH, null, schedulerKey, null);
     }
     Assert.assertEquals(1, info.getSchedulerKeys().size());
     Assert.assertEquals(SchedulerRequestKey.create(req2),
@@ -153,7 +153,7 @@ public class TestAppSchedulingInfo {
     reqs.add(req2);
     info.updateResourceRequests(reqs, false);
     info.allocate(NodeType.OFF_SWITCH, null, SchedulerRequestKey.create(req2),
-        req2, null);
+        null);
     Assert.assertEquals(0, info.getSchedulerKeys().size());

     req1 = ResourceRequest.newInstance(pri1,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/408d2347/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
index 0a3075e..c5e5183 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
@@ -93,8 +93,7 @@ public class TestSchedulerApplicationAttempt {
     app.liveContainers.put(container1.getContainerId(), container1);
     SchedulerNode node = createNode();
     app.appSchedulingInfo.allocate(NodeType.OFF_SWITCH, node,
-        toSchedulerKey(requestedPriority),
-        request, container1.getContainer());
+        toSchedulerKey(requestedPriority), container1.getContainer());

     // Reserved container
     Priority prio1 = Priority.newInstance(1);
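Both files above track the same signature change: AppSchedulingInfo#allocate no longer takes the ResourceRequest being deducted, because the scheduling info can resolve its own pending ask from the scheduler key. A sketch of the before/after call shape; the wrapper method is hypothetical, the allocate signature is the one shown in the hunks.

    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
    import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;

    final class AllocateCallShape {
      private AllocateCallShape() {}

      // Pre-YARN-6040 the same call also carried the request being deducted:
      //   info.allocate(NodeType.OFF_SWITCH, node, key, request, container);
      // Now the AppSchedulingInfo decrements its own pending ask for the key.
      static void allocateOffSwitch(AppSchedulingInfo info, SchedulerNode node,
          SchedulerRequestKey key, Container container) {
        info.allocate(NodeType.OFF_SWITCH, node, key, container);
      }
    }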
http://git-wip-us.apache.org/repos/asf/hadoop/blob/408d2347/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index cce58b1..c1255d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -188,7 +188,7 @@ public class TestCapacityScheduler {
   private ResourceManager resourceManager = null;
   private RMContext mockContext;
-  
+
   @Before
   public void setUp() throws Exception {
     resourceManager = new ResourceManager() {
@@ -199,11 +199,11 @@ public class TestCapacityScheduler {
         return mgr;
       }
     };
-    CapacitySchedulerConfiguration csConf 
+    CapacitySchedulerConfiguration csConf
        = new CapacitySchedulerConfiguration();
     setupQueueConfiguration(csConf);
     YarnConfiguration conf = new YarnConfiguration(csConf);
-    conf.setClass(YarnConfiguration.RM_SCHEDULER, 
+    conf.setClass(YarnConfiguration.RM_SCHEDULER,
         CapacityScheduler.class, ResourceScheduler.class);
     resourceManager.init(conf);
     resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
@@ -263,7 +263,7 @@ public class TestCapacityScheduler {
         new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
             hostName, containerManagerPort, httpPort, rackName, capability,
             resourceManager);
-    NodeAddedSchedulerEvent nodeAddEvent1 = 
+    NodeAddedSchedulerEvent nodeAddEvent1 =
         new NodeAddedSchedulerEvent(resourceManager.getRMContext()
             .getRMNodes().get(nm.getNodeId()));
     resourceManager.getResourceScheduler().handle(nodeAddEvent1);
@@ -274,89 +274,89 @@ public class TestCapacityScheduler {
   public void testCapacityScheduler() throws Exception {
     LOG.info("--- START: testCapacityScheduler ---");
-    
+
     // Register node1
     String host_0 = "host_0";
-    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = 
-        registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, 
-            Resources.createResource(4 * GB, 1));
-    
+    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 =
+        registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK,
+            Resources.createResource(4 * GB, 1));
+
     // Register node2
     String host_1 = "host_1";
-    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = 
-        registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, 
-            Resources.createResource(2 * GB, 1));
+    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 =
+        registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK,
+            Resources.createResource(2 * GB, 1));

     // ResourceRequest priorities
     Priority priority_0 = Priority.newInstance(0);
     Priority priority_1 = Priority.newInstance(1);
-    
+
     // Submit an application
     Application application_0 = new Application("user_0", "a1", resourceManager);
     application_0.submit();
-    
+
     application_0.addNodeManager(host_0, 1234, nm_0);
     application_0.addNodeManager(host_1, 1234, nm_1);

     Resource capability_0_0 = Resources.createResource(1 * GB, 1);
     application_0.addResourceRequestSpec(priority_1, capability_0_0);
-    
+
     Resource capability_0_1 = Resources.createResource(2 * GB, 1);
     application_0.addResourceRequestSpec(priority_0, capability_0_1);

-    Task task_0_0 = new Task(application_0, priority_1, 
+    Task task_0_0 = new Task(application_0, priority_1,
         new String[] {host_0, host_1});
     application_0.addTask(task_0_0);
-    
+
     // Submit another application
     Application application_1 = new Application("user_1", "b2", resourceManager);
     application_1.submit();
-    
+
     application_1.addNodeManager(host_0, 1234, nm_0);
     application_1.addNodeManager(host_1, 1234, nm_1);
-    
+
     Resource capability_1_0 = Resources.createResource(3 * GB, 1);
     application_1.addResourceRequestSpec(priority_1, capability_1_0);
-    
+
     Resource capability_1_1 = Resources.createResource(2 * GB, 1);
     application_1.addResourceRequestSpec(priority_0, capability_1_1);

-    Task task_1_0 = new Task(application_1, priority_1, 
+    Task task_1_0 = new Task(application_1, priority_1,
         new String[] {host_0, host_1});
     application_1.addTask(task_1_0);
-    
+
     // Send resource requests to the scheduler
     application_0.schedule();
     application_1.schedule();

     // Send a heartbeat to kick the tires on the Scheduler
     LOG.info("Kick!");
-    
+
     // task_0_0 and task_1_0 allocated, used=4G
     nodeUpdate(nm_0);
-    
+
     // nothing allocated
     nodeUpdate(nm_1);
-    
+
     // Get allocations from the scheduler
     application_0.schedule();     // task_0_0
     checkApplicationResourceUsage(1 * GB, application_0);

     application_1.schedule();     // task_1_0
     checkApplicationResourceUsage(3 * GB, application_1);
-    
+
     checkNodeResourceUsage(4*GB, nm_0);  // task_0_0 (1G) and task_1_0 (3G)
     checkNodeResourceUsage(0*GB, nm_1);  // no tasks, 2G available

     LOG.info("Adding new tasks...");
-    
-    Task task_1_1 = new Task(application_1, priority_0, 
+
+    Task task_1_1 = new Task(application_1, priority_0,
         new String[] {ResourceRequest.ANY});
     application_1.addTask(task_1_1);

     application_1.schedule();

-    Task task_0_1 = new Task(application_0, priority_0, 
+    Task task_0_1 = new Task(application_0, priority_0,
         new String[] {host_0, host_1});
     application_0.addTask(task_0_1);
@@ -366,11 +366,11 @@ public class TestCapacityScheduler {
     LOG.info("Sending hb from " + nm_0.getHostName());
     // nothing new, used=4G
     nodeUpdate(nm_0);
-    
+
     LOG.info("Sending hb from " + nm_1.getHostName());
     // task_0_1 is prefer as locality, used=2G
     nodeUpdate(nm_1);
-    
+
     // Get allocations from the scheduler
     LOG.info("Trying to allocate...");
     application_0.schedule();
@@ -378,10 +378,10 @@ public class TestCapacityScheduler {
     application_1.schedule();
     checkApplicationResourceUsage(5 * GB, application_1);
-    
+
     nodeUpdate(nm_0);
     nodeUpdate(nm_1);
-    
+
     checkNodeResourceUsage(4*GB, nm_0);
     checkNodeResourceUsage(2*GB, nm_1);
@@ -408,20 +408,20 @@ public class TestCapacityScheduler {
    */
   private CapacitySchedulerConfiguration setupQueueConfiguration(
       CapacitySchedulerConfiguration conf) {
-    
+
     // Define top-level queues
     conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"});

     conf.setCapacity(A, A_CAPACITY);
     conf.setCapacity(B, B_CAPACITY);
-    
+
     // Define 2nd-level queues
     conf.setQueues(A, new String[] {"a1", "a2"});
     conf.setCapacity(A1, A1_CAPACITY);
     conf.setUserLimitFactor(A1, 100.0f);
     conf.setCapacity(A2, A2_CAPACITY);
     conf.setUserLimitFactor(A2, 100.0f);
-    
+
     conf.setQueues(B, new String[] {"b1", "b2", "b3"});
     conf.setCapacity(B1, B1_CAPACITY);
     conf.setUserLimitFactor(B1, 100.0f);
@@ -596,8 +596,8 @@ public class TestCapacityScheduler {
     conf.setMaximumCapacity(A, -1);
     assertEquals(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE,conf.getNonLabeledQueueMaximumCapacity(A),delta);
   }
-  
-  
+
+
   @Test
   public void testRefreshQueues() throws Exception {
     CapacityScheduler cs = new CapacityScheduler();
@@ -682,11 +682,11 @@ public class TestCapacityScheduler {
     return null;
   }

-  private void checkApplicationResourceUsage(int expected, 
+  private void checkApplicationResourceUsage(int expected,
       Application application) {
     Assert.assertEquals(expected, application.getUsedResources().getMemorySize());
   }
-  
+
   private void checkNodeResourceUsage(int expected,
       org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) {
     Assert.assertEquals(expected, node.getUsed().getMemorySize());
@@ -767,7 +767,7 @@ public class TestCapacityScheduler {
     // Add a new queue b4
     String B4 = B + ".b4";
     float B4_CAPACITY = 10;
-    
+
     B3_CAPACITY -= B4_CAPACITY;
     try {
       conf.setCapacity(A, 80f);
@@ -779,7 +779,7 @@ public class TestCapacityScheduler {
       conf.setCapacity(B4, B4_CAPACITY);
       cs.reinitialize(conf,mockContext);
       checkQueueCapacities(cs, 80f, 20f);
-      
+
       // Verify parent for B4
       CSQueue rootQueue = cs.getRootQueue();
       CSQueue queueB = findQueue(rootQueue, B);
@@ -997,7 +997,7 @@ public class TestCapacityScheduler {
         ResourceScheduler.class);
     MockRM rm = new MockRM(conf);
     rm.start();
-    
+
     MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);
     RMApp app1 = rm.submitApp(2048);
     // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
@@ -1027,7 +1027,7 @@ public class TestCapacityScheduler {
     Assert.assertEquals(1, allocated1.size());
     Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemorySize());
     Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
-    
+
     report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
     // check node report, 4 GB used and 0 GB available
     Assert.assertEquals(0, report_nm1.getAvailableResource().getMemorySize());
@@ -1036,13 +1036,13 @@ public class TestCapacityScheduler {
     // check container is assigned with 2 GB.
     Container c1 = allocated1.get(0);
     Assert.assertEquals(2 * GB, c1.getResource().getMemorySize());
-    
+
     // update node resource to 2 GB, so resource is over-consumed.
-    Map<NodeId, ResourceOption> nodeResourceMap = 
+    Map<NodeId, ResourceOption> nodeResourceMap =
         new HashMap<NodeId, ResourceOption>();
-    nodeResourceMap.put(nm1.getNodeId(), 
+    nodeResourceMap.put(nm1.getNodeId(),
         ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1));
-    UpdateNodeResourceRequest request = 
+    UpdateNodeResourceRequest request =
         UpdateNodeResourceRequest.newInstance(nodeResourceMap);
     AdminService as = ((MockRM)rm).getAdminService();
     as.updateNodeResource(request);
@@ -1061,7 +1061,7 @@ public class TestCapacityScheduler {
     report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
     Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemorySize());
     Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemorySize());
-    
+
     // Check container can complete successfully in case of resource over-commitment.
     ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
         c1.getId(), ContainerState.COMPLETE, "", 0, c1.getResource());
@@ -1079,7 +1079,7 @@ public class TestCapacityScheduler {
     Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());
     // As container return 2 GB back, the available resource becomes 0 again.
     Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize());
-    
+
     // Verify no NPE is trigger in schedule after resource is updated.
     am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 1, 1);
     alloc1Response = am1.schedule();
@@ -1097,7 +1097,7 @@ public class TestCapacityScheduler {
         0, alloc1Response.getAllocatedContainers().size());
     rm.stop();
   }
-  
+
   @Test
   public void testGetAppsInQueue() throws Exception {
     Application application_0 = new Application("user_0", "a1", resourceManager);
@@ -1145,7 +1145,7 @@ public class TestCapacityScheduler {
         cs.getSchedulerApplications(), cs, "a1");
     Assert.assertEquals("a1", app.getQueue().getQueueName());
   }
-  
+
   @Test
   public void testAsyncScheduling() throws Exception {
     Configuration conf = new Configuration();
@@ -1156,7 +1156,7 @@ public class TestCapacityScheduler {
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();

     final int NODES = 100;
-    
+
     // Register nodes
     for (int i=0; i < NODES; ++i) {
       String host = "192.168.1." + i;
@@ -1164,7 +1164,7 @@ public class TestCapacityScheduler {
           MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, host);
       cs.handle(new NodeAddedSchedulerEvent(node));
     }
-    
+
     // Now directly exercise the scheduling loop
     for (int i=0; i < NODES; ++i) {
       CapacityScheduler.schedule(cs);
@@ -1186,7 +1186,7 @@ public class TestCapacityScheduler {
           && attemptPM.getResourcePreempted().equals(currentAttemptPreempted)
           && app.getCurrentAppAttempt().getRMAppAttemptMetrics()
             .getIsPreempted() == currentAttemptAMPreempted
-          && attemptPM.getNumNonAMContainersPreempted() == 
+          && attemptPM.getNumNonAMContainersPreempted() ==
             numLatestAttemptTaskPreempted) {
         return;
       }
@@ -1200,7 +1200,7 @@ public class TestCapacityScheduler {
       Thread.sleep(500);
     }
   }
-  
+
   @Test(timeout = 30000)
   public void testAllocateDoesNotBlockOnSchedulerLock() throws Exception {
     final YarnConfiguration conf = new YarnConfiguration();
@@ -1419,7 +1419,7 @@ public class TestCapacityScheduler {
     rm1.stop();
   }
-  
+
   @Test(timeout = 300000)
   public void testRecoverRequestAfterPreemption() throws Exception {
     Configuration conf = new Configuration();
@@ -1453,8 +1453,9 @@ public class TestCapacityScheduler {
       // Already the node local resource request is cleared from RM after
       // allocation.
-      Assert.assertNull(app.getResourceRequest(
-          SchedulerRequestKey.create(request), request.getResourceName()));
+      Assert.assertEquals(0,
+          app.getOutstandingAsksCount(SchedulerRequestKey.create(request),
+              request.getResourceName()));
     }

     // Call killContainer to preempt the container
@@ -1464,10 +1465,9 @@ public class TestCapacityScheduler {
     for (ResourceRequest request : requests) {
       // Resource request must have added back in RM after preempt event
       // handling.
-      Assert.assertEquals(
-          1,
-          app.getResourceRequest(SchedulerRequestKey.create(request),
-              request.getResourceName()).getNumContainers());
+      Assert.assertEquals(1,
+          app.getOutstandingAsksCount(SchedulerRequestKey.create(request),
+              request.getResourceName()));
     }

     // New container will be allocated and will move to ALLOCATED state
@@ -2932,7 +2932,7 @@ public class TestCapacityScheduler {
     assertEquals("queue B2 max vcores allocation", 12,
         ((LeafQueue) queueB2).getMaximumAllocation().getVirtualCores());
   }
-  
+
   private void waitContainerAllocated(MockAM am, int mem, int nContainer,
       int startContainerId, MockRM rm, MockNM nm) throws Exception {
     for (int cId = startContainerId; cId < startContainerId + nContainer; cId++) {
@@ -2966,44 +2966,44 @@ public class TestCapacityScheduler {
     MockNM nm1 =
        new MockNM("127.0.0.1:1234", 100 * GB, rm1.getResourceTrackerService());
     nm1.registerNode();
-    
+
     RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b1");
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
-    
+
     waitContainerAllocated(am1, 1 * GB, 1, 2, rm1, nm1);

     // Maximum resoure of b1 is 100 * 0.895 * 0.792 = 71 GB
     // 2 GBs used by am, so it's 71 - 2 = 69G.
     Assert.assertEquals(69 * GB,
         am1.doHeartbeat().getAvailableResources().getMemorySize());
-    
+
     RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "b2");
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
-    
+
     // Allocate 5 containers, each one is 8 GB in am2 (40 GB in total)
     waitContainerAllocated(am2, 8 * GB, 5, 2, rm1, nm1);
-    
+
     // Allocated one more container with 1 GB resource in b1
     waitContainerAllocated(am1, 1 * GB, 1, 3, rm1, nm1);
-    
+
     // Total is 100 GB,
     // B2 uses 41 GB (5 * 8GB containers and 1 AM container)
     // B1 uses 3 GB (2 * 1GB containers and 1 AM container)
     // Available is 100 - 41 - 3 = 56 GB
     Assert.assertEquals(56 * GB,
         am1.doHeartbeat().getAvailableResources().getMemorySize());
-    
+
     // Now we submit app3 to a1 (in higher level hierarchy), to see if headroom
     // of app1 (in queue b1) updated correctly
     RMApp app3 = rm1.submitApp(1 * GB, "app", "user", null, "a1");
     MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
-    
+
     // Allocate 3 containers, each one is 8 GB in am3 (24 GB in total)
     waitContainerAllocated(am3, 8 * GB, 3, 2, rm1, nm1);
-    
+
     // Allocated one more container with 4 GB resource in b1
     waitContainerAllocated(am1, 1 * GB, 1, 4, rm1, nm1);
-    
+
     // Total is 100 GB,
     // B2 uses 41 GB (5 * 8GB containers and 1 AM container)
     // B1 uses 4 GB (3 * 1GB containers and 1 AM container)
@@ -3012,7 +3012,7 @@ public class TestCapacityScheduler {
     Assert.assertEquals(30 * GB,
         am1.doHeartbeat().getAvailableResources().getMemorySize());
   }
-  
+
   @Test
   public void testParentQueueMaxCapsAreRespected() throws Exception {
     /*
@@ -3028,7 +3028,7 @@ public class TestCapacityScheduler {
     csConf.setCapacity(A, 50);
     csConf.setMaximumCapacity(A, 50);
     csConf.setCapacity(B, 50);
-    
+
     // Define 2nd-level queues
     csConf.setQueues(A, new String[] {"a1", "a2"});
     csConf.setCapacity(A1, 50);
@@ -3037,7 +3037,7 @@ public class TestCapacityScheduler {
     csConf.setUserLimitFactor(A2, 100.0f);
     csConf.setCapacity(B1, B1_CAPACITY);
     csConf.setUserLimitFactor(B1, 100.0f);
-    
+
     YarnConfiguration conf = new YarnConfiguration(csConf);
     conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
@@ -3048,12 +3048,12 @@ public class TestCapacityScheduler {
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 24 * GB, rm1.getResourceTrackerService());
     nm1.registerNode();
-    
+
     // Launch app1 in a1, resource usage is 1GB (am) + 4GB * 2 = 9GB
     RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a1");
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
     waitContainerAllocated(am1, 4 * GB, 2, 2, rm1, nm1);
-    
+
     // Try to launch app2 in a2, asked 2GB, should success
     RMApp app2 = rm1.submitApp(2 * GB, "app", "user", null, "a2");
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
@@ -3070,24 +3070,24 @@ public class TestCapacityScheduler {
       Assert.fail("Shouldn't successfully allocate containers for am2, "
           + "queue-a's max capacity will be violated if container allocated");
     }
-  
+
   @SuppressWarnings("unchecked")
   private <E> Set<E> toSet(E... elements) {
     Set<E> set = Sets.newHashSet(elements);
     return set;
   }
-  
+
   @Test
   public void testQueueHierarchyPendingResourceUpdate() throws Exception {
     Configuration conf =
         TestUtils.getConfigurationWithQueueLabels(new Configuration(false));
     conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
-    
+
     final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
     mgr.init(conf);
     mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
     mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
-    
+
     MemoryRMStateStore memStore = new MemoryRMStateStore();
     memStore.init(conf);
     MockRM rm = new MockRM(conf, memStore) {
@@ -3095,74 +3095,74 @@ public class TestCapacityScheduler {
         return mgr;
       }
     };
-    
+
     rm.start();
     MockNM nm1 = // label = x
         new MockNM("h1:1234", 200 * GB, rm.getResourceTrackerService());
     nm1.registerNode();
-    
+
     MockNM nm2 = // label = ""
         new MockNM("h2:1234", 200 * GB, rm.getResourceTrackerService());
     nm2.registerNode();
-    
+
     // Launch app1 in queue=a1
     RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "a1");
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
-    
+
    // Launch app2 in queue=b1
     RMApp app2 = rm.submitApp(8 * GB, "app", "user", null, "b1");
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm2);
-    
+
     // am1 asks for 8 * 1GB container for no label
     am1.allocate(Arrays.asList(ResourceRequest.newInstance(
         Priority.newInstance(1), "*", Resources.createResource(1 * GB), 8)),
         null);
-    
+
     checkPendingResource(rm, "a1", 8 * GB, null);
     checkPendingResource(rm, "a", 8 * GB, null);
     checkPendingResource(rm, "root", 8 * GB, null);
-    
+
     // am2 asks for 8 * 1GB container for no label
     am2.allocate(Arrays.asList(ResourceRequest.newInstance(
         Priority.newInstance(1), "*", Resources.createResource(1 * GB), 8)),
         null);
-    
+
     checkPendingResource(rm, "a1", 8 * GB, null);
     checkPendingResource(rm, "a", 8 * GB, null);
     checkPendingResource(rm, "b1", 8 * GB, null);
     checkPendingResource(rm, "b", 8 * GB, null);
     // root = a + b
     checkPendingResource(rm, "root", 16 * GB, null);
-    
+
     // am2 asks for 8 * 1GB container in another priority for no label
     am2.allocate(Arrays.asList(ResourceRequest.newInstance(
         Priority.newInstance(2), "*", Resources.createResource(1 * GB), 8)),
         null);
-    
+
     checkPendingResource(rm, "a1", 8 * GB, null);
     checkPendingResource(rm, "a", 8 * GB, null);
     checkPendingResource(rm, "b1", 16 * GB, null);
     checkPendingResource(rm, "b", 16 * GB, null);
     // root = a + b
     checkPendingResource(rm, "root", 24 * GB, null);
-    
+
     // am1 asks 4 GB resource instead of 8 * GB for priority=1
     am1.allocate(Arrays.asList(ResourceRequest.newInstance(
         Priority.newInstance(1), "*", Resources.createResource(4 * GB), 1)),
         null);
-    
+
     checkPendingResource(rm, "a1", 4 * GB, null);
     checkPendingResource(rm, "a", 4 * GB, null);
     checkPendingResource(rm, "b1", 16 * GB, null);
     checkPendingResource(rm, "b", 16 * GB, null);
     // root = a + b
     checkPendingResource(rm, "root", 20 * GB, null);
-    
+
     // am1 asks 8 * GB resource which label=x
     am1.allocate(Arrays.asList(ResourceRequest.newInstance(
         Priority.newInstance(2), "*", Resources.createResource(8 * GB), 1,
         true, "x")), null);
-    
+
     checkPendingResource(rm, "a1", 4 * GB, null);
     checkPendingResource(rm, "a", 4 * GB, null);
     checkPendingResource(rm, "a1", 8 * GB, "x");
@@ -3172,7 +3172,7 @@ public class TestCapacityScheduler {
     // root = a + b
     checkPendingResource(rm, "root", 20 * GB, null);
     checkPendingResource(rm, "root", 8 * GB, "x");
-    
+
     // some containers allocated for am1, pending resource should decrease
     ContainerId containerId =
         ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
@@ -3181,7 +3181,7 @@ public class TestCapacityScheduler {
     containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
     Assert.assertTrue(rm.waitForState(nm2, containerId,
         RMContainerState.ALLOCATED));
-    
+
     checkPendingResource(rm, "a1", 0 * GB, null);
     checkPendingResource(rm, "a", 0 * GB, null);
     checkPendingResource(rm, "a1", 0 * GB, "x");
@@ -3193,23 +3193,23 @@ public class TestCapacityScheduler {
     // root = a + b
     checkPendingResourceGreaterThanZero(rm, "root", null);
     checkPendingResource(rm, "root", 0 * GB, "x");
-    
+
     // complete am2, pending resource should be 0 now
     AppAttemptRemovedSchedulerEvent appRemovedEvent =
         new AppAttemptRemovedSchedulerEvent(
           am2.getApplicationAttemptId(), RMAppAttemptState.FINISHED, false);
     rm.getResourceScheduler().handle(appRemovedEvent);
-    
+
     checkPendingResource(rm, "a1", 0 * GB, null);
     checkPendingResource(rm, "a", 0 * GB, null);
     checkPendingResource(rm, "a1", 0 * GB, "x");
-    checkPendingResource(rm, "a", 0 * GB, "x"); 
+    checkPendingResource(rm, "a", 0 * GB, "x");
     checkPendingResource(rm, "b1", 0 * GB, null);
     checkPendingResource(rm, "b", 0 * GB, null);
     checkPendingResource(rm, "root", 0 * GB, null);
     checkPendingResource(rm, "root", 0 * GB, "x");
   }
-  
+
   private void checkPendingResource(MockRM rm, String queueName, int memory,
       String label) {
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
@@ -3247,7 +3247,7 @@ public class TestCapacityScheduler {
     Resource minAllocResource = Resource.newInstance(minAllocMb, 1);
     String queueName = "a1";
     RMApp rmApp = rm.submitApp(amMemory, "app-1", "user_0", null, queueName);
-    
+
     assertEquals("RMApp does not containes minimum allocation",
         minAllocResource, rmApp.getAMResourceRequests().get(0).getCapability());
@@ -3479,7 +3479,7 @@ public class TestCapacityScheduler {
         DominantResourceCalculator.class.getName());
     verifyAMLimitForLeafQueue(config);
   }
-  
+
   private FiCaSchedulerApp getFiCaSchedulerApp(MockRM rm,
       ApplicationId appId) {
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
@@ -3492,10 +3492,10 @@ public class TestCapacityScheduler {
     Configuration conf =
         TestUtils.getConfigurationWithQueueLabels(new Configuration(false));
     conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
-    
+
     final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
     mgr.init(conf);
-    
+
     MemoryRMStateStore memStore = new MemoryRMStateStore();
     memStore.init(conf);
     MockRM rm = new MockRM(conf, memStore) {
@@ -3503,17 +3503,17 @@ public class TestCapacityScheduler {
        return mgr;
      }
    };
-    
+
    rm.start();
-    
+
    MockNM nm1 = // label = ""
        new MockNM("h1:1234", 200 * GB, rm.getResourceTrackerService());
    nm1.registerNode();
-    
+
    // Launch app1 in queue=a1
    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "a1");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
-    
+
    // Allocate two more containers
    am1.allocate(
        Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
@@ -3542,15 +3542,15 @@ public class TestCapacityScheduler {
             .newInstance(0, containerId1,
                 ContainerUpdateType.INCREASE_RESOURCE,
                 Resources.createResource(3 * GB), null)));
-    
+
     FiCaSchedulerApp app = getFiCaSchedulerApp(rm, app1.getApplicationId());
-    
+
     Assert.assertEquals(2 * GB,
         app.getAppAttemptResourceUsage().getPending().getMemorySize());
     checkPendingResource(rm, "a1", 2 * GB, null);
     checkPendingResource(rm, "a", 2 * GB, null);
     checkPendingResource(rm, "root", 2 * GB, null);
-    
+
     // am1 asks to change containerId2 (2G -> 3G) and containerId3 (2G -> 5G)
     am1.sendContainerResizingRequest(Arrays.asList(
         UpdateContainerRequest
@@ -3561,13 +3561,13 @@ public class TestCapacityScheduler {
             .newInstance(0, containerId3,
                 ContainerUpdateType.INCREASE_RESOURCE,
                 Resources.createResource(5 * GB), null)));
-    
+
     Assert.assertEquals(6 * GB,
         app.getAppAttemptResourceUsage().getPending().getMemorySize());
     checkPendingResource(rm, "a1", 6 * GB, null);
     checkPendingResource(rm, "a", 6 * GB, null);
     checkPendingResource(rm, "root", 6 * GB, null);
-    
+
     // am1 asks to change containerId1 (1G->3G), containerId2 (2G -> 4G) and
     // containerId3 (2G -> 2G)
     am1.sendContainerResizingRequest(Arrays.asList(
@@ -3650,7 +3650,7 @@ public class TestCapacityScheduler {
         + CapacitySchedulerConfiguration.MAXIMUM_ALLOCATION_VCORES;
     conf.setInt(propName, maxAllocVcores);
   }
-  
+
   private void sentRMContainerLaunched(MockRM rm, ContainerId containerId) {
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
     RMContainer rmContainer = cs.getRMContainer(containerId);
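Beyond the trailing-whitespace cleanup, the substantive TestCapacityScheduler change sits in testRecoverRequestAfterPreemption: the old null/non-null check on getResourceRequest() becomes a 0-or-1 count from the two-argument getOutstandingAsksCount(key, resourceName). A sketch of that check, assuming FiCaSchedulerApp as the app type used by the test; the helper class is hypothetical.

    import org.apache.hadoop.yarn.api.records.ResourceRequest;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
    import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;

    final class RecoveredAskCheck {
      private RecoveredAskCheck() {}

      // After allocation the (key, resourceName) ask drains to 0; after the
      // container is preempted/killed the RM adds it back, so it returns to 1.
      static boolean isRecovered(FiCaSchedulerApp app, ResourceRequest request) {
        SchedulerRequestKey key = SchedulerRequestKey.create(request);
        return app.getOutstandingAsksCount(key, request.getResourceName()) == 1;
      }
    }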
http://git-wip-us.apache.org/repos/asf/hadoop/blob/408d2347/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index ec2b987..0c8f1b2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -1054,9 +1053,13 @@ public class TestLeafQueue {
     //test case 3
     qb.finishApplication(app_0.getApplicationId(), user_0);
     qb.finishApplication(app_2.getApplicationId(), user_1);
-    qb.releaseResource(clusterResource, app_0, app_0.getResource(u0SchedKey),
+    qb.releaseResource(clusterResource, app_0,
+        app_0.getAppSchedulingInfo().getPendingAsk(u0SchedKey)
+            .getPerAllocationResource(),
         null, null, false);
-    qb.releaseResource(clusterResource, app_2, app_2.getResource(u1SchedKey),
+    qb.releaseResource(clusterResource, app_2,
+        app_2.getAppSchedulingInfo().getPendingAsk(u1SchedKey)
+            .getPerAllocationResource(),
         null, null, false);

     qb.setUserLimit(50);
@@ -1954,7 +1957,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
     assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL

     // Another off switch, shouldn't allocate due to delay scheduling
@@ -1963,7 +1966,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
     assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL

     // Another off switch, shouldn't allocate due to delay scheduling
@@ -1972,7 +1975,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
     assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL

     // Another off switch, now we should allocate
@@ -1983,7 +1986,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
     // should NOT reset
     assertEquals(4, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(2, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey));

     // NODE_LOCAL - node_0
     assignment = a.assignContainers(clusterResource, node_0,
@@ -1992,7 +1995,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     // should reset
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));

     // NODE_LOCAL - node_1
     assignment = a.assignContainers(clusterResource, node_1,
@@ -2001,7 +2004,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     // should reset
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
     assertEquals(NodeType.NODE_LOCAL, assignment.getType());

     // Add 1 more request to check for RACK_LOCAL
@@ -2016,7 +2019,7 @@ public class TestLeafQueue {
         TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 4, // one extra
             true, priority, recordFactory));
     app_0.updateResourceRequests(app_0_requests_0);
-    assertEquals(4, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(4, app_0.getOutstandingAsksCount(schedulerKey));

     // Rack-delay
     doReturn(true).when(a).getRackLocalityFullReset();
@@ -2027,7 +2030,7 @@ public class TestLeafQueue {
         new ResourceLimits(clusterResource),
         SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(4, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(4, app_0.getOutstandingAsksCount(schedulerKey));

     // Should assign RACK_LOCAL now
     assignment = a.assignContainers(clusterResource, node_3,
@@ -2036,14 +2039,14 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
     // should reset
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));

     // Shouldn't assign RACK_LOCAL because schedulingOpportunities should have gotten reset.
     assignment = a.assignContainers(clusterResource, node_3,
         new ResourceLimits(clusterResource),
         SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));

     // Next time we schedule RACK_LOCAL, don't reset
     doReturn(false).when(a).getRackLocalityFullReset();
@@ -2055,7 +2058,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
     // should NOT reset
     assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(2, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey));

     // Another RACK_LOCAL since schedulingOpportunities not reset
     assignment = a.assignContainers(clusterResource, node_3,
@@ -2064,7 +2067,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
     // should NOT reset
     assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));

     // Add a request larger than cluster size to verify
     // OFF_SWITCH delay is capped by cluster size
@@ -2170,7 +2173,7 @@ public class TestLeafQueue {
     CSAssignment assignment = null;

     SchedulerRequestKey schedulerKey = toSchedulerKey(priority);
-    assertEquals(3, app1.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app1.getPendingAsk(schedulerKey, ResourceRequest.ANY).getCount());

     // No rack-local yet.
     assignment = a.assignContainers(clusterResource, node2,
@@ -2179,7 +2182,8 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(1, app1.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app1.getTotalRequiredResources(schedulerKey));
+    assertEquals(3,
+        app1.getPendingAsk(schedulerKey, ResourceRequest.ANY).getCount());
     assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL

     // Still no rack-local.
@@ -2188,7 +2192,8 @@ public class TestLeafQueue {
         SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     assertEquals(2, app1.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app1.getTotalRequiredResources(schedulerKey));
+    assertEquals(3,
+        app1.getPendingAsk(schedulerKey, ResourceRequest.ANY).getCount());
     assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL

     // Rack local now.
@@ -2197,7 +2202,8 @@ public class TestLeafQueue {
         SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     assertEquals(0, app1.getSchedulingOpportunities(schedulerKey));
-    assertEquals(2, app1.getTotalRequiredResources(schedulerKey));
+    assertEquals(2,
+        app1.getPendingAsk(schedulerKey, ResourceRequest.ANY).getCount());
     assertEquals(NodeType.RACK_LOCAL, assignment.getType());

     // No off-switch until 3 missed opportunities.
@@ -2214,7 +2220,8 @@ public class TestLeafQueue {
         SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     assertEquals(3, app1.getSchedulingOpportunities(schedulerKey));
-    assertEquals(2, app1.getTotalRequiredResources(schedulerKey));
+    assertEquals(2,
+        app1.getPendingAsk(schedulerKey, ResourceRequest.ANY).getCount());
     assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL

     // Now off-switch should succeed.
@@ -2223,7 +2230,8 @@ public class TestLeafQueue {
         SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     assertEquals(4, app1.getSchedulingOpportunities(schedulerKey));
-    assertEquals(1, app1.getTotalRequiredResources(schedulerKey));
+    assertEquals(1,
+        app1.getPendingAsk(schedulerKey, ResourceRequest.ANY).getCount());
     assertEquals(NodeType.OFF_SWITCH, assignment.getType());

     // Check capping by number of cluster nodes.
@@ -2235,14 +2243,16 @@ public class TestLeafQueue {
         SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     assertEquals(5, app1.getSchedulingOpportunities(schedulerKey));
-    assertEquals(1, app1.getTotalRequiredResources(schedulerKey));
+    assertEquals(1,
+        app1.getPendingAsk(schedulerKey, ResourceRequest.ANY).getCount());
     assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL

     assignment = a.assignContainers(clusterResource, node3,
         new ResourceLimits(clusterResource),
         SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     assertEquals(6, app1.getSchedulingOpportunities(schedulerKey));
-    assertEquals(0, app1.getTotalRequiredResources(schedulerKey));
+    assertEquals(0,
+        app1.getPendingAsk(schedulerKey, ResourceRequest.ANY).getCount());
     assertEquals(NodeType.OFF_SWITCH, assignment.getType());
   }
@@ -2331,9 +2341,9 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey1));
-    assertEquals(2, app_0.getTotalRequiredResources(schedulerKey1));
+    assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey1));
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));

     // Another off-switch, shouldn't allocate P1 due to delay scheduling
     // thus, no P2 either!
@@ -2342,9 +2352,9 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey1));
-    assertEquals(2, app_0.getTotalRequiredResources(schedulerKey1));
+    assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey1));
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));

     // Another off-switch, shouldn't allocate OFF_SWITCH P1
     assignment = a.assignContainers(clusterResource, node_2,
@@ -2352,9 +2362,9 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
     assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey1));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey1));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey1));
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));

     // Now, DATA_LOCAL for P1
     assignment = a.assignContainers(clusterResource, node_0,
@@ -2362,9 +2372,9 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey1));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey1));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey1));
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));

     // Now, OFF_SWITCH for P2
     assignment = a.assignContainers(clusterResource, node_1,
@@ -2372,9 +2382,9 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey1));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey1));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey1));
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey2));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey2));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey2));
   }
@@ -2455,7 +2465,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
     // should reset
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));

     // No allocation on node_1_0 even though it's node/rack local since
     // required(ANY) == 0
@@ -2466,7 +2476,7 @@ public class TestLeafQueue {
     // Still zero
     // since #req=0
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));

     // Add one request
     app_0_requests_0.clear();
@@ -2482,7 +2492,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));

     // NODE_LOCAL - node_1
     assignment = a.assignContainers(clusterResource, node_1_0,
@@ -2491,7 +2501,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     // should reset
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
   }

 @Test (timeout = 30000)
@@ -2870,7 +2880,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));

     // Now sanity-check node_local
     app_0_requests_0.add(
@@ -2901,7 +2911,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
   }
@@ -3354,7 +3364,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
     assertEquals(0, app_0.getLiveContainers().size());
     assertEquals(1, app_1.getLiveContainers().size());
   }
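The TestLeafQueue hunks replace getTotalRequiredResources(schedulerKey) with two spellings: getOutstandingAsksCount(schedulerKey) and getPendingAsk(schedulerKey, ResourceRequest.ANY).getCount(). The diff treats them as interchangeable, which suggests the one-argument count defaults to the ANY resource name; the sketch below states that assumed equivalence explicitly and is hypothetical.

    import org.apache.hadoop.yarn.api.records.ResourceRequest;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
    import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;

    final class TotalRequired {
      private TotalRequired() {}

      // Assumed: the one-arg count equals the ANY ask's count.
      static void assertSameCount(FiCaSchedulerApp app, SchedulerRequestKey key) {
        int viaCount = app.getOutstandingAsksCount(key);
        int viaAsk = app.getPendingAsk(key, ResourceRequest.ANY).getCount();
        if (viaCount != viaAsk) {
          throw new AssertionError(viaCount + " != " + viaAsk);
        }
      }
    }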
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport; @@ -56,6 +57,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaS import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk; +import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -548,11 +551,12 @@ public class TestNodeLabelContainerAllocation { ApplicationAttemptId attemptId, int memory) { CapacityScheduler cs = (CapacityScheduler) rm.getRMContext().getScheduler(); FiCaSchedulerApp app = cs.getApplicationAttempt(attemptId); - ResourceRequest rr = - app.getAppSchedulingInfo().getResourceRequest( + PendingAsk ask = + app.getAppSchedulingInfo().getPendingAsk( TestUtils.toSchedulerKey(priority), "*"); Assert.assertEquals(memory, - rr.getCapability().getMemorySize() * rr.getNumContainers()); + ask.getPerAllocationResource().getMemorySize() * ask + .getCount()); } private void checkLaunchedContainerNumOnNode(MockRM rm, NodeId nodeId, @@ -607,18 +611,10 @@ public class TestNodeLabelContainerAllocation { (CapacityScheduler) rm1.getRMContext().getScheduler(); FiCaSchedulerApp app = cs.getApplicationAttempt(am1.getApplicationAttemptId()); - List allResourceRequests = - app.getAppSchedulingInfo().getAllResourceRequests(); - for (ResourceRequest changeReq : allResourceRequests) { - if (changeReq.getPriority().getPriority() == 2 - || changeReq.getPriority().getPriority() == 3) { - Assert.assertEquals("Expected label y", "y", - changeReq.getNodeLabelExpression()); - } else if (changeReq.getPriority().getPriority() == 4) { - Assert.assertEquals("Expected label EMPTY", - RMNodeLabelsManager.NO_LABEL, changeReq.getNodeLabelExpression()); - } - } + checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2, "y"); + checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3, "y"); + checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 4, + RMNodeLabelsManager.NO_LABEL); // Previous any request was Y trying to update with z and the // request before ANY label is null @@ -628,17 +624,11 @@ public class TestNodeLabelContainerAllocation { newReq.add(am1.createResourceReq("h1:1234", 1024, 3, 4, null)); newReq.add(am1.createResourceReq("*", 1024, 4, 5, "z")); am1.allocate(newReq, new ArrayList()); - allResourceRequests = app.getAppSchedulingInfo().getAllResourceRequests(); - for (ResourceRequest changeReq : allResourceRequests) { - if (changeReq.getPriority().getPriority() == 3 - || changeReq.getPriority().getPriority() == 4) { - Assert.assertEquals("Expected label z", "z", - changeReq.getNodeLabelExpression()); - } else if (changeReq.getPriority().getPriority() == 2) { - Assert.assertEquals("Expected label y", "y", - changeReq.getNodeLabelExpression()); - } - } + + checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3, "z"); + checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 4, "z"); + checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2, "y"); + // Request before ANY and ANY request is set as NULL. 
@@ -607,18 +611,10 @@
         (CapacityScheduler) rm1.getRMContext().getScheduler();
     FiCaSchedulerApp app =
         cs.getApplicationAttempt(am1.getApplicationAttemptId());
-    List<ResourceRequest> allResourceRequests =
-        app.getAppSchedulingInfo().getAllResourceRequests();
-    for (ResourceRequest changeReq : allResourceRequests) {
-      if (changeReq.getPriority().getPriority() == 2
-          || changeReq.getPriority().getPriority() == 3) {
-        Assert.assertEquals("Expected label y", "y",
-            changeReq.getNodeLabelExpression());
-      } else if (changeReq.getPriority().getPriority() == 4) {
-        Assert.assertEquals("Expected label EMPTY",
-            RMNodeLabelsManager.NO_LABEL, changeReq.getNodeLabelExpression());
-      }
-    }
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2, "y");
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3, "y");
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 4,
+        RMNodeLabelsManager.NO_LABEL);

     // Previous any request was Y trying to update with z and the
     // request before ANY label is null
@@ -628,17 +624,11 @@
     newReq.add(am1.createResourceReq("h1:1234", 1024, 3, 4, null));
     newReq.add(am1.createResourceReq("*", 1024, 4, 5, "z"));
     am1.allocate(newReq, new ArrayList<ContainerId>());
-    allResourceRequests = app.getAppSchedulingInfo().getAllResourceRequests();
-    for (ResourceRequest changeReq : allResourceRequests) {
-      if (changeReq.getPriority().getPriority() == 3
-          || changeReq.getPriority().getPriority() == 4) {
-        Assert.assertEquals("Expected label z", "z",
-            changeReq.getNodeLabelExpression());
-      } else if (changeReq.getPriority().getPriority() == 2) {
-        Assert.assertEquals("Expected label y", "y",
-            changeReq.getNodeLabelExpression());
-      }
-    }
+
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3, "z");
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 4, "z");
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2, "y");
+
     // Request before ANY and ANY request is set as NULL. Request should be set
     // with Empty Label
     List<ResourceRequest> resourceRequest1 = new ArrayList<ResourceRequest>();
@@ -653,14 +643,21 @@
         RMNodeLabelsManager.NO_LABEL));
     resourceRequest1.add(am1.createResourceReq("h2:1234", 1024, 2, 4, null));
     am1.allocate(resourceRequest1, new ArrayList<ContainerId>());
-    allResourceRequests = app.getAppSchedulingInfo().getAllResourceRequests();
-    for (ResourceRequest changeReq : allResourceRequests) {
-      if (changeReq.getPriority().getPriority() == 3) {
-        Assert.assertEquals("Expected label Empty",
-            RMNodeLabelsManager.NO_LABEL, changeReq.getNodeLabelExpression());
-      } else if (changeReq.getPriority().getPriority() == 2) {
-        Assert.assertEquals("Expected label y", RMNodeLabelsManager.NO_LABEL,
-            changeReq.getNodeLabelExpression());
+
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3,
+        RMNodeLabelsManager.NO_LABEL);
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2,
+        RMNodeLabelsManager.NO_LABEL);
+  }
+
+  private void checkNodePartitionOfRequestedPriority(AppSchedulingInfo info,
+      int priority, String expectedPartition) {
+    for (SchedulerRequestKey key : info.getSchedulerKeys()) {
+      if (key.getPriority().getPriority() == priority) {
+        Assert.assertEquals("Expected partition is " + expectedPartition,
+            expectedPartition,
+            info.getSchedulingPlacementSet(key)
+                .getPrimaryRequestedNodePartition());
       }
     }
   }
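The new helper asserts on getPrimaryRequestedNodePartition(), the partition the scheduler actually derived for a priority, instead of scanning every ResourceRequest for its label expression. A sketch of the same lookup in isolation, using only APIs this patch references (the printout is illustrative):

    // Walk each scheduler key of an application attempt and report the
    // primary node partition its placement set resolved to.
    AppSchedulingInfo info = app.getAppSchedulingInfo();
    for (SchedulerRequestKey key : info.getSchedulerKeys()) {
      String partition = info.getSchedulingPlacementSet(key)
          .getPrimaryRequestedNodePartition();
      System.out.println("priority " + key.getPriority().getPriority()
          + " -> partition '" + partition + "'");
    }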
http://git-wip-us.apache.org/repos/asf/hadoop/blob/408d2347/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
index eafa735..5e6548b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
@@ -327,7 +327,7 @@
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));

     // try to assign reducer (5G on node 0 and should reserve)
@@ -346,7 +346,7 @@
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));

     // assign reducer to node 2
@@ -365,7 +365,7 @@
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(
+    assertEquals(1, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));

     // node_1 heartbeat and unreserves from node_0 in order to allocate
@@ -384,7 +384,7 @@
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(8 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(0, app_0.getTotalRequiredResources(
+    assertEquals(0, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
   }

@@ -660,7 +660,7 @@
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));

     // try to assign reducer (5G on node 0 and should reserve)
@@ -679,7 +679,7 @@
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));

     // assign reducer to node 2
@@ -698,7 +698,7 @@
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(
+    assertEquals(1, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));

     // node_1 heartbeat and won't unreserve from node_0, potentially stuck
@@ -718,7 +718,7 @@
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(
+    assertEquals(1, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
   }

@@ -839,7 +839,7 @@
     assertEquals(null, node_0.getReservedContainer());
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));

     // try to assign reducer (5G on node 0 and should reserve)
@@ -857,7 +857,7 @@
         .getMemorySize());
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));

     // could allocate but told need to unreserve first
@@ -874,7 +874,7 @@
     assertEquals(null, node_0.getReservedContainer());
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(8 * GB, node_1.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(
+    assertEquals(1, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
   }
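Read together, the TestReservations changes also pin down the counter's semantics around reservations: reserving capacity on a node does not deduct an outstanding ask, only a completed allocation does. A sketch of the progression the first scenario above asserts (the step comments paraphrase those assertions; no new API is assumed):

    // Outstanding reduce-priority asks across the reservation scenario:
    int asks = app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce));
    // 1) two reducers pending, nothing placed            -> asks == 2
    // 2) 5G reserved on node_0, no container allocated   -> asks == 2 (a
    //    reservation does not deduct an ask)
    // 3) one reducer actually allocated on node_2        -> asks == 1
    // 4) node_0 unreserved, last reducer lands on node_1 -> asks == 0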