hadoop-common-commits mailing list archives

From xg...@apache.org
Subject [08/21] hadoop git commit: YARN-6040. Introduce api independent PendingAsk to replace usage of ResourceRequest within Scheduler classes. (Wangda Tan via asuresh)
Date Mon, 09 Jan 2017 21:30:03 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2977bc6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SchedulingPlacementSet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SchedulingPlacementSet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SchedulingPlacementSet.java
index 3cf5fa2..3e0620e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SchedulingPlacementSet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SchedulingPlacementSet.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 
 import java.util.Collection;
@@ -70,22 +72,38 @@ public interface SchedulingPlacementSet<N extends SchedulerNode> {
   Map<String, ResourceRequest> getResourceRequests();
 
   /**
-   * Get ResourceRequest by given schedulerKey and resourceName
+   * Get the pending ask for the given resourceName. If there is no such
+   * pending ask, returns {@link PendingAsk#ZERO}.
+   *
+   * @param resourceName resourceName
+   * @return PendingAsk
+   */
+  PendingAsk getPendingAsk(String resourceName);
+
+  /**
+   * Get the number of pending allocations for the given resourceName. If
+   * there is no such pending ask, returns 0.
+   *
    * @param resourceName resourceName
-   * @return ResourceRequest
+   * @return the number of pending allocations
    */
-  ResourceRequest getResourceRequest(String resourceName);
+  int getOutstandingAsksCount(String resourceName);
 
   /**
    * Notify container allocated.
    * @param schedulerKey SchedulerRequestKey for this ResourceRequest
    * @param type Type of the allocation
    * @param node Which node this container allocated on
-   * @param request Which resource request to allocate
    * @return list of ResourceRequests deducted
    */
   List<ResourceRequest> allocate(SchedulerRequestKey schedulerKey,
-      NodeType type, SchedulerNode node, ResourceRequest request);
+      NodeType type, SchedulerNode node);
+
+  /**
+   * Returns an iterator over the accepted resourceNames.
+   * @return Iterator of accepted resourceNames
+   */
+  Iterator<String> getAcceptedResouceNames();
 
   /**
    * We can still have pending requirement for a given NodeType and node
@@ -94,4 +112,47 @@ public interface SchedulingPlacementSet<N extends SchedulerNode> {
   * @return true if we have a pending requirement
    */
   boolean canAllocate(NodeType type, SchedulerNode node);
+
+  /**
+   * Can scheduling be delayed to the given resourceName to improve locality?
+   * TODO (wangda): This should be moved out of SchedulingPlacementSet
+   * and should belong to specific delay scheduling policy impl.
+   *
+   * @param resourceName resourceName
+   * @return true if allocation can be delayed to this resourceName
+   */
+  boolean canDelayTo(String resourceName);
+
+  /**
+   * Does this {@link SchedulingPlacementSet} accept resources on nodePartition?
+   *
+   * @param nodePartition nodePartition
+   * @param schedulingMode schedulingMode
+   * @return true if resources on this nodePartition are accepted
+   */
+  boolean acceptNodePartition(String nodePartition,
+      SchedulingMode schedulingMode);
+
+  /**
+   * It is possible for one request to accept multiple node partitions,
+   * so this method returns the primary node partition for pending
+   * resource / headroom calculation.
+   *
+   * @return primary requested node partition
+   */
+  String getPrimaryRequestedNodePartition();
+
+  /**
+   * @return number of unique location asks with a pending count greater
+   * than 0 (like /rack1, host1, etc.).
+   *
+   * TODO (wangda): This should be moved out of SchedulingPlacementSet
+   * and should belong to specific delay scheduling policy impl.
+   */
+  int getUniqueLocationAsks();
+
+  /**
+   * Print human-readable requests to the debug log.
+   */
+  void showRequests();
 }

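The interface above is written against the new PendingAsk type from the scheduler.common package, whose definition is not included in this message. Judging only from the accessors this patch exercises later on (getCount(), getPerAllocationResource(), and the PendingAsk#ZERO sentinel), it behaves like an immutable (per-allocation resource, count) pair; the following is a minimal sketch under that assumption, with a simplified stand-in for Resource, and is not the actual Hadoop class.

// Illustrative sketch only: models the shape implied by this patch, an
// immutable pair of (resource asked per allocation, outstanding count),
// with a ZERO sentinel returned when no ask exists for a resourceName.
public final class PendingAskSketch {

  /** Simplified stand-in for org.apache.hadoop.yarn.api.records.Resource. */
  public static final class SimpleResource {
    private final long memorySize;
    private final int virtualCores;

    public SimpleResource(long memorySize, int virtualCores) {
      this.memorySize = memorySize;
      this.virtualCores = virtualCores;
    }

    public long getMemorySize() {
      return memorySize;
    }

    public int getVirtualCores() {
      return virtualCores;
    }
  }

  /** What getPendingAsk() hands back when nothing is pending. */
  public static final PendingAskSketch ZERO =
      new PendingAskSketch(new SimpleResource(0, 0), 0);

  private final SimpleResource perAllocationResource;
  private final int count;

  public PendingAskSketch(SimpleResource perAllocationResource, int count) {
    this.perAllocationResource = perAllocationResource;
    this.count = count;
  }

  public SimpleResource getPerAllocationResource() {
    return perAllocationResource;
  }

  public int getCount() {
    return count;
  }
}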
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2977bc6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
index 6395339..b32f138 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEv
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
+import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
@@ -588,12 +589,14 @@ public class TestAbstractYarnScheduler extends ParameterizedSchedulerTestBase {
       // The core part of this test
       // The killed containers' ResourceRequests are recovered back to the
       // original app-attempt, not the new one
-      for (ResourceRequest request : firstSchedulerAppAttempt
-        .getAppSchedulingInfo().getAllResourceRequests()) {
-        if (request.getPriority().getPriority() == 0) {
-          Assert.assertEquals(0, request.getNumContainers());
-        } else if (request.getPriority().getPriority() == ALLOCATED_CONTAINER_PRIORITY) {
-          Assert.assertEquals(1, request.getNumContainers());
+      for (SchedulerRequestKey key : firstSchedulerAppAttempt.getSchedulerKeys()) {
+        if (key.getPriority().getPriority() == 0) {
+          Assert.assertEquals(0,
+              firstSchedulerAppAttempt.getOutstandingAsksCount(key));
+        } else if (key.getPriority().getPriority() ==
+            ALLOCATED_CONTAINER_PRIORITY) {
+          Assert.assertEquals(1,
+              firstSchedulerAppAttempt.getOutstandingAsksCount(key));
         }
       }
 

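The rewritten loop holds together because getOutstandingAsksCount() has a total contract: where the old code read getNumContainers() off a ResourceRequest that might not exist, the new call simply returns 0 for a key with nothing pending. A minimal map-backed illustration of that contract follows; the class is hypothetical, not the Hadoop implementation.

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of the zero-default lookup the test now relies on.
public class OutstandingAsksSketch {
  private final Map<String, Integer> pendingCounts =
      new HashMap<String, Integer>();

  public void setPendingAsk(String schedulerKey, int count) {
    pendingCounts.put(schedulerKey, count);
  }

  /** Like getOutstandingAsksCount(): never null, 0 when nothing pending. */
  public int getOutstandingAsksCount(String schedulerKey) {
    Integer count = pendingCounts.get(schedulerKey);
    return count == null ? 0 : count.intValue();
  }

  public static void main(String[] args) {
    OutstandingAsksSketch info = new OutstandingAsksSketch();
    info.setPendingAsk("priority-0", 0);
    info.setPendingAsk("priority-1", 1);
    System.out.println(info.getOutstandingAsksCount("priority-0")); // 0
    System.out.println(info.getOutstandingAsksCount("priority-1")); // 1
    System.out.println(info.getOutstandingAsksCount("priority-9")); // 0, no NPE
  }
}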
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2977bc6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
index 468e760..bb29889 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
@@ -141,7 +141,7 @@ public class TestAppSchedulingInfo {
 
     // iterate to verify no ConcurrentModificationException
     for (SchedulerRequestKey schedulerKey : info.getSchedulerKeys()) {
-      info.allocate(NodeType.OFF_SWITCH, null, schedulerKey, req1, null);
+      info.allocate(NodeType.OFF_SWITCH, null, schedulerKey, null);
     }
     Assert.assertEquals(1, info.getSchedulerKeys().size());
     Assert.assertEquals(SchedulerRequestKey.create(req2),
@@ -153,7 +153,7 @@ public class TestAppSchedulingInfo {
     reqs.add(req2);
     info.updateResourceRequests(reqs, false);
     info.allocate(NodeType.OFF_SWITCH, null, SchedulerRequestKey.create(req2),
-        req2, null);
+        null);
     Assert.assertEquals(0, info.getSchedulerKeys().size());
 
     req1 = ResourceRequest.newInstance(pri1,

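Both call-site changes above drop the ResourceRequest argument from allocate(). That follows from the new design: the pending asks now live inside the scheduling info itself, so satisfying an allocation is a matter of decrementing the tracked count rather than handing the matching request back in. A hypothetical sketch of that bookkeeping, reduced to a single scheduler key:

// Hypothetical sketch: allocate() decrements internally tracked state,
// which is why callers no longer pass the ResourceRequest being satisfied.
public class AllocateSketch {
  private int outstandingAsks;

  public AllocateSketch(int initialAsks) {
    this.outstandingAsks = initialAsks;
  }

  /** Mirrors the shape of the new allocate() calls: no request parameter. */
  public boolean allocate() {
    if (outstandingAsks == 0) {
      return false;  // nothing pending for this key
    }
    outstandingAsks--;  // one ask satisfied
    return true;
  }

  public int getOutstandingAsksCount() {
    return outstandingAsks;
  }

  public static void main(String[] args) {
    AllocateSketch info = new AllocateSketch(3);
    info.allocate();
    info.allocate();
    System.out.println(info.getOutstandingAsksCount()); // 1
  }
}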
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2977bc6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
index ff5dc02..9a6c8d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
@@ -93,8 +93,7 @@ public class TestSchedulerApplicationAttempt {
     app.liveContainers.put(container1.getContainerId(), container1);
     SchedulerNode node = createNode();
     app.appSchedulingInfo.allocate(NodeType.OFF_SWITCH, node,
-        toSchedulerKey(requestedPriority),
-        request, container1.getContainer());
+        toSchedulerKey(requestedPriority), container1.getContainer());
     
     // Reserved container
     Priority prio1 = Priority.newInstance(1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2977bc6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 43bdc8e..6230098 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -187,7 +187,7 @@ public class TestCapacityScheduler {
 
   private ResourceManager resourceManager = null;
   private RMContext mockContext;
-  
+
   @Before
   public void setUp() throws Exception {
     resourceManager = new ResourceManager() {
@@ -198,11 +198,11 @@ public class TestCapacityScheduler {
         return mgr;
       }
     };
-    CapacitySchedulerConfiguration csConf 
+    CapacitySchedulerConfiguration csConf
        = new CapacitySchedulerConfiguration();
     setupQueueConfiguration(csConf);
     YarnConfiguration conf = new YarnConfiguration(csConf);
-    conf.setClass(YarnConfiguration.RM_SCHEDULER, 
+    conf.setClass(YarnConfiguration.RM_SCHEDULER,
         CapacityScheduler.class, ResourceScheduler.class);
     resourceManager.init(conf);
     resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
@@ -262,7 +262,7 @@ public class TestCapacityScheduler {
         new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
             hostName, containerManagerPort, httpPort, rackName, capability,
             resourceManager);
-    NodeAddedSchedulerEvent nodeAddEvent1 = 
+    NodeAddedSchedulerEvent nodeAddEvent1 =
         new NodeAddedSchedulerEvent(resourceManager.getRMContext()
             .getRMNodes().get(nm.getNodeId()));
     resourceManager.getResourceScheduler().handle(nodeAddEvent1);
@@ -273,89 +273,89 @@ public class TestCapacityScheduler {
   public void testCapacityScheduler() throws Exception {
 
     LOG.info("--- START: testCapacityScheduler ---");
-        
+
     // Register node1
     String host_0 = "host_0";
-    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = 
-      registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, 
-          Resources.createResource(4 * GB, 1));
-    
+    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 =
+        registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK,
+            Resources.createResource(4 * GB, 1));
+
     // Register node2
     String host_1 = "host_1";
-    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = 
-      registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, 
-          Resources.createResource(2 * GB, 1));
+    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 =
+        registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK,
+            Resources.createResource(2 * GB, 1));
 
     // ResourceRequest priorities
     Priority priority_0 = Priority.newInstance(0);
     Priority priority_1 = Priority.newInstance(1);
-    
+
     // Submit an application
     Application application_0 = new Application("user_0", "a1", resourceManager);
     application_0.submit();
-    
+
     application_0.addNodeManager(host_0, 1234, nm_0);
     application_0.addNodeManager(host_1, 1234, nm_1);
 
     Resource capability_0_0 = Resources.createResource(1 * GB, 1);
     application_0.addResourceRequestSpec(priority_1, capability_0_0);
-    
+
     Resource capability_0_1 = Resources.createResource(2 * GB, 1);
     application_0.addResourceRequestSpec(priority_0, capability_0_1);
 
-    Task task_0_0 = new Task(application_0, priority_1, 
+    Task task_0_0 = new Task(application_0, priority_1,
         new String[] {host_0, host_1});
     application_0.addTask(task_0_0);
-       
+
     // Submit another application
     Application application_1 = new Application("user_1", "b2", resourceManager);
     application_1.submit();
-    
+
     application_1.addNodeManager(host_0, 1234, nm_0);
     application_1.addNodeManager(host_1, 1234, nm_1);
-    
+
     Resource capability_1_0 = Resources.createResource(3 * GB, 1);
     application_1.addResourceRequestSpec(priority_1, capability_1_0);
-    
+
     Resource capability_1_1 = Resources.createResource(2 * GB, 1);
     application_1.addResourceRequestSpec(priority_0, capability_1_1);
 
-    Task task_1_0 = new Task(application_1, priority_1, 
+    Task task_1_0 = new Task(application_1, priority_1,
         new String[] {host_0, host_1});
     application_1.addTask(task_1_0);
-        
+
     // Send resource requests to the scheduler
     application_0.schedule();
     application_1.schedule();
 
     // Send a heartbeat to kick the tires on the Scheduler
     LOG.info("Kick!");
-    
+
     // task_0_0 and task_1_0 allocated, used=4G
     nodeUpdate(nm_0);
-    
+
     // nothing allocated
     nodeUpdate(nm_1);
-    
+
     // Get allocations from the scheduler
     application_0.schedule();     // task_0_0 
     checkApplicationResourceUsage(1 * GB, application_0);
 
     application_1.schedule();     // task_1_0
     checkApplicationResourceUsage(3 * GB, application_1);
-    
+
     checkNodeResourceUsage(4*GB, nm_0);  // task_0_0 (1G) and task_1_0 (3G)
     checkNodeResourceUsage(0*GB, nm_1);  // no tasks, 2G available
 
     LOG.info("Adding new tasks...");
-    
-    Task task_1_1 = new Task(application_1, priority_0, 
+
+    Task task_1_1 = new Task(application_1, priority_0,
         new String[] {ResourceRequest.ANY});
     application_1.addTask(task_1_1);
 
     application_1.schedule();
 
-    Task task_0_1 = new Task(application_0, priority_0, 
+    Task task_0_1 = new Task(application_0, priority_0,
         new String[] {host_0, host_1});
     application_0.addTask(task_0_1);
 
@@ -365,11 +365,11 @@ public class TestCapacityScheduler {
     LOG.info("Sending hb from " + nm_0.getHostName());
     // nothing new, used=4G
     nodeUpdate(nm_0);
-    
+
     LOG.info("Sending hb from " + nm_1.getHostName());
     // task_0_1 is preferred for locality, used=2G
     nodeUpdate(nm_1);
-    
+
     // Get allocations from the scheduler
     LOG.info("Trying to allocate...");
     application_0.schedule();
@@ -377,10 +377,10 @@ public class TestCapacityScheduler {
 
     application_1.schedule();
     checkApplicationResourceUsage(5 * GB, application_1);
-    
+
     nodeUpdate(nm_0);
     nodeUpdate(nm_1);
-    
+
     checkNodeResourceUsage(4*GB, nm_0);
     checkNodeResourceUsage(2*GB, nm_1);
 
@@ -394,23 +394,23 @@ public class TestCapacityScheduler {
     NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node);
     resourceManager.getResourceScheduler().handle(nodeUpdate);
   }
-  
+
   private CapacitySchedulerConfiguration setupQueueConfiguration(
       CapacitySchedulerConfiguration conf) {
-    
+
     // Define top-level queues
     conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"});
 
     conf.setCapacity(A, A_CAPACITY);
     conf.setCapacity(B, B_CAPACITY);
-    
+
     // Define 2nd-level queues
     conf.setQueues(A, new String[] {"a1", "a2"});
     conf.setCapacity(A1, A1_CAPACITY);
     conf.setUserLimitFactor(A1, 100.0f);
     conf.setCapacity(A2, A2_CAPACITY);
     conf.setUserLimitFactor(A2, 100.0f);
-    
+
     conf.setQueues(B, new String[] {"b1", "b2", "b3"});
     conf.setCapacity(B1, B1_CAPACITY);
     conf.setUserLimitFactor(B1, 100.0f);
@@ -478,8 +478,8 @@ public class TestCapacityScheduler {
     conf.setMaximumCapacity(A, -1);
     assertEquals(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE,conf.getNonLabeledQueueMaximumCapacity(A),delta);
   }
-  
-  
+
+
   @Test
   public void testRefreshQueues() throws Exception {
     CapacityScheduler cs = new CapacityScheduler();
@@ -564,11 +564,11 @@ public class TestCapacityScheduler {
     return null;
   }
 
-  private void checkApplicationResourceUsage(int expected, 
+  private void checkApplicationResourceUsage(int expected,
       Application application) {
     Assert.assertEquals(expected, application.getUsedResources().getMemorySize());
   }
-  
+
   private void checkNodeResourceUsage(int expected,
       org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) {
     Assert.assertEquals(expected, node.getUsed().getMemorySize());
@@ -649,7 +649,7 @@ public class TestCapacityScheduler {
     // Add a new queue b4
     String B4 = B + ".b4";
     float B4_CAPACITY = 10;
-    
+
     B3_CAPACITY -= B4_CAPACITY;
     try {
       conf.setCapacity(A, 80f);
@@ -661,7 +661,7 @@ public class TestCapacityScheduler {
       conf.setCapacity(B4, B4_CAPACITY);
       cs.reinitialize(conf,mockContext);
       checkQueueCapacities(cs, 80f, 20f);
-      
+
       // Verify parent for B4
       CSQueue rootQueue = cs.getRootQueue();
       CSQueue queueB = findQueue(rootQueue, B);
@@ -879,7 +879,7 @@ public class TestCapacityScheduler {
         ResourceScheduler.class);
     MockRM rm = new MockRM(conf);
     rm.start();
-    
+
     MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);
     RMApp app1 = rm.submitApp(2048);
     // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
@@ -909,7 +909,7 @@ public class TestCapacityScheduler {
     Assert.assertEquals(1, allocated1.size());
     Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemorySize());
     Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
-    
+
     report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
     // check node report, 4 GB used and 0 GB available
     Assert.assertEquals(0, report_nm1.getAvailableResource().getMemorySize());
@@ -918,13 +918,13 @@ public class TestCapacityScheduler {
     // check container is assigned with 2 GB.
     Container c1 = allocated1.get(0);
     Assert.assertEquals(2 * GB, c1.getResource().getMemorySize());
-    
+
     // update node resource to 2 GB, so resource is over-consumed.
-    Map<NodeId, ResourceOption> nodeResourceMap = 
+    Map<NodeId, ResourceOption> nodeResourceMap =
         new HashMap<NodeId, ResourceOption>();
-    nodeResourceMap.put(nm1.getNodeId(), 
+    nodeResourceMap.put(nm1.getNodeId(),
         ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1));
-    UpdateNodeResourceRequest request = 
+    UpdateNodeResourceRequest request =
         UpdateNodeResourceRequest.newInstance(nodeResourceMap);
     AdminService as = ((MockRM)rm).getAdminService();
     as.updateNodeResource(request);
@@ -943,7 +943,7 @@ public class TestCapacityScheduler {
     report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
     Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemorySize());
     Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemorySize());
-    
+
     // Check container can complete successfully in case of resource over-commitment.
     ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
         c1.getId(), ContainerState.COMPLETE, "", 0, c1.getResource());
@@ -961,7 +961,7 @@ public class TestCapacityScheduler {
     Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());
     // As container return 2 GB back, the available resource becomes 0 again.
     Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize());
-    
+
     // Verify no NPE is trigger in schedule after resource is updated.
     am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 1, 1);
     alloc1Response = am1.schedule();
@@ -979,7 +979,7 @@ public class TestCapacityScheduler {
         0, alloc1Response.getAllocatedContainers().size());
     rm.stop();
   }
-    
+
   @Test
   public void testGetAppsInQueue() throws Exception {
     Application application_0 = new Application("user_0", "a1", resourceManager);
@@ -1027,7 +1027,7 @@ public class TestCapacityScheduler {
           cs.getSchedulerApplications(), cs, "a1");
     Assert.assertEquals("a1", app.getQueue().getQueueName());
   }
-  
+
   @Test
   public void testAsyncScheduling() throws Exception {
     Configuration conf = new Configuration();
@@ -1038,7 +1038,7 @@ public class TestCapacityScheduler {
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
 
     final int NODES = 100;
-    
+
     // Register nodes
     for (int i=0; i < NODES; ++i) {
       String host = "192.168.1." + i;
@@ -1046,7 +1046,7 @@ public class TestCapacityScheduler {
           MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, host);
       cs.handle(new NodeAddedSchedulerEvent(node));
     }
-    
+
     // Now directly exercise the scheduling loop
     for (int i=0; i < NODES; ++i) {
       CapacityScheduler.schedule(cs);
@@ -1068,7 +1068,7 @@ public class TestCapacityScheduler {
           && attemptPM.getResourcePreempted().equals(currentAttemptPreempted)
           && app.getCurrentAppAttempt().getRMAppAttemptMetrics()
             .getIsPreempted() == currentAttemptAMPreempted
-          && attemptPM.getNumNonAMContainersPreempted() == 
+          && attemptPM.getNumNonAMContainersPreempted() ==
              numLatestAttemptTaskPreempted) {
         return;
       }
@@ -1082,7 +1082,7 @@ public class TestCapacityScheduler {
       Thread.sleep(500);
     }
   }
-  
+
   @Test(timeout = 30000)
   public void testAllocateDoesNotBlockOnSchedulerLock() throws Exception {
     final YarnConfiguration conf = new YarnConfiguration();
@@ -1301,7 +1301,7 @@ public class TestCapacityScheduler {
 
     rm1.stop();
   }
-  
+
   @Test(timeout = 300000)
   public void testRecoverRequestAfterPreemption() throws Exception {
     Configuration conf = new Configuration();
@@ -1335,8 +1335,9 @@ public class TestCapacityScheduler {
 
       // Already the node local resource request is cleared from RM after
       // allocation.
-      Assert.assertNull(app.getResourceRequest(
-          SchedulerRequestKey.create(request), request.getResourceName()));
+      Assert.assertEquals(0,
+          app.getOutstandingAsksCount(SchedulerRequestKey.create(request),
+              request.getResourceName()));
     }
 
     // Call killContainer to preempt the container
@@ -1346,10 +1347,9 @@ public class TestCapacityScheduler {
     for (ResourceRequest request : requests) {
       // Resource request must have added back in RM after preempt event
       // handling.
-      Assert.assertEquals(
-          1,
-          app.getResourceRequest(SchedulerRequestKey.create(request),
-              request.getResourceName()).getNumContainers());
+      Assert.assertEquals(1,
+          app.getOutstandingAsksCount(SchedulerRequestKey.create(request),
+              request.getResourceName()));
     }
 
     // New container will be allocated and will move to ALLOCATED state
@@ -2617,7 +2617,7 @@ public class TestCapacityScheduler {
     assertEquals("queue B2 max vcores allocation", 12,
         ((LeafQueue) queueB2).getMaximumAllocation().getVirtualCores());
   }
-  
+
   private void waitContainerAllocated(MockAM am, int mem, int nContainer,
       int startContainerId, MockRM rm, MockNM nm) throws Exception {
     for (int cId = startContainerId; cId < startContainerId + nContainer; cId++) {
@@ -2651,44 +2651,44 @@ public class TestCapacityScheduler {
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 100 * GB, rm1.getResourceTrackerService());
     nm1.registerNode();
-    
+
     RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b1");
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
-    
+
     waitContainerAllocated(am1, 1 * GB, 1, 2, rm1, nm1);
 
     // Maximum resource of b1 is 100 * 0.895 * 0.792 = 71 GB
     // 2 GBs used by am, so it's 71 - 2 = 69G.
     Assert.assertEquals(69 * GB,
         am1.doHeartbeat().getAvailableResources().getMemorySize());
-    
+
     RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "b2");
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
-    
+
     // Allocate 5 containers, each one is 8 GB in am2 (40 GB in total)
     waitContainerAllocated(am2, 8 * GB, 5, 2, rm1, nm1);
-    
+
     // Allocated one more container with 1 GB resource in b1
     waitContainerAllocated(am1, 1 * GB, 1, 3, rm1, nm1);
-    
+
     // Total is 100 GB, 
     // B2 uses 41 GB (5 * 8GB containers and 1 AM container)
     // B1 uses 3 GB (2 * 1GB containers and 1 AM container)
     // Available is 100 - 41 - 3 = 56 GB
     Assert.assertEquals(56 * GB,
         am1.doHeartbeat().getAvailableResources().getMemorySize());
-    
+
     // Now we submit app3 to a1 (in higher level hierarchy), to see if headroom
     // of app1 (in queue b1) updated correctly
     RMApp app3 = rm1.submitApp(1 * GB, "app", "user", null, "a1");
     MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
-    
+
     // Allocate 3 containers, each one is 8 GB in am3 (24 GB in total)
     waitContainerAllocated(am3, 8 * GB, 3, 2, rm1, nm1);
-    
+
     // Allocated one more container with 4 GB resource in b1
     waitContainerAllocated(am1, 1 * GB, 1, 4, rm1, nm1);
-    
+
     // Total is 100 GB, 
     // B2 uses 41 GB (5 * 8GB containers and 1 AM container)
     // B1 uses 4 GB (3 * 1GB containers and 1 AM container)
@@ -2697,7 +2697,7 @@ public class TestCapacityScheduler {
     Assert.assertEquals(30 * GB,
         am1.doHeartbeat().getAvailableResources().getMemorySize());
   }
-  
+
   @Test
   public void testParentQueueMaxCapsAreRespected() throws Exception {
     /*
@@ -2713,7 +2713,7 @@ public class TestCapacityScheduler {
     csConf.setCapacity(A, 50);
     csConf.setMaximumCapacity(A, 50);
     csConf.setCapacity(B, 50);
-    
+
     // Define 2nd-level queues
     csConf.setQueues(A, new String[] {"a1", "a2"});
     csConf.setCapacity(A1, 50);
@@ -2722,7 +2722,7 @@ public class TestCapacityScheduler {
     csConf.setUserLimitFactor(A2, 100.0f);
     csConf.setCapacity(B1, B1_CAPACITY);
     csConf.setUserLimitFactor(B1, 100.0f);
-    
+
     YarnConfiguration conf = new YarnConfiguration(csConf);
     conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
 
@@ -2733,12 +2733,12 @@ public class TestCapacityScheduler {
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 24 * GB, rm1.getResourceTrackerService());
     nm1.registerNode();
-    
+
     // Launch app1 in a1, resource usage is 1GB (am) + 4GB * 2 = 9GB 
     RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a1");
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
     waitContainerAllocated(am1, 4 * GB, 2, 2, rm1, nm1);
-    
+
     // Try to launch app2 in a2, asked 2GB, should succeed
     RMApp app2 = rm1.submitApp(2 * GB, "app", "user", null, "a2");
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
@@ -2755,24 +2755,24 @@ public class TestCapacityScheduler {
     Assert.fail("Shouldn't successfully allocate containers for am2, "
         + "queue-a's max capacity will be violated if container allocated");
   }
-  
+
   @SuppressWarnings("unchecked")
   private <E> Set<E> toSet(E... elements) {
     Set<E> set = Sets.newHashSet(elements);
     return set;
   }
-  
+
   @Test
   public void testQueueHierarchyPendingResourceUpdate() throws Exception {
     Configuration conf =
         TestUtils.getConfigurationWithQueueLabels(new Configuration(false));
     conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
-    
+
     final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
     mgr.init(conf);
     mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
     mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
-    
+
     MemoryRMStateStore memStore = new MemoryRMStateStore();
     memStore.init(conf);
     MockRM rm = new MockRM(conf, memStore) {
@@ -2780,74 +2780,74 @@ public class TestCapacityScheduler {
         return mgr;
       }
     };
-    
+
     rm.start();
     MockNM nm1 = // label = x
         new MockNM("h1:1234", 200 * GB, rm.getResourceTrackerService());
     nm1.registerNode();
-    
+
     MockNM nm2 = // label = ""
         new MockNM("h2:1234", 200 * GB, rm.getResourceTrackerService());
     nm2.registerNode();
-    
+
     // Launch app1 in queue=a1
     RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "a1");
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
-    
+
     // Launch app2 in queue=b1  
     RMApp app2 = rm.submitApp(8 * GB, "app", "user", null, "b1");
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm2);
-    
+
     // am1 asks for 8 * 1GB container for no label
     am1.allocate(Arrays.asList(ResourceRequest.newInstance(
         Priority.newInstance(1), "*", Resources.createResource(1 * GB), 8)),
         null);
-    
+
     checkPendingResource(rm, "a1", 8 * GB, null);
     checkPendingResource(rm, "a", 8 * GB, null);
     checkPendingResource(rm, "root", 8 * GB, null);
-    
+
     // am2 asks for 8 * 1GB container for no label
     am2.allocate(Arrays.asList(ResourceRequest.newInstance(
         Priority.newInstance(1), "*", Resources.createResource(1 * GB), 8)),
         null);
-    
+
     checkPendingResource(rm, "a1", 8 * GB, null);
     checkPendingResource(rm, "a", 8 * GB, null);
     checkPendingResource(rm, "b1", 8 * GB, null);
     checkPendingResource(rm, "b", 8 * GB, null);
     // root = a + b
     checkPendingResource(rm, "root", 16 * GB, null);
-    
+
     // am2 asks for 8 * 1GB container in another priority for no label
     am2.allocate(Arrays.asList(ResourceRequest.newInstance(
         Priority.newInstance(2), "*", Resources.createResource(1 * GB), 8)),
         null);
-    
+
     checkPendingResource(rm, "a1", 8 * GB, null);
     checkPendingResource(rm, "a", 8 * GB, null);
     checkPendingResource(rm, "b1", 16 * GB, null);
     checkPendingResource(rm, "b", 16 * GB, null);
     // root = a + b
     checkPendingResource(rm, "root", 24 * GB, null);
-    
+
     // am1 asks 4 GB resource instead of 8 * GB for priority=1
     am1.allocate(Arrays.asList(ResourceRequest.newInstance(
         Priority.newInstance(1), "*", Resources.createResource(4 * GB), 1)),
         null);
-    
+
     checkPendingResource(rm, "a1", 4 * GB, null);
     checkPendingResource(rm, "a", 4 * GB, null);
     checkPendingResource(rm, "b1", 16 * GB, null);
     checkPendingResource(rm, "b", 16 * GB, null);
     // root = a + b
     checkPendingResource(rm, "root", 20 * GB, null);
-    
+
     // am1 asks 8 * GB resource which label=x
     am1.allocate(Arrays.asList(ResourceRequest.newInstance(
         Priority.newInstance(2), "*", Resources.createResource(8 * GB), 1,
         true, "x")), null);
-    
+
     checkPendingResource(rm, "a1", 4 * GB, null);
     checkPendingResource(rm, "a", 4 * GB, null);
     checkPendingResource(rm, "a1", 8 * GB, "x");
@@ -2857,7 +2857,7 @@ public class TestCapacityScheduler {
     // root = a + b
     checkPendingResource(rm, "root", 20 * GB, null);
     checkPendingResource(rm, "root", 8 * GB, "x");
-    
+
     // some containers allocated for am1, pending resource should decrease
     ContainerId containerId =
         ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
@@ -2866,7 +2866,7 @@ public class TestCapacityScheduler {
     containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
     Assert.assertTrue(rm.waitForState(nm2, containerId,
         RMContainerState.ALLOCATED));
-    
+
     checkPendingResource(rm, "a1", 0 * GB, null);
     checkPendingResource(rm, "a", 0 * GB, null);
     checkPendingResource(rm, "a1", 0 * GB, "x");
@@ -2878,23 +2878,23 @@ public class TestCapacityScheduler {
     // root = a + b
     checkPendingResourceGreaterThanZero(rm, "root", null);
     checkPendingResource(rm, "root", 0 * GB, "x");
-    
+
     // complete am2, pending resource should be 0 now
     AppAttemptRemovedSchedulerEvent appRemovedEvent =
         new AppAttemptRemovedSchedulerEvent(
           am2.getApplicationAttemptId(), RMAppAttemptState.FINISHED, false);
     rm.getResourceScheduler().handle(appRemovedEvent);
-    
+
     checkPendingResource(rm, "a1", 0 * GB, null);
     checkPendingResource(rm, "a", 0 * GB, null);
     checkPendingResource(rm, "a1", 0 * GB, "x");
-    checkPendingResource(rm, "a", 0 * GB, "x"); 
+    checkPendingResource(rm, "a", 0 * GB, "x");
     checkPendingResource(rm, "b1", 0 * GB, null);
     checkPendingResource(rm, "b", 0 * GB, null);
     checkPendingResource(rm, "root", 0 * GB, null);
     checkPendingResource(rm, "root", 0 * GB, "x");
   }
-  
+
   private void checkPendingResource(MockRM rm, String queueName, int memory,
       String label) {
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
@@ -2932,10 +2932,10 @@ public class TestCapacityScheduler {
     Resource minAllocResource = Resource.newInstance(minAllocMb, 1);
     String queueName = "a1";
     RMApp rmApp = rm.submitApp(amMemory, "app-1", "user_0", null, queueName);
-       
+
     assertEquals("RMApp does not containes minimum allocation",
         minAllocResource, rmApp.getAMResourceRequest().getCapability());
-    
+
     ResourceScheduler scheduler = rm.getRMContext().getScheduler();
     LeafQueue queueA =
         (LeafQueue) ((CapacityScheduler) scheduler).getQueue(queueName);
@@ -3164,7 +3164,7 @@ public class TestCapacityScheduler {
         DominantResourceCalculator.class.getName());
     verifyAMLimitForLeafQueue(config);
   }
-  
+
   private FiCaSchedulerApp getFiCaSchedulerApp(MockRM rm,
       ApplicationId appId) {
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
@@ -3177,10 +3177,10 @@ public class TestCapacityScheduler {
     Configuration conf =
         TestUtils.getConfigurationWithQueueLabels(new Configuration(false));
     conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
-    
+
     final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
     mgr.init(conf);
-    
+
     MemoryRMStateStore memStore = new MemoryRMStateStore();
     memStore.init(conf);
     MockRM rm = new MockRM(conf, memStore) {
@@ -3188,17 +3188,17 @@ public class TestCapacityScheduler {
         return mgr;
       }
     };
-    
+
     rm.start();
-    
+
     MockNM nm1 = // label = ""
         new MockNM("h1:1234", 200 * GB, rm.getResourceTrackerService());
     nm1.registerNode();
-    
+
     // Launch app1 in queue=a1
     RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "a1");
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
-    
+
     // Allocate two more containers
     am1.allocate(
         Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
@@ -3227,15 +3227,15 @@ public class TestCapacityScheduler {
                 .newInstance(0, containerId1,
                     ContainerUpdateType.INCREASE_RESOURCE,
                     Resources.createResource(3 * GB), null)));
-    
+
     FiCaSchedulerApp app = getFiCaSchedulerApp(rm, app1.getApplicationId());
-    
+
     Assert.assertEquals(2 * GB,
         app.getAppAttemptResourceUsage().getPending().getMemorySize());
     checkPendingResource(rm, "a1", 2 * GB, null);
     checkPendingResource(rm, "a", 2 * GB, null);
     checkPendingResource(rm, "root", 2 * GB, null);
-    
+
     // am1 asks to change containerId2 (2G -> 3G) and containerId3 (2G -> 5G)
     am1.sendContainerResizingRequest(Arrays.asList(
         UpdateContainerRequest
@@ -3246,13 +3246,13 @@ public class TestCapacityScheduler {
                 .newInstance(0, containerId3,
                     ContainerUpdateType.INCREASE_RESOURCE,
                     Resources.createResource(5 * GB), null)));
-    
+
     Assert.assertEquals(6 * GB,
         app.getAppAttemptResourceUsage().getPending().getMemorySize());
     checkPendingResource(rm, "a1", 6 * GB, null);
     checkPendingResource(rm, "a", 6 * GB, null);
     checkPendingResource(rm, "root", 6 * GB, null);
-    
+
     // am1 asks to change containerId1 (1G->3G), containerId2 (2G -> 4G) and
     // containerId3 (2G -> 2G)
     am1.sendContainerResizingRequest(Arrays.asList(
@@ -3335,7 +3335,7 @@ public class TestCapacityScheduler {
         + CapacitySchedulerConfiguration.MAXIMUM_ALLOCATION_VCORES;
     conf.setInt(propName, maxAllocVcores);
   }
-  
+
   private void sentRMContainerLaunched(MockRM rm, ContainerId containerId) {
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
     RMContainer rmContainer = cs.getRMContainer(containerId);

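The pending-resource assertions in testQueueHierarchyPendingResourceUpdate above follow directly from the PendingAsk pair: each ask contributes its per-allocation resource times its outstanding count (am1's first request, 8 containers of 1 GB each, pends 8 GB on a1, a, and root). A hedged sketch of that aggregation, using hypothetical simplified types rather than Hadoop's:

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch: pending memory is the sum over asks of
// (per-allocation memory) * (outstanding count), e.g. 8 * 1GB = 8GB.
public class PendingResourceSketch {
  private static final long GB = 1024L * 1024L * 1024L;

  private static final class Ask {
    private final long perAllocationMemory;
    private final int count;

    Ask(long perAllocationMemory, int count) {
      this.perAllocationMemory = perAllocationMemory;
      this.count = count;
    }
  }

  private static long pendingMemory(List<Ask> asks) {
    long total = 0L;
    for (Ask ask : asks) {
      total += ask.perAllocationMemory * ask.count;
    }
    return total;
  }

  public static void main(String[] args) {
    List<Ask> asks = new ArrayList<Ask>();
    asks.add(new Ask(1 * GB, 8));  // am1: 8 x 1GB containers
    System.out.println(pendingMemory(asks) / GB + " GB pending"); // 8 GB pending
  }
}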
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2977bc6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index e495328..b2695bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -1056,9 +1055,13 @@ public class TestLeafQueue {
     //test case 3
     qb.finishApplication(app_0.getApplicationId(), user_0);
     qb.finishApplication(app_2.getApplicationId(), user_1);
-    qb.releaseResource(clusterResource, app_0, app_0.getResource(u0SchedKey),
+    qb.releaseResource(clusterResource, app_0,
+        app_0.getAppSchedulingInfo().getPendingAsk(u0SchedKey)
+            .getPerAllocationResource(),
         null, null, false);
-    qb.releaseResource(clusterResource, app_2, app_2.getResource(u1SchedKey),
+    qb.releaseResource(clusterResource, app_2,
+        app_2.getAppSchedulingInfo().getPendingAsk(u1SchedKey)
+            .getPerAllocationResource(),
         null, null, false);
 
     qb.setUserLimit(50);
@@ -1956,7 +1959,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
     assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL
 
     // Another off switch, shouldn't allocate due to delay scheduling
@@ -1965,7 +1968,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
     assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL
     
     // Another off switch, shouldn't allocate due to delay scheduling
@@ -1974,7 +1977,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
     assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL
     
     // Another off switch, now we should allocate 
@@ -1985,7 +1988,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
     // should NOT reset
     assertEquals(4, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(2, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey));
     
     // NODE_LOCAL - node_0
     assignment = a.assignContainers(clusterResource, node_0,
@@ -1994,7 +1997,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     // should reset
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));
     
     // NODE_LOCAL - node_1
     assignment = a.assignContainers(clusterResource, node_1,
@@ -2003,7 +2006,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     // should reset
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
     assertEquals(NodeType.NODE_LOCAL, assignment.getType());
     
     // Add 1 more request to check for RACK_LOCAL
@@ -2018,7 +2021,7 @@ public class TestLeafQueue {
         TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 4, // one extra
             true, priority, recordFactory));
     app_0.updateResourceRequests(app_0_requests_0);
-    assertEquals(4, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(4, app_0.getOutstandingAsksCount(schedulerKey));
     
     // Rack-delay
     doReturn(true).when(a).getRackLocalityFullReset();
@@ -2029,7 +2032,7 @@ public class TestLeafQueue {
         new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(4, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(4, app_0.getOutstandingAsksCount(schedulerKey));
 
     // Should assign RACK_LOCAL now
     assignment = a.assignContainers(clusterResource, node_3,
@@ -2038,14 +2041,14 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
     // should reset
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
     
     // Shouldn't assign RACK_LOCAL because schedulingOpportunities should have gotten reset.
     assignment = a.assignContainers(clusterResource, node_3,
         new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
 
     // Next time we schedule RACK_LOCAL, don't reset
     doReturn(false).when(a).getRackLocalityFullReset();
@@ -2057,7 +2060,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
     // should NOT reset
     assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(2, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey));
 
     // Another RACK_LOCAL since schedulingOpportunities not reset
     assignment = a.assignContainers(clusterResource, node_3,
@@ -2066,7 +2069,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
     // should NOT reset
     assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));
     
     // Add a request larger than cluster size to verify
     // OFF_SWITCH delay is capped by cluster size
@@ -2185,9 +2188,9 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey1));
-    assertEquals(2, app_0.getTotalRequiredResources(schedulerKey1));
+    assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey1));
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
 
     // Another off-switch, shouldn't allocate P1 due to delay scheduling
     // thus, no P2 either!
@@ -2196,9 +2199,9 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey1));
-    assertEquals(2, app_0.getTotalRequiredResources(schedulerKey1));
+    assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey1));
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
 
     // Another off-switch, shouldn't allocate OFF_SWITCH P1
     assignment = a.assignContainers(clusterResource, node_2,
@@ -2206,9 +2209,9 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
     assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey1));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey1));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey1));
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
 
     // Now, DATA_LOCAL for P1
     assignment = a.assignContainers(clusterResource, node_0,
@@ -2216,9 +2219,9 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey1));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey1));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey1));
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey2));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
 
     // Now, OFF_SWITCH for P2
     assignment = a.assignContainers(clusterResource, node_1,
@@ -2226,9 +2229,9 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey1));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey1));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey1));
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey2));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey2));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey2));
 
   }
   
@@ -2309,7 +2312,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
     // should reset
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
 
     // No allocation on node_1_0 even though it's node/rack local since
     // required(ANY) == 0
@@ -2320,7 +2323,7 @@ public class TestLeafQueue {
     // Still zero
     // since #req=0
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
     
     // Add one request
     app_0_requests_0.clear();
@@ -2336,7 +2339,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));
     
     // NODE_LOCAL - node_1
     assignment = a.assignContainers(clusterResource, node_1_0,
@@ -2345,7 +2348,7 @@ public class TestLeafQueue {
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     // should reset
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
   }
 
   @Test (timeout = 30000)
@@ -2721,7 +2724,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyNoContainerAllocated(assignment);
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(1, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));
 
     // Now sanity-check node_local
     app_0_requests_0.add(
@@ -2752,7 +2755,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(0, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
 
   }
   
@@ -3205,7 +3208,7 @@ public class TestLeafQueue {
     applyCSAssignment(clusterResource, assignment, a, nodes, apps);
     verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
     assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
-    assertEquals(3, app_0.getTotalRequiredResources(schedulerKey));
+    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
     assertEquals(0, app_0.getLiveContainers().size());
     assertEquals(1, app_1.getLiveContainers().size());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2977bc6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index d2b5ae1..c5b3f00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptS
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport;
 
@@ -56,6 +57,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaS
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
+import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -548,11 +551,12 @@ public class TestNodeLabelContainerAllocation {
       ApplicationAttemptId attemptId, int memory) {
     CapacityScheduler cs = (CapacityScheduler) rm.getRMContext().getScheduler();
     FiCaSchedulerApp app = cs.getApplicationAttempt(attemptId);
-    ResourceRequest rr =
-        app.getAppSchedulingInfo().getResourceRequest(
+    PendingAsk ask =
+        app.getAppSchedulingInfo().getPendingAsk(
             TestUtils.toSchedulerKey(priority), "*");
     Assert.assertEquals(memory,
-        rr.getCapability().getMemorySize() * rr.getNumContainers());
+        ask.getPerAllocationResource().getMemorySize() * ask
+            .getCount());
   }
   
   private void checkLaunchedContainerNumOnNode(MockRM rm, NodeId nodeId,
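
The rewritten check above derives total pending memory from the PendingAsk
pair (per-allocation Resource, outstanding count) rather than from a full
ResourceRequest. A sketch of the same computation, assuming the accessors
used in the hunk and that PendingAsk#ZERO (returned when no ask exists)
carries a zero Resource and a zero count:

    // Sketch: total pending memory for (priority, "*") is per-allocation
    // size times outstanding count; with PendingAsk#ZERO the product is
    // simply 0, so no null check is needed.
    PendingAsk ask = app.getAppSchedulingInfo()
        .getPendingAsk(TestUtils.toSchedulerKey(priority), "*");
    long pendingMem =
        ask.getPerAllocationResource().getMemorySize() * ask.getCount();
    Assert.assertEquals(memory, pendingMem);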
@@ -607,18 +611,10 @@ public class TestNodeLabelContainerAllocation {
         (CapacityScheduler) rm1.getRMContext().getScheduler();
     FiCaSchedulerApp app =
         cs.getApplicationAttempt(am1.getApplicationAttemptId());
-    List<ResourceRequest> allResourceRequests =
-        app.getAppSchedulingInfo().getAllResourceRequests();
-    for (ResourceRequest changeReq : allResourceRequests) {
-      if (changeReq.getPriority().getPriority() == 2
-          || changeReq.getPriority().getPriority() == 3) {
-        Assert.assertEquals("Expected label y", "y",
-            changeReq.getNodeLabelExpression());
-      } else if (changeReq.getPriority().getPriority() == 4) {
-        Assert.assertEquals("Expected label EMPTY",
-            RMNodeLabelsManager.NO_LABEL, changeReq.getNodeLabelExpression());
-      }
-    }
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2, "y");
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3, "y");
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 4,
+        RMNodeLabelsManager.NO_LABEL);
 
    // The previous ANY request used label y; now update it to z, while the
    // request before ANY carries a null label
@@ -628,17 +624,11 @@ public class TestNodeLabelContainerAllocation {
     newReq.add(am1.createResourceReq("h1:1234", 1024, 3, 4, null));
     newReq.add(am1.createResourceReq("*", 1024, 4, 5, "z"));
     am1.allocate(newReq, new ArrayList<ContainerId>());
-    allResourceRequests = app.getAppSchedulingInfo().getAllResourceRequests();
-    for (ResourceRequest changeReq : allResourceRequests) {
-      if (changeReq.getPriority().getPriority() == 3
-          || changeReq.getPriority().getPriority() == 4) {
-        Assert.assertEquals("Expected label z", "z",
-            changeReq.getNodeLabelExpression());
-      } else if (changeReq.getPriority().getPriority() == 2) {
-        Assert.assertEquals("Expected label y", "y",
-            changeReq.getNodeLabelExpression());
-      }
-    }
+
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3, "z");
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 4, "z");
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2, "y");
+
    // When the request before ANY and the ANY request both carry a NULL
    // label, the requests should be recorded with the empty label
     List<ResourceRequest> resourceRequest1 = new ArrayList<ResourceRequest>();
@@ -653,14 +643,21 @@ public class TestNodeLabelContainerAllocation {
         RMNodeLabelsManager.NO_LABEL));
     resourceRequest1.add(am1.createResourceReq("h2:1234", 1024, 2, 4, null));
     am1.allocate(resourceRequest1, new ArrayList<ContainerId>());
-    allResourceRequests = app.getAppSchedulingInfo().getAllResourceRequests();
-    for (ResourceRequest changeReq : allResourceRequests) {
-      if (changeReq.getPriority().getPriority() == 3) {
-        Assert.assertEquals("Expected label Empty",
-            RMNodeLabelsManager.NO_LABEL, changeReq.getNodeLabelExpression());
-      } else if (changeReq.getPriority().getPriority() == 2) {
-        Assert.assertEquals("Expected label y", RMNodeLabelsManager.NO_LABEL,
-            changeReq.getNodeLabelExpression());
+
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3,
+        RMNodeLabelsManager.NO_LABEL);
+    checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2,
+        RMNodeLabelsManager.NO_LABEL);
+  }
+
+  private void checkNodePartitionOfRequestedPriority(AppSchedulingInfo info,
+      int priority, String expectedPartition) {
+    for (SchedulerRequestKey key : info.getSchedulerKeys()) {
+      if (key.getPriority().getPriority() == priority) {
+        Assert.assertEquals("Expected partition is " + expectedPartition,
+            expectedPartition,
+            info.getSchedulingPlacementSet(key)
+                .getPrimaryRequestedNodePartition());
       }
     }
   }
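
The helper above replaces the old loops over getAllResourceRequests(): for
each scheduler key at the requested priority it reads the placement set's
primary requested node partition, which under the YARN-6040 model tracks
the node-label expression of that key's ANY request. A hypothetical call
site, mirroring the assertions in the hunks above:

    // Hypothetical usage; `app` is the FiCaSchedulerApp fetched from the
    // CapacityScheduler as in the diff.
    AppSchedulingInfo info = app.getAppSchedulingInfo();
    checkNodePartitionOfRequestedPriority(info, 2, "y");
    checkNodePartitionOfRequestedPriority(info, 4,
        RMNodeLabelsManager.NO_LABEL);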

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2977bc6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
index f9bf89d..3a154b2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
@@ -329,7 +329,7 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
 
     // try to assign reducer (5G on node 0 and should reserve)
@@ -348,7 +348,7 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
 
     // assign reducer to node 2
@@ -367,7 +367,7 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(
+    assertEquals(1, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
 
     // node_1 heartbeat and unreserves from node_0 in order to allocate
@@ -386,7 +386,7 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(8 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(0, app_0.getTotalRequiredResources(
+    assertEquals(0, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
   }
 
@@ -662,7 +662,7 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
 
     // try to assign reducer (5G on node 0 and should reserve)
@@ -681,7 +681,7 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
 
     // assign reducer to node 2
@@ -700,7 +700,7 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(
+    assertEquals(1, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
 
     // node_1 heartbeat and won't unreserve from node_0, potentially stuck
@@ -720,7 +720,7 @@ public class TestReservations {
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
     assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(
+    assertEquals(1, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
   }
 
@@ -841,7 +841,7 @@ public class TestReservations {
     assertEquals(null, node_0.getReservedContainer());
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
 
     // try to assign reducer (5G on node 0 and should reserve)
@@ -859,7 +859,7 @@ public class TestReservations {
         .getMemorySize());
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
-    assertEquals(2, app_0.getTotalRequiredResources(
+    assertEquals(2, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
 
     // could allocate, but is told it needs to unreserve first
@@ -876,7 +876,7 @@ public class TestReservations {
     assertEquals(null, node_0.getReservedContainer());
     assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
     assertEquals(8 * GB, node_1.getAllocatedResource().getMemorySize());
-    assertEquals(1, app_0.getTotalRequiredResources(
+    assertEquals(1, app_0.getOutstandingAsksCount(
         toSchedulerKey(priorityReduce)));
   }
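
Reading the count progression asserted in these reservation tests: a
reservation leaves the outstanding-ask count unchanged, while a real
allocation (including an unreserve-then-allocate) deducts exactly one ask
for the reducer's SchedulerRequestKey. A schematic sketch of that invariant,
assuming the fixtures from the tests above:

    // Sketch: one allocation deducts exactly one outstanding ask; a
    // reservation does not.
    int before = app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce));
    // ... drive one node heartbeat that allocates (not reserves) a reducer ...
    int after = app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce));
    assertEquals(before - 1, after);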
 

