Subject: svn commit: r1182003 - in /hadoop/common/branches/branch-0.23/hadoop-mapreduce-project: ./ hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ hadoop-yarn/had...
Date: Tue, 11 Oct 2011 18:25:08 -0000
From: acmurthy@apache.org
To: mapreduce-commits@hadoop.apache.org

Author: acmurthy
Date: Tue Oct 11 18:25:07 2011
New Revision: 1182003

URL: http://svn.apache.org/viewvc?rev=1182003&view=rev
Log:
Merge -c 1182000 from trunk to branch-0.23 to fix MAPREDUCE-3126.
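For context on the fix: before this change an application's headroom was only refreshed inside assignContainers(), so it went stale when nodes joined or left the cluster. The LeafQueue hunk below now recomputes every active application's user limit in updateClusterResource(). What follows is a minimal, self-contained sketch of that idea using hypothetical stand-in types (not the real YARN classes); its user-limit arithmetic is deliberately simplified to "the queue's share of the cluster divided evenly among active users", which matches the expectations in the new testHeadroom case further down but is not the scheduler's full computeUserLimit() logic.

// Sketch only: hypothetical types, simplified arithmetic.
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class HeadroomSketch {

  /** Hypothetical stand-in for SchedulerApp. */
  static class App {
    final String user;
    long headroomMb; // last headroom pushed to the application
    App(String user) { this.user = user; }
  }

  /** Hypothetical stand-in for LeafQueue. */
  static class Queue {
    final double absoluteCapacity; // queue's share of the cluster, e.g. 0.10
    final List<App> activeApps = new ArrayList<App>();
    Queue(double absoluteCapacity) { this.absoluteCapacity = absoluteCapacity; }

    // Simplified analogue of computeUserLimit(): the queue's share of the
    // cluster divided evenly among users with active applications. Not the
    // scheduler's actual formula.
    long computeUserLimitMb(long clusterMb) {
      Set<String> users = new HashSet<String>();
      for (App app : activeApps) {
        users.add(app.user);
      }
      int activeUsers = Math.max(1, users.size());
      return (long) (clusterMb * absoluteCapacity) / activeUsers;
    }

    // Analogue of the fixed updateClusterResource(): on every change to the
    // cluster size, push a freshly computed headroom to each active app,
    // instead of waiting for the next assignContainers() call for that app.
    void updateClusterResource(long clusterMb) {
      long userLimitMb = computeUserLimitMb(clusterMb);
      for (App app : activeApps) {
        app.headroomMb = userLimitMb; // analogue of setAvailableResourceLimit()
      }
    }
  }

  public static void main(String[] args) {
    long GB = 1024; // MB per GB
    Queue a = new Queue(0.10); // assume a queue holding 10% of the cluster

    a.activeApps.add(new App("user_0"));
    a.updateClusterResource(100 * 16 * GB); // 100 nodes of 16 GB
    System.out.println(a.activeApps.get(0).headroomMb); // 163840 MB = 10*16 GB

    a.activeApps.add(new App("user_1"));
    a.updateClusterResource(90 * 16 * GB); // cluster shrinks to 90 nodes
    System.out.println(a.activeApps.get(0).headroomMb); // 73728 MB = 9*16 GB / 2
  }
}

Running main() prints 163840 and then 73728, mirroring the 10*16*GB and 9*16*GB/2 headrooms asserted in the testHeadroom case below.
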
Modified:
    hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
    hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
    hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
    hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/CHANGES.txt?rev=1182003&r1=1182002&r2=1182003&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/CHANGES.txt Tue Oct 11 18:25:07 2011
@@ -1531,7 +1531,7 @@ Release 0.23.0 - Unreleased
     job submission files to fail fast. (Abhijit Suresh Shingate via acmurthy)
 
     MAPREDUCE-3158. Fix test failures in MRv1 due to default framework being
-    set to yarn. (Hitesh Shah via acmurhty)
+    set to yarn. (Hitesh Shah via acmurthy)
 
     MAPREDUCE-3167. container-executor is not being packaged with the assembly
     target. (mahadev)
@@ -1542,6 +1542,9 @@ Release 0.23.0 - Unreleased
     MAPREDUCE-2668. Fixed AuxServices to send a signal on application-finish
     to all the services. (Thomas Graves via vinodkv)
 
+    MAPREDUCE-3126. Fixed a corner case in CapacityScheduler where headroom
+    wasn't updated on changes to cluster size. (acmurthy)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java?rev=1182003&r1=1182002&r2=1182003&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java Tue Oct 11 18:25:07 2011
@@ -165,6 +165,12 @@ public class CapacitySchedulerConfigurat
         getInt(getQueuePrefix(queue) + USER_LIMIT, DEFAULT_USER_LIMIT);
     return userLimit;
   }
+
+  public void setUserLimit(String queue, int userLimit) {
+    setInt(getQueuePrefix(queue) + USER_LIMIT, userLimit);
+    LOG.info("here setUserLimit: queuePrefix=" + getQueuePrefix(queue) +
+        ", userLimit=" + getUserLimit(queue));
+  }
 
   public float getUserLimitFactor(String queue) {
     float userLimitFactor =

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java?rev=1182003&r1=1182002&r2=1182003&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java Tue Oct 11 18:25:07 2011
@@ -698,9 +698,7 @@ public class LeafQueue implements CSQueu
       application.showRequests();
 
       synchronized (application) {
-        Resource userLimit =
-            computeUserLimit(application, clusterResource, Resources.none());
-        setUserResourceLimit(application, userLimit);
+        computeAndSetUserResourceLimit(application, clusterResource);
 
         for (Priority priority : application.getPriorities()) {
           // Required resource
@@ -719,7 +717,7 @@ public class LeafQueue implements CSQueu
          }
 
          // User limits
-         userLimit =
+         Resource userLimit =
              computeUserLimit(application, clusterResource, required);
          if (!assignToUser(application.getUser(), userLimit)) {
            break;
          }
@@ -807,10 +805,13 @@ public class LeafQueue implements CSQueu
     return true;
   }
 
-  private void setUserResourceLimit(SchedulerApp application,
-      Resource resourceLimit) {
-    application.setAvailableResourceLimit(resourceLimit);
-    metrics.setAvailableResourcesToUser(application.getUser(), application.getHeadroom());
+  private void computeAndSetUserResourceLimit(SchedulerApp application,
+      Resource clusterResource) {
+    Resource userLimit =
+        computeUserLimit(application, clusterResource, Resources.none());
+    application.setAvailableResourceLimit(userLimit);
+    metrics.setAvailableResourcesToUser(application.getUser(),
+        application.getHeadroom());
   }
 
   private int roundUp(int memory) {
@@ -1270,12 +1271,18 @@ public class LeafQueue implements CSQueu
 
   @Override
   public synchronized void updateClusterResource(Resource clusterResource) {
+    // Update queue properties
     maxActiveApplications =
         computeMaxActiveApplications(clusterResource, maxAMResourcePercent,
             absoluteCapacity);
     maxActiveApplicationsPerUser =
         computeMaxActiveApplicationsPerUser(maxActiveApplications, userLimit,
            userLimitFactor);
+
+    // Update application properties
+    for (SchedulerApp application : activeApplications) {
+      computeAndSetUserResourceLimit(application, clusterResource);
+    }
   }
 
   private synchronized void updateResource(Resource clusterResource) {

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java?rev=1182003&r1=1182002&r2=1182003&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java Tue Oct 11 18:25:07 2011
@@ -14,6 +14,7 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -101,8 +102,10 @@ public class TestApplicationLimits {
     CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
     when(csContext.getConfiguration()).thenReturn(csConf);
-    when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB));
-    when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16*GB));
+    when(csContext.getMinimumResourceCapability()).
+        thenReturn(Resources.createResource(GB));
+    when(csContext.getMaximumResourceCapability()).
+        thenReturn(Resources.createResource(16*GB));
 
     // Say cluster has 100 nodes of 16G each
     Resource clusterResource = Resources.createResource(100 * 16 * GB);
@@ -227,6 +230,76 @@ public class TestApplicationLimits {
     assertEquals(0, queue.getNumPendingApplications(user_1));
   }
 
+  @Test
+  public void testHeadroom() throws Exception {
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration();
+    csConf.setUserLimit(CapacityScheduler.ROOT + "." + A, 25);
+    setupQueueConfiguration(csConf);
+
+    CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
+    when(csContext.getConfiguration()).thenReturn(csConf);
+    when(csContext.getMinimumResourceCapability()).
+        thenReturn(Resources.createResource(GB));
+    when(csContext.getMaximumResourceCapability()).
+        thenReturn(Resources.createResource(16*GB));
+
+    // Say cluster has 100 nodes of 16G each
+    Resource clusterResource = Resources.createResource(100 * 16 * GB);
+    when(csContext.getClusterResources()).thenReturn(clusterResource);
+
+    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
+    CapacityScheduler.parseQueue(csContext, csConf, null, "root",
+        queues, queues,
+        CapacityScheduler.queueComparator,
+        CapacityScheduler.applicationComparator,
+        TestUtils.spyHook);
+
+    // Manipulate queue 'a'
+    LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue)queues.get(A));
+
+    String host_0 = "host_0";
+    String rack_0 = "rack_0";
+    SchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 16*GB);
+
+    final String user_0 = "user_0";
+    final String user_1 = "user_1";
+
+    int APPLICATION_ID = 0;
+
+    // Submit first application from user_0, check headroom
+    SchedulerApp app_0_0 = getMockApplication(APPLICATION_ID++, user_0);
+    queue.submitApplication(app_0_0, user_0, A);
+    queue.assignContainers(clusterResource, node_0); // Schedule to compute
+    Resource expectedHeadroom = Resources.createResource(10*16*GB);
+    verify(app_0_0).setAvailableResourceLimit(eq(expectedHeadroom));
+
+    // Submit second application from user_0, check headroom
+    SchedulerApp app_0_1 = getMockApplication(APPLICATION_ID++, user_0);
+    queue.submitApplication(app_0_1, user_0, A);
+    queue.assignContainers(clusterResource, node_0); // Schedule to compute
+    verify(app_0_0, times(2)).setAvailableResourceLimit(eq(expectedHeadroom));
+    verify(app_0_1).setAvailableResourceLimit(eq(expectedHeadroom));// no change
+
+    // Submit first application from user_1, check for new headroom
+    SchedulerApp app_1_0 = getMockApplication(APPLICATION_ID++, user_1);
+    queue.submitApplication(app_1_0, user_1, A);
+    queue.assignContainers(clusterResource, node_0); // Schedule to compute
+    expectedHeadroom = Resources.createResource(10*16*GB / 2); // changes
+    verify(app_0_0).setAvailableResourceLimit(eq(expectedHeadroom));
+    verify(app_0_1).setAvailableResourceLimit(eq(expectedHeadroom));
+    verify(app_1_0).setAvailableResourceLimit(eq(expectedHeadroom));
+
+    // Now reduce cluster size and check for the smaller headroom
+    clusterResource = Resources.createResource(90*16*GB);
+    queue.assignContainers(clusterResource, node_0); // Schedule to compute
+    expectedHeadroom = Resources.createResource(9*16*GB / 2); // changes
+    verify(app_0_0).setAvailableResourceLimit(eq(expectedHeadroom));
+    verify(app_0_1).setAvailableResourceLimit(eq(expectedHeadroom));
+    verify(app_1_0).setAvailableResourceLimit(eq(expectedHeadroom));
+  }
+
+
   @After
   public void tearDown() {

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java?rev=1182003&r1=1182002&r2=1182003&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java Tue Oct 11 18:25:07 2011
@@ -117,7 +117,7 @@ public class TestLeafQueue {
     LOG.info("Setup top-level queues a and b");
   }
 
-  private LeafQueue stubLeafQueue(LeafQueue queue) {
+  static LeafQueue stubLeafQueue(LeafQueue queue) {
 
     // Mock some methods for ease in these unit tests