hadoop-common-commits mailing list archives

From wan...@apache.org
Subject [1/2] hadoop git commit: YARN-5889. Improve and refactor user-limit calculation in Capacity Scheduler. (Sunil G via wangda)
Date Thu, 09 Feb 2017 18:26:47 GMT
Repository: hadoop
Updated Branches:
  refs/heads/trunk b6bb99c18 -> 5fb723bb7


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
new file mode 100644
index 0000000..05503c6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
@@ -0,0 +1,982 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractUsersManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
+import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.Resources;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * {@link UsersManager} tracks all users in the system along with their
+ * respective data structures.
+ */
+@Private
+public class UsersManager implements AbstractUsersManager {
+
+  private static final Log LOG = LogFactory.getLog(UsersManager.class);
+
+  /*
+   * Member declarations for the UsersManager class.
+   */
+  private final LeafQueue lQueue;
+  private final RMNodeLabelsManager labelManager;
+  private final ResourceCalculator resourceCalculator;
+  private final CapacitySchedulerContext scheduler;
+  private Map<String, User> users = new ConcurrentHashMap<>();
+
+  private ResourceUsage totalResUsageForActiveUsers = new ResourceUsage();
+  private ResourceUsage totalResUsageForNonActiveUsers = new ResourceUsage();
+  private Set<String> activeUsersSet = new HashSet<String>();
+  private Set<String> nonActiveUsersSet = new HashSet<String>();
+
+  // Summation of consumed ratios for all users in queue
+  private UsageRatios qUsageRatios;
+
+  // Version counter used to detect, at each user-limit calculation, whether
+  // the user state has changed.
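+  // How this is used: latestVersionOfUsersState is bumped whenever users or
+  // their resource usage change; the per-partition, per-scheduling-mode
+  // versions cached below are compared against it, and any mismatch forces
+  // the user-limit to be recomputed.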
+  private AtomicLong latestVersionOfUsersState = new AtomicLong(0);
+  private Map<String, Map<SchedulingMode, Long>> localVersionOfActiveUsersState =
+      new HashMap<String, Map<SchedulingMode, Long>>();
+  private Map<String, Map<SchedulingMode, Long>> localVersionOfAllUsersState =
+      new HashMap<String, Map<SchedulingMode, Long>>();
+
+  private volatile int userLimit;
+  private volatile float userLimitFactor;
+
+  private WriteLock writeLock;
+  private ReadLock readLock;
+
+  private final QueueMetrics metrics;
+  private AtomicInteger activeUsers = new AtomicInteger(0);
+  private Map<String, Set<ApplicationId>> usersApplications =
+      new HashMap<String, Set<ApplicationId>>();
+
+  // Pre-computed list of user-limits.
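+  // Keyed by node partition, then by scheduling mode.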
+  Map<String, Map<SchedulingMode, Resource>> preComputedActiveUserLimit = new ConcurrentHashMap<>();
+  Map<String, Map<SchedulingMode, Resource>> preComputedAllUserLimit = new ConcurrentHashMap<>();
+
+  /**
+   * UsageRatios stores, per node label, the total used-resource ratio across
+   * all users of the queue.
+   */
+  static private class UsageRatios {
+    private Map<String, Float> usageRatios;
+    private ReadLock readLock;
+    private WriteLock writeLock;
+
+    public UsageRatios() {
+      ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+      readLock = lock.readLock();
+      writeLock = lock.writeLock();
+      usageRatios = new HashMap<String, Float>();
+    }
+
+    private void incUsageRatio(String label, float delta) {
+      try {
+        writeLock.lock();
+        float usage = 0f;
+        if (usageRatios.containsKey(label)) {
+          usage = usageRatios.get(label);
+        }
+        usage += delta;
+        usageRatios.put(label, usage);
+      } finally {
+        writeLock.unlock();
+      }
+    }
+
+    private float getUsageRatio(String label) {
+      try {
+        readLock.lock();
+        Float f = usageRatios.get(label);
+        if (null == f) {
+          return 0.0f;
+        }
+        return f;
+      } finally {
+        readLock.unlock();
+      }
+    }
+
+    private void setUsageRatio(String label, float ratio) {
+      try {
+        writeLock.lock();
+        usageRatios.put(label, ratio);
+      } finally {
+        writeLock.unlock();
+      }
+    }
+  } /* End of UsageRatios class */
+
+  /**
+   * The User class stores all user-related resource usage and application
+   * details.
+   */
+  @VisibleForTesting
+  public static class User {
+    ResourceUsage userResourceUsage = new ResourceUsage();
+    String userName = null;
+    volatile Resource userResourceLimit = Resource.newInstance(0, 0);
+    private volatile AtomicInteger pendingApplications = new AtomicInteger(0);
+    private volatile AtomicInteger activeApplications = new AtomicInteger(0);
+
+    private UsageRatios userUsageRatios = new UsageRatios();
+    private WriteLock writeLock;
+
+    public User(String name) {
+      ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+      // The read-lock is unused for now; add it when necessary.
+      writeLock = lock.writeLock();
+
+      this.userName = name;
+    }
+
+    public ResourceUsage getResourceUsage() {
+      return userResourceUsage;
+    }
+
+    public float setAndUpdateUsageRatio(ResourceCalculator resourceCalculator,
+        Resource resource, String nodePartition) {
+      try {
+        writeLock.lock();
+        userUsageRatios.setUsageRatio(nodePartition, 0);
+        return updateUsageRatio(resourceCalculator, resource, nodePartition);
+      } finally {
+        writeLock.unlock();
+      }
+    }
+
+    public float updateUsageRatio(ResourceCalculator resourceCalculator,
+        Resource resource, String nodePartition) {
+      try {
+        writeLock.lock();
+        float delta;
+        float newRatio = Resources.ratio(resourceCalculator,
+            getUsed(nodePartition), resource);
+        delta = newRatio - userUsageRatios.getUsageRatio(nodePartition);
+        userUsageRatios.setUsageRatio(nodePartition, newRatio);
+        return delta;
+      } finally {
+        writeLock.unlock();
+      }
+    }
+
+    public Resource getUsed() {
+      return userResourceUsage.getUsed();
+    }
+
+    public Resource getAllUsed() {
+      return userResourceUsage.getAllUsed();
+    }
+
+    public Resource getUsed(String label) {
+      return userResourceUsage.getUsed(label);
+    }
+
+    public int getPendingApplications() {
+      return pendingApplications.get();
+    }
+
+    public int getActiveApplications() {
+      return activeApplications.get();
+    }
+
+    public Resource getConsumedAMResources() {
+      return userResourceUsage.getAMUsed();
+    }
+
+    public Resource getConsumedAMResources(String label) {
+      return userResourceUsage.getAMUsed(label);
+    }
+
+    public int getTotalApplications() {
+      return getPendingApplications() + getActiveApplications();
+    }
+
+    public void submitApplication() {
+      pendingApplications.incrementAndGet();
+    }
+
+    public void activateApplication() {
+      pendingApplications.decrementAndGet();
+      activeApplications.incrementAndGet();
+    }
+
+    public void finishApplication(boolean wasActive) {
+      if (wasActive) {
+        activeApplications.decrementAndGet();
+      } else {
+        pendingApplications.decrementAndGet();
+      }
+    }
+
+    public Resource getUserResourceLimit() {
+      return userResourceLimit;
+    }
+
+    public void setUserResourceLimit(Resource userResourceLimit) {
+      this.userResourceLimit = userResourceLimit;
+    }
+  } /* End of User class */
+
+  /**
+   * UsersManager Constructor.
+   *
+   * @param metrics
+   *          Queue Metrics
+   * @param lQueue
+   *          Leaf Queue Object
+   * @param labelManager
+   *          Label Manager instance
+   * @param scheduler
+   *          Capacity Scheduler Context
+   * @param resourceCalculator
+   *          Resource Calculator instance
+   */
+  public UsersManager(QueueMetrics metrics, LeafQueue lQueue,
+      RMNodeLabelsManager labelManager, CapacitySchedulerContext scheduler,
+      ResourceCalculator resourceCalculator) {
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    this.lQueue = lQueue;
+    this.scheduler = scheduler;
+    this.labelManager = labelManager;
+    this.resourceCalculator = resourceCalculator;
+    this.qUsageRatios = new UsageRatios();
+    this.metrics = metrics;
+
+    this.writeLock = lock.writeLock();
+    this.readLock = lock.readLock();
+  }
+
+  /**
+   * Get configured user-limit.
+   * @return user limit
+   */
+  public int getUserLimit() {
+    return userLimit;
+  }
+
+  /**
+   * Set configured user-limit.
+   * @param userLimit user limit
+   */
+  public void setUserLimit(int userLimit) {
+    this.userLimit = userLimit;
+  }
+
+  /**
+   * Get configured user-limit factor.
+   * @return user-limit factor
+   */
+  public float getUserLimitFactor() {
+    return userLimitFactor;
+  }
+
+  /**
+   * Set configured user-limit factor.
+   * @param userLimitFactor User Limit factor.
+   */
+  public void setUserLimitFactor(float userLimitFactor) {
+    this.userLimitFactor = userLimitFactor;
+  }
+
+  @VisibleForTesting
+  public float getUsageRatio(String label) {
+    return qUsageRatios.getUsageRatio(label);
+  }
+
+  /**
+   * Force UsersManager to recompute userlimit.
+   */
+  public void userLimitNeedsRecompute() {
+
+    // If latestVersionOfUsersState overflows to a negative value, reset it to
+    // 0. This method is invoked from UsersManager and LeafQueue, and every
+    // caller holds the write/read lock, so the reset below is safe.
+    try {
+      writeLock.lock();
+
+      long value = latestVersionOfUsersState.incrementAndGet();
+      if (value < 0) {
+        latestVersionOfUsersState.set(0);
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /*
+   * Get all users of queue.
+   */
+  private Map<String, User> getUsers() {
+    return users;
+  }
+
+  /**
+   * Get user object for given user name.
+   *
+   * @param userName
+   *          User Name
+   * @return User object
+   */
+  public User getUser(String userName) {
+    return users.get(userName);
+  }
+
+  /**
+   * Remove user.
+   *
+   * @param userName
+   *          User Name
+   */
+  public void removeUser(String userName) {
+    try {
+      writeLock.lock();
+      this.users.remove(userName);
+
+      // Remove user from active/non-active list as well.
+      activeUsersSet.remove(userName);
+      nonActiveUsersSet.remove(userName);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Get and add user if absent.
+   *
+   * @param userName
+   *          User Name
+   * @return User object
+   */
+  public User getUserAndAddIfAbsent(String userName) {
+    try {
+      writeLock.lock();
+      User u = getUser(userName);
+      if (null == u) {
+        u = new User(userName);
+        addUser(userName, u);
+
+        // Add to the non-active list so that resource usage can be tracked.
+        if (!nonActiveUsersSet.contains(userName)) {
+          nonActiveUsersSet.add(userName);
+        }
+      }
+      return u;
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /*
+   * Add a new user
+   */
+  private void addUser(String userName, User user) {
+    this.users.put(userName, user);
+  }
+
+  /**
+   * @return an ArrayList of UserInfo objects for all users in this queue
+   */
+  public ArrayList<UserInfo> getUsersInfo() {
+    try {
+      readLock.lock();
+      ArrayList<UserInfo> usersToReturn = new ArrayList<UserInfo>();
+      for (Map.Entry<String, User> entry : getUsers().entrySet()) {
+        User user = entry.getValue();
+        usersToReturn.add(
+            new UserInfo(entry.getKey(), Resources.clone(user.getAllUsed()),
+                user.getActiveApplications(), user.getPendingApplications(),
+                Resources.clone(user.getConsumedAMResources()),
+                Resources.clone(user.getUserResourceLimit()),
+                user.getResourceUsage()));
+      }
+      return usersToReturn;
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  /**
+   * Get the computed user-limit for all ACTIVE users in this queue. If the
+   * cached data has been invalidated by a resource change, this method also
+   * forces a recomputation of the user-limit.
+   *
+   * @param userName
+   *          Name of the user who has submitted one or more apps to the queue.
+   * @param clusterResource
+   *          total cluster resource
+   * @param nodePartition
+   *          partition name
+   * @param schedulingMode
+   *          scheduling mode
+   *          RESPECT_PARTITION_EXCLUSIVITY/IGNORE_PARTITION_EXCLUSIVITY
+   * @return Computed User Limit
+   */
+  public Resource getComputedResourceLimitForActiveUsers(String userName,
+      Resource clusterResource, String nodePartition,
+      SchedulingMode schedulingMode) {
+
+    Map<SchedulingMode, Resource> userLimitPerSchedulingMode = preComputedActiveUserLimit
+        .get(nodePartition);
+
+    try {
+      writeLock.lock();
+      if (isRecomputeNeeded(schedulingMode, nodePartition, true)) {
+        // recompute
+        userLimitPerSchedulingMode = reComputeUserLimits(userName,
+            nodePartition, clusterResource, schedulingMode, true);
+
+        // Update the cached user-state version so that recomputation can be
+        // skipped when nothing major has changed.
+        setLocalVersionOfUsersState(nodePartition, schedulingMode, true);
+      }
+    } finally {
+      writeLock.unlock();
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("userLimit is fetched. userLimit = "
+          + userLimitPerSchedulingMode.get(schedulingMode) + ", schedulingMode="
+          + schedulingMode + ", partition=" + nodePartition);
+    }
+
+    return userLimitPerSchedulingMode.get(schedulingMode);
+  }
+
+  /**
+   * Get the computed user-limit for all users in this queue. If the cached
+   * data has been invalidated by a resource change, this method also forces a
+   * recomputation of the user-limit.
+   *
+   * @param userName
+   *          Name of the user who has submitted one or more apps to the queue.
+   * @param clusterResource
+   *          total cluster resource
+   * @param nodePartition
+   *          partition name
+   * @param schedulingMode
+   *          scheduling mode
+   *          RESPECT_PARTITION_EXCLUSIVITY/IGNORE_PARTITION_EXCLUSIVITY
+   * @return Computed User Limit
+   */
+  public Resource getComputedResourceLimitForAllUsers(String userName,
+      Resource clusterResource, String nodePartition,
+      SchedulingMode schedulingMode) {
+
+    Map<SchedulingMode, Resource> userLimitPerSchedulingMode = preComputedAllUserLimit
+        .get(nodePartition);
+
+    try {
+      writeLock.lock();
+      if (isRecomputeNeeded(schedulingMode, nodePartition, false)) {
+        // recompute
+        userLimitPerSchedulingMode = reComputeUserLimits(userName,
+            nodePartition, clusterResource, schedulingMode, false);
+
+        // Update the cached user-state version so that recomputation can be
+        // skipped when nothing major has changed.
+        setLocalVersionOfUsersState(nodePartition, schedulingMode, false);
+      }
+    } finally {
+      writeLock.unlock();
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("userLimit is fetched. userLimit = "
+          + userLimitPerSchedulingMode.get(schedulingMode) + ", schedulingMode="
+          + schedulingMode + ", partition=" + nodePartition);
+    }
+
+    return userLimitPerSchedulingMode.get(schedulingMode);
+  }
+
+  /*
+   * Recompute the user-limit when either: 1. no cached user-limit exists in
+   * the local map (the local version defaults to -1, which never matches), or
+   * 2. the cached local version does not match the latest user-state version.
+   */
+  private boolean isRecomputeNeeded(SchedulingMode schedulingMode,
+      String nodePartition, boolean isActive) {
+    return (getLocalVersionOfUsersState(nodePartition, schedulingMode,
+        isActive) != latestVersionOfUsersState.get());
+  }
+
+  /*
+   * Record the local version of the user state per label so the cache can be
+   * invalidated when needed.
+   */
+  private void setLocalVersionOfUsersState(String nodePartition,
+      SchedulingMode schedulingMode, boolean isActive) {
+    try {
+      writeLock.lock();
+      Map<String, Map<SchedulingMode, Long>> localVersionOfUsersState = (isActive)
+          ? localVersionOfActiveUsersState
+          : localVersionOfAllUsersState;
+
+      Map<SchedulingMode, Long> localVersion = localVersionOfUsersState
+          .get(nodePartition);
+      if (null == localVersion) {
+        localVersion = new HashMap<SchedulingMode, Long>();
+        localVersionOfUsersState.put(nodePartition, localVersion);
+      }
+
+      localVersion.put(schedulingMode, latestVersionOfUsersState.get());
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /*
+   * Get the local version of the user state per label; returns -1 when no
+   * version has been recorded yet.
+   */
+  private long getLocalVersionOfUsersState(String nodePartition,
+      SchedulingMode schedulingMode, boolean isActive) {
+    try {
+      this.readLock.lock();
+      Map<String, Map<SchedulingMode, Long>> localVersionOfUsersState = (isActive)
+          ? localVersionOfActiveUsersState
+          : localVersionOfAllUsersState;
+
+      if (!localVersionOfUsersState.containsKey(nodePartition)) {
+        return -1;
+      }
+
+      Map<SchedulingMode, Long> localVersion = localVersionOfUsersState
+          .get(nodePartition);
+      if (!localVersion.containsKey(schedulingMode)) {
+        return -1;
+      }
+
+      return localVersion.get(schedulingMode);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  private Map<SchedulingMode, Resource> reComputeUserLimits(String userName,
+      String nodePartition, Resource clusterResource,
+      SchedulingMode schedulingMode, boolean activeMode) {
+
+    // Select the stored map based on whether this is an active-user or
+    // all-user computation.
+    Map<String, Map<SchedulingMode, Resource>> computedMap = (activeMode)
+        ? preComputedActiveUserLimit
+        : preComputedAllUserLimit;
+
+    Map<SchedulingMode, Resource> userLimitPerSchedulingMode = computedMap
+        .get(nodePartition);
+
+    if (userLimitPerSchedulingMode == null) {
+      userLimitPerSchedulingMode = new ConcurrentHashMap<>();
+      computedMap.put(nodePartition, userLimitPerSchedulingMode);
+    }
+
+    // compute user-limit per scheduling mode.
+    Resource computedUserLimit = computeUserLimit(userName, clusterResource,
+        nodePartition, schedulingMode, activeMode);
+
+    // update in local storage
+    userLimitPerSchedulingMode.put(schedulingMode, computedUserLimit);
+
+    return userLimitPerSchedulingMode;
+  }
+
+  private Resource computeUserLimit(String userName, Resource clusterResource,
+      String nodePartition, SchedulingMode schedulingMode, boolean activeUser) {
+    Resource partitionResource = labelManager.getResourceByLabel(nodePartition,
+        clusterResource);
+
+    /*
+     * What is our current capacity?
+     * * If we're running below capacity, it is max(required, queue-capacity).
+     * The 'max' ensures that jobs in queues with minuscule capacity (< 1
+     * slot) make progress.
+     * * If we're running over capacity, it is (usedResources + required),
+     * i.e. the extra resources we are allocating.
+     */
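+    // Illustrative example with assumed numbers (for illustration only, not
+    // from this patch): for a queue guaranteed 100GB, consuming 120GB with
+    // 1GB required is over capacity, so currentCapacity = 120GB + 1GB =
+    // 121GB; consuming only 40GB leaves currentCapacity at the 100GB queue
+    // capacity.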
+    Resource queueCapacity = Resources.multiplyAndNormalizeUp(
+        resourceCalculator, partitionResource,
+        lQueue.getQueueCapacities().getAbsoluteCapacity(nodePartition),
+        lQueue.getMinimumAllocation());
+
+    /*
+     * Assume the required resource equals minimumAllocation; this ensures the
+     * user limit can keep increasing until queueMaxResource is reached.
+     */
+    Resource required = lQueue.getMinimumAllocation();
+
+    // Allow progress for queues with miniscule capacity
+    queueCapacity = Resources.max(resourceCalculator, partitionResource,
+        queueCapacity, required);
+
+    /*
+     * We want to base the userLimit calculation on max(queueCapacity,
+     * usedResources + required). However, we want usedResources to be based
+     * on the combined ratios of all the users in the queue, so we use
+     * consumedRatio to calculate it. The calculation depends on how the
+     * resourceCalculator computes the ratio between two Resources. DRF
+     * example: if usedResources is greater than queueCapacity and users have
+     * the following [mem, cpu] usages: User1: [10%, 20%] - dominant resource
+     * is 20%; User2: [30%, 10%] - dominant resource is 30%. The total
+     * consumedRatio is then 20% + 30% = 50%. Yes, this value can be larger
+     * than 100%, but for the purpose of making sure all users get their fair
+     * share, it works.
+     */
+    Resource consumed = Resources.multiplyAndNormalizeUp(resourceCalculator,
+        partitionResource, getUsageRatio(nodePartition),
+        lQueue.getMinimumAllocation());
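+    // Illustrative (assumed numbers): a usage ratio of 0.5 on a 100GB
+    // partition yields consumed of roughly 50GB, normalized up to a multiple
+    // of the minimum allocation.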
+    Resource currentCapacity = Resources.lessThan(resourceCalculator,
+        partitionResource, consumed, queueCapacity)
+            ? queueCapacity
+            : Resources.add(consumed, required);
+
+    /*
+     * Never allow a single user to take more than the queue's configured
+     * capacity * user-limit-factor. Also, the queue's configured capacity
+     * should be higher than queue-hard-limit * ulMin
+     */
+    int usersCount = getNumActiveUsers();
+    Resource resourceUsed = totalResUsageForActiveUsers.getUsed(nodePartition);
+
+    // For non-activeUser calculation, consider all users count.
+    if (!activeUser) {
+      resourceUsed = currentCapacity;
+      usersCount = users.size();
+    }
+
+    /*
+     * User limit resource is determined by: max(currentCapacity /
+     * #activeUsers, currentCapacity * user-limit-percentage%)
+     */
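+    // Illustrative (assumed numbers): with 100GB used by active users, 4
+    // active users and minimum-user-limit-percent = 25 on a currentCapacity
+    // of 100GB, the limit is max(100GB / 4, 100GB * 25%) = 25GB.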
+    Resource userLimitResource = Resources.max(resourceCalculator,
+        partitionResource,
+        Resources.divideAndCeil(resourceCalculator, resourceUsed,
+            usersCount),
+        Resources.divideAndCeil(resourceCalculator,
+            Resources.multiplyAndRoundDown(currentCapacity, getUserLimit()),
+            100));
+
+    // User limit is capped by maxUserLimit
+    // - maxUserLimit = queueCapacity * user-limit-factor
+    // (RESPECT_PARTITION_EXCLUSIVITY)
+    // - maxUserLimit = total-partition-resource (IGNORE_PARTITION_EXCLUSIVITY)
+    //
+    // In IGNORE_PARTITION_EXCLUSIVITY mode, if a queue cannot access a
+    // partition, its guaranteed resource on that partition is 0, and the
+    // user-limit-factor computation is based on the queue's guaranteed
+    // capacity. So we do not cap the user-limit (or the used resource) for
+    // IGNORE_PARTITION_EXCLUSIVITY allocations.
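+    // Illustrative (assumed numbers): a queue capacity of 100GB with
+    // user-limit-factor = 2 caps each user at 200GB under
+    // RESPECT_PARTITION_EXCLUSIVITY; under IGNORE_PARTITION_EXCLUSIVITY the
+    // cap is the full partition resource.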
+    Resource maxUserLimit = Resources.none();
+    if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
+      maxUserLimit = Resources.multiplyAndRoundDown(queueCapacity,
+          getUserLimitFactor());
+    } else if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) {
+      maxUserLimit = partitionResource;
+    }
+
+    // Cap final user limit with maxUserLimit
+    userLimitResource = Resources
+        .roundUp(resourceCalculator,
+            Resources.min(resourceCalculator, partitionResource,
+                userLimitResource, maxUserLimit),
+            lQueue.getMinimumAllocation());
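+    // Illustrative (assumed numbers): min(25GB, 200GB) = 25GB, and with a
+    // 4GB minimum allocation, roundUp lifts that 25GB result to 28GB, the
+    // next multiple of the minimum allocation.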
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("User limit computation for " + userName + " in queue "
+          + lQueue.getQueueName() + " userLimitPercent=" + lQueue.getUserLimit()
+          + " userLimitFactor=" + lQueue.getUserLimitFactor() + " required: "
+          + required + " consumed: " + consumed + " user-limit-resource: "
+          + userLimitResource + " queueCapacity: " + queueCapacity
+          + " qconsumed: " + lQueue.getQueueResourceUsage().getUsed()
+          + " currentCapacity: " + currentCapacity + " activeUsers: "
+          + usersCount + " clusterCapacity: " + clusterResource
+          + " resourceByLabel: " + partitionResource + " usageratio: "
+          + getUsageRatio(nodePartition) + " Partition: " + nodePartition);
+    }
+    getUser(userName).setUserResourceLimit(userLimitResource);
+    return userLimitResource;
+  }
+
+  /**
+   * Recompute the usage ratio of every user for the given partition.
+   *
+   * @param partition
+   *          Node partition
+   * @param clusterResource
+   *          Cluster Resource
+   */
+  public void updateUsageRatio(String partition, Resource clusterResource) {
+    try {
+      writeLock.lock();
+      Resource resourceByLabel = labelManager.getResourceByLabel(partition,
+          clusterResource);
+      float consumed = 0;
+      User user;
+      for (Map.Entry<String, User> entry : getUsers().entrySet()) {
+        user = entry.getValue();
+        consumed += user.setAndUpdateUsageRatio(resourceCalculator,
+            resourceByLabel, partition);
+      }
+
+      qUsageRatios.setUsageRatio(partition, consumed);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /*
+   * Increment Queue Usage Ratio.
+   */
+  private void incQueueUsageRatio(String nodePartition, float delta) {
+    qUsageRatios.incUsageRatio(nodePartition, delta);
+  }
+
+  @Override
+  public void activateApplication(String user, ApplicationId applicationId) {
+    try {
+      this.writeLock.lock();
+
+      Set<ApplicationId> userApps = usersApplications.get(user);
+      if (userApps == null) {
+        userApps = new HashSet<ApplicationId>();
+        usersApplications.put(user, userApps);
+        activeUsers.incrementAndGet();
+        metrics.incrActiveUsers();
+
+        // A user is added to the active list. Invalidate the user-limit cache.
+        userLimitNeedsRecompute();
+        updateActiveUsersResourceUsage(user);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("User " + user + " added to activeUsers, currently: "
+              + activeUsers);
+        }
+      }
+      if (userApps.add(applicationId)) {
+        metrics.activateApp(user);
+      }
+    } finally {
+      this.writeLock.unlock();
+    }
+  }
+
+  @Override
+  public void deactivateApplication(String user, ApplicationId applicationId) {
+    try {
+      this.writeLock.lock();
+
+      Set<ApplicationId> userApps = usersApplications.get(user);
+      if (userApps != null) {
+        if (userApps.remove(applicationId)) {
+          metrics.deactivateApp(user);
+        }
+        if (userApps.isEmpty()) {
+          usersApplications.remove(user);
+          activeUsers.decrementAndGet();
+          metrics.decrActiveUsers();
+
+          // A user is removed from the active list. Invalidate the user-limit
+          // cache.
+          userLimitNeedsRecompute();
+          updateNonActiveUsersResourceUsage(user);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("User " + user + " removed from activeUsers, currently: "
+                + activeUsers);
+          }
+        }
+      }
+    } finally {
+      this.writeLock.unlock();
+    }
+  }
+
+  @Override
+  public int getNumActiveUsers() {
+    return activeUsers.get();
+  }
+
+  private void updateActiveUsersResourceUsage(String userName) {
+    try {
+      this.writeLock.lock();
+
+      // For the unit-test case: we might need to add the user to the users
+      // list.
+      User user = getUserAndAddIfAbsent(userName);
+      ResourceUsage resourceUsage = user.getResourceUsage();
+      // If the user moves to the active list, move its resource usage from
+      // the non-active to the active totals.
+      if (nonActiveUsersSet.contains(userName)) {
+        nonActiveUsersSet.remove(userName);
+        activeUsersSet.add(userName);
+
+        // Update total resource usage of active and non-active after user
+        // is moved from non-active to active.
+        for (String partition : resourceUsage.getNodePartitionsSet()) {
+          totalResUsageForNonActiveUsers.decUsed(partition,
+              resourceUsage.getUsed(partition));
+          totalResUsageForActiveUsers.incUsed(partition,
+              resourceUsage.getUsed(partition));
+        }
+
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("User '" + userName
+              + "' has become active. Hence move user to active list."
+              + "Active users size = " + activeUsersSet.size()
+              + "Non-active users size = " + nonActiveUsersSet.size()
+              + "Total Resource usage for active users="
+              + totalResUsageForActiveUsers.getAllUsed() + "."
+              + "Total Resource usage for non-active users="
+              + totalResUsageForNonActiveUsers.getAllUsed());
+        }
+      }
+    } finally {
+      this.writeLock.unlock();
+    }
+  }
+
+  private void updateNonActiveUsersResourceUsage(String userName) {
+    try {
+      this.writeLock.lock();
+
+      // For the unit-test case: we might need to add the user to the users
+      // list.
+      User user = getUserAndAddIfAbsent(userName);
+      ResourceUsage resourceUsage = user.getResourceUsage();
+      // If the user moves to the non-active list, move its resource usage
+      // from the active to the non-active totals.
+      if (activeUsersSet.contains(userName)) {
+        activeUsersSet.remove(userName);
+        nonActiveUsersSet.add(userName);
+
+        // Update total resource usage of active and non-active after user is
+        // moved from active to non-active.
+        for (String partition : resourceUsage.getNodePartitionsSet()) {
+          totalResUsageForActiveUsers.decUsed(partition,
+              resourceUsage.getUsed(partition));
+          totalResUsageForNonActiveUsers.incUsed(partition,
+              resourceUsage.getUsed(partition));
+        }
+
+        // Log once after all partitions are updated, matching
+        // updateActiveUsersResourceUsage.
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("User '" + userName
+              + "' has become non-active. Hence move user to non-active list. "
+              + "Active users size = " + activeUsersSet.size()
+              + ", Non-active users size = " + nonActiveUsersSet.size()
+              + ", Total Resource usage for active users = "
+              + totalResUsageForActiveUsers.getAllUsed()
+              + ", Total Resource usage for non-active users = "
+              + totalResUsageForNonActiveUsers.getAllUsed());
+        }
+      }
+    } finally {
+      this.writeLock.unlock();
+    }
+  }
+
+  private ResourceUsage getTotalResourceUsagePerUser(String userName) {
+    if (nonActiveUsersSet.contains(userName)) {
+      return totalResUsageForNonActiveUsers;
+    } else if (activeUsersSet.contains(userName)) {
+      return totalResUsageForActiveUsers;
+    } else {
+      LOG.warn("User '" + userName
+          + "' is not present in active/non-active. This is highly unlikely."
+          + "We can consider this user in non-active list in this case.");
+      return totalResUsageForNonActiveUsers;
+    }
+  }
+
+  /**
+   * During container allocate/release, ensure that all user-specific data
+   * structures are updated.
+   *
+   * @param userName
+   *          Name of the user
+   * @param resource
+   *          Resource to increment/decrement
+   * @param nodePartition
+   *          Node label
+   * @param isAllocate
+   *          Indicate whether to allocate or release resource
+   * @return user
+   */
+  public User updateUserResourceUsage(String userName, Resource resource,
+      String nodePartition, boolean isAllocate) {
+    try {
+      this.writeLock.lock();
+
+      // TODO: should use getUser; this method is used only to avoid a UT
+      // failure caused by a wrong invocation order. The UT will be fixed
+      // separately.
+      User user = getUserAndAddIfAbsent(userName);
+
+      // A container was allocated or released. Invalidate the user-limit
+      // cache.
+      updateResourceUsagePerUser(user, resource, nodePartition, isAllocate);
+
+      userLimitNeedsRecompute();
+
+      // Update usage ratios
+      Resource resourceByLabel = labelManager.getResourceByLabel(nodePartition,
+          scheduler.getClusterResource());
+      incQueueUsageRatio(nodePartition, user.updateUsageRatio(
+          resourceCalculator, resourceByLabel, nodePartition));
+
+      return user;
+    } finally {
+      this.writeLock.unlock();
+    }
+  }
+
+  private void updateResourceUsagePerUser(User user, Resource resource,
+      String nodePartition, boolean isAllocate) {
+    ResourceUsage totalResourceUsageForUsers = getTotalResourceUsagePerUser(
+        user.userName);
+
+    if (isAllocate) {
+      user.getResourceUsage().incUsed(nodePartition, resource);
+      totalResourceUsageForUsers.incUsed(nodePartition, resource);
+    } else {
+      user.getResourceUsage().decUsed(nodePartition, resource);
+      totalResourceUsageForUsers.decUsed(nodePartition, resource);
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(
+          "User resource is updated." + "Total Resource usage for active users="
+              + totalResUsageForActiveUsers.getAllUsed() + "."
+              + "Total Resource usage for non-active users="
+              + totalResUsageForNonActiveUsers.getAllUsed());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 5e3b9be..30b7305 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -46,7 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerFini
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReservedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
@@ -117,24 +117,24 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
       new ConcurrentHashMap<>();
 
   public FiCaSchedulerApp(ApplicationAttemptId applicationAttemptId,
-      String user, Queue queue, ActiveUsersManager activeUsersManager,
+      String user, Queue queue, AbstractUsersManager abstractUsersManager,
       RMContext rmContext) {
-    this(applicationAttemptId, user, queue, activeUsersManager, rmContext,
+    this(applicationAttemptId, user, queue, abstractUsersManager, rmContext,
         Priority.newInstance(0), false);
   }
 
   public FiCaSchedulerApp(ApplicationAttemptId applicationAttemptId,
-      String user, Queue queue, ActiveUsersManager activeUsersManager,
+      String user, Queue queue, AbstractUsersManager abstractUsersManager,
       RMContext rmContext, Priority appPriority, boolean isAttemptRecovering) {
-    this(applicationAttemptId, user, queue, activeUsersManager, rmContext,
+    this(applicationAttemptId, user, queue, abstractUsersManager, rmContext,
         appPriority, isAttemptRecovering, null);
   }
 
   public FiCaSchedulerApp(ApplicationAttemptId applicationAttemptId,
-      String user, Queue queue, ActiveUsersManager activeUsersManager,
+      String user, Queue queue, AbstractUsersManager abstractUsersManager,
       RMContext rmContext, Priority appPriority, boolean isAttemptRecovering,
       ActivitiesManager activitiesManager) {
-    super(applicationAttemptId, user, queue, activeUsersManager, rmContext);
+    super(applicationAttemptId, user, queue, abstractUsersManager, rmContext);
 
     RMApp rmApp = rmContext.getRMApps().get(getApplicationId());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 2754616..16070e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -465,7 +465,7 @@ public class FSLeafQueue extends FSQueue {
   }
 
   @Override
-  public ActiveUsersManager getActiveUsersManager() {
+  public ActiveUsersManager getAbstractUsersManager() {
     return activeUsersManager;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index f2e5086..2528f3d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -294,7 +294,7 @@ public class FSParentQueue extends FSQueue {
   }
   
   @Override
-  public ActiveUsersManager getActiveUsersManager() {
+  public ActiveUsersManager getAbstractUsersManager() {
     // Should never be called since all applications are submitted to LeafQueues
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index 63e7f40..64dbc7d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -177,7 +177,7 @@ public class FifoScheduler extends
     }
     
     @Override
-    public ActiveUsersManager getActiveUsersManager() {
+    public ActiveUsersManager getAbstractUsersManager() {
       return activeUsersManager;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
index e0ac56f..7dcdf58 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
@@ -63,7 +63,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
     maxApplications = q.getMaxApplications();
     maxApplicationsPerUser = q.getMaxApplicationsPerUser();
     userLimit = q.getUserLimit();
-    users = new UsersInfo(q.getUsers());
+    users = new UsersInfo(q.getUsersManager().getUsersInfo());
     userLimitFactor = q.getUserLimitFactor();
     AMResourceLimit = new ResourceInfo(q.getAMResourceLimit());
     usedAMResource = new ResourceInfo(q.getQueueResourceUsage().getAMUsed());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
index b2d7d16..a9e97fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueueCapacities;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
@@ -361,9 +362,10 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
           queue.getQueueCapacities().getAbsoluteCapacity());
       HashSet<String> users = userMap.get(queue.getQueueName());
       Resource userLimit = Resources.divideAndCeil(rc, capacity, users.size());
-      for (String user : users) {
-        when(queue.getUserLimitPerUser(eq(user), any(Resource.class),
-            anyString())).thenReturn(userLimit);
+      for (String userName : users) {
+        when(queue.getResourceLimitForAllUsers(eq(userName),
+            any(Resource.class), anyString(), any(SchedulingMode.class)))
+                .thenReturn(userLimit);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
index c5e5183..06253ff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
@@ -75,7 +75,7 @@ public class TestSchedulerApplicationAttempt {
     RMContext rmContext = mock(RMContext.class);
     when(rmContext.getEpoch()).thenReturn(3L);
     SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId,
-        user, oldQueue, oldQueue.getActiveUsersManager(), rmContext);
+        user, oldQueue, oldQueue.getAbstractUsersManager(), rmContext);
     oldMetrics.submitApp(user);
     
     // confirm that containerId is calculated based on epoch.
@@ -170,7 +170,7 @@ public class TestSchedulerApplicationAttempt {
     ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics);
     Queue queue = mock(Queue.class);
     when(queue.getMetrics()).thenReturn(metrics);
-    when(queue.getActiveUsersManager()).thenReturn(activeUsersManager);
+    when(queue.getAbstractUsersManager()).thenReturn(activeUsersManager);
     when(queue.getQueueInfo(false, false)).thenReturn(queueInfo);
     return queue;
   }
@@ -199,7 +199,7 @@ public class TestSchedulerApplicationAttempt {
     Queue queue = createQueue("test", null);
     SchedulerApplicationAttempt app =
         new SchedulerApplicationAttempt(appAttId, user, queue,
-            queue.getActiveUsersManager(), rmContext);
+            queue.getAbstractUsersManager(), rmContext);
 
     // Resource request
     Resource requestedResource = Resource.newInstance(1536, 2);
@@ -212,7 +212,7 @@ public class TestSchedulerApplicationAttempt {
 
     queue = createQueue("test2", null, 0.5f);
     app = new SchedulerApplicationAttempt(appAttId, user, queue,
-        queue.getActiveUsersManager(), rmContext);
+        queue.getAbstractUsersManager(), rmContext);
     app.attemptResourceUsage.incUsed(requestedResource);
     assertEquals(30.0f, app.getResourceUsageReport().getQueueUsagePercentage(),
         0.01f);
@@ -230,7 +230,7 @@ public class TestSchedulerApplicationAttempt {
 
     queue = createQueue("test3", null, 0.0f);
     app = new SchedulerApplicationAttempt(appAttId, user, queue,
-        queue.getActiveUsersManager(), rmContext);
+        queue.getAbstractUsersManager(), rmContext);
 
     // Resource request
     app.attemptResourceUsage.incUsed(requestedResource);
@@ -256,7 +256,7 @@ public class TestSchedulerApplicationAttempt {
     final String user = "user1";
     Queue queue = createQueue("test", null);
     SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId,
-        user, queue, queue.getActiveUsersManager(), rmContext);
+        user, queue, queue.getAbstractUsersManager(), rmContext);
 
     // Resource request
     Resource requestedResource = Resource.newInstance(1536, 2);
@@ -275,7 +275,7 @@ public class TestSchedulerApplicationAttempt {
     RMContext rmContext = mock(RMContext.class);
     when(rmContext.getEpoch()).thenReturn(3L);
     SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(
-        attemptId, "user", queue, queue.getActiveUsersManager(), rmContext);
+        attemptId, "user", queue, queue.getAbstractUsersManager(), rmContext);
     Priority priority = Priority.newInstance(1);
     SchedulerRequestKey schedulerKey = toSchedulerKey(priority);
     assertEquals(0, app.getSchedulingOpportunities(schedulerKey));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
index 046ea4a..bb0a123 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
@@ -192,7 +192,7 @@ public class TestApplicationLimits {
         clusterResource));
     
     ActiveUsersManager activeUsersManager = mock(ActiveUsersManager.class);
-    when(queue.getActiveUsersManager()).thenReturn(activeUsersManager);
+    when(queue.getAbstractUsersManager()).thenReturn(activeUsersManager);
 
     assertEquals(Resource.newInstance(8 * GB, 1),
         queue.calculateAndGetAMResourceLimit());
@@ -632,7 +632,7 @@ public class TestApplicationLimits {
         TestUtils.getMockApplicationAttemptId(0, 0); 
     FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(
       appAttemptId_0_0, user_0, queue, 
-            queue.getActiveUsersManager(), spyRMContext);
+            queue.getAbstractUsersManager(), spyRMContext);
     queue.submitApplicationAttempt(app_0_0, user_0);
 
     List<ResourceRequest> app_0_0_requests = new ArrayList<ResourceRequest>();
@@ -644,7 +644,7 @@ public class TestApplicationLimits {
     // Schedule to compute 
     queue.assignContainers(clusterResource, node_0, new ResourceLimits(
         clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
-    Resource expectedHeadroom = Resources.createResource(10*16*GB, 1);
+    Resource expectedHeadroom = Resources.createResource(5*16*GB, 1);
     assertEquals(expectedHeadroom, app_0_0.getHeadroom());
 
     // Submit second application from user_0, check headroom
@@ -652,7 +652,7 @@ public class TestApplicationLimits {
         TestUtils.getMockApplicationAttemptId(1, 0); 
     FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(
       appAttemptId_0_1, user_0, queue, 
-            queue.getActiveUsersManager(), spyRMContext);
+            queue.getAbstractUsersManager(), spyRMContext);
     queue.submitApplicationAttempt(app_0_1, user_0);
     
     List<ResourceRequest> app_0_1_requests = new ArrayList<ResourceRequest>();
@@ -672,7 +672,7 @@ public class TestApplicationLimits {
         TestUtils.getMockApplicationAttemptId(2, 0); 
     FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(
       appAttemptId_1_0, user_1, queue, 
-            queue.getActiveUsersManager(), spyRMContext);
+            queue.getAbstractUsersManager(), spyRMContext);
     queue.submitApplicationAttempt(app_1_0, user_1);
 
     List<ResourceRequest> app_1_0_requests = new ArrayList<ResourceRequest>();
@@ -691,6 +691,11 @@ public class TestApplicationLimits {
 
     // Now reduce cluster size and check for the smaller headroom
     clusterResource = Resources.createResource(90*16*GB);
+
+    // Any change in cluster resource needs to trigger user-limit
+    // recomputation. In the existing code, LeafQueue#updateClusterResource
+    // handled this; however, that method is not used here.
+    queue.getUsersManager().userLimitNeedsRecompute();
     queue.assignContainers(clusterResource, node_0, new ResourceLimits(
         clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Schedule to compute
     expectedHeadroom = Resources.createResource(9*16*GB / 2, 1); // changes

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java
index 2fa06e8..b70a359 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java
@@ -657,7 +657,7 @@ public class TestApplicationLimitsByPartition {
     final ApplicationAttemptId appAttemptId_0_0 =
         TestUtils.getMockApplicationAttemptId(0, 0);
     FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(appAttemptId_0_0, user_0,
-        queue, queue.getActiveUsersManager(), spyRMContext);
+        queue, queue.getAbstractUsersManager(), spyRMContext);
     queue.submitApplicationAttempt(app_0_0, user_0);
 
     List<ResourceRequest> app_0_0_requests = new ArrayList<ResourceRequest>();
@@ -669,16 +669,16 @@ public class TestApplicationLimitsByPartition {
     queue.assignContainers(clusterResource, node_0,
         new ResourceLimits(clusterResource),
         SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
-    //head room = queue capacity = 50 % 90% 160 GB
+    //head room = 50% (queue cap) * 90% (partition) * 160 GB * 25% (user limit)
     Resource expectedHeadroom =
-        Resources.createResource((int) (0.5 * 0.9 * 160) * GB, 1);
+        Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
     assertEquals(expectedHeadroom, app_0_0.getHeadroom());
 
     // Submit second application from user_0, check headroom
     final ApplicationAttemptId appAttemptId_0_1 =
         TestUtils.getMockApplicationAttemptId(1, 0);
     FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(appAttemptId_0_1, user_0,
-        queue, queue.getActiveUsersManager(), spyRMContext);
+        queue, queue.getAbstractUsersManager(), spyRMContext);
     queue.submitApplicationAttempt(app_0_1, user_0);
 
     List<ResourceRequest> app_0_1_requests = new ArrayList<ResourceRequest>();
@@ -701,15 +701,16 @@ public class TestApplicationLimitsByPartition {
     assertEquals(expectedHeadroom, app_0_0.getHeadroom());// no change
     //head room for default label + head room for y partition
     //head room for y partition = 100% 50%(b queue capacity ) *  160 * GB
-    Resource expectedHeadroomWithReqInY =
-        Resources.add(Resources.createResource((int) (0.5 * 160) * GB, 1), expectedHeadroom);
+    Resource expectedHeadroomWithReqInY = Resources.add(
+        Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1),
+        expectedHeadroom);
     assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
 
     // Submit first application from user_1, check for new headroom
     final ApplicationAttemptId appAttemptId_1_0 =
         TestUtils.getMockApplicationAttemptId(2, 0);
     FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(appAttemptId_1_0, user_1,
-        queue, queue.getActiveUsersManager(), spyRMContext);
+        queue, queue.getAbstractUsersManager(), spyRMContext);
     queue.submitApplicationAttempt(app_1_0, user_1);
 
     List<ResourceRequest> app_1_0_requests = new ArrayList<ResourceRequest>();
@@ -728,12 +729,12 @@ public class TestApplicationLimitsByPartition {
         SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Schedule to compute
     //head room = queue capacity = (50 % 90% 160 GB)/2 (for 2 users)
     expectedHeadroom =
-        Resources.createResource((int) (0.5 * 0.9 * 160 * 0.5) * GB, 1);
+        Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
     //head room for default label + head room for y partition
     //head room for y partition = 100% 50%(b queue capacity ) *  160 * GB
-    expectedHeadroomWithReqInY =
-        Resources.add(Resources.createResource((int) (0.5 * 0.5 * 160) * GB, 1),
-            expectedHeadroom);
+    expectedHeadroomWithReqInY = Resources.add(
+        Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1),
+        expectedHeadroom);
     assertEquals(expectedHeadroom, app_0_0.getHeadroom());
     assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
     assertEquals(expectedHeadroomWithReqInY, app_1_0.getHeadroom());
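
The 0.25 factor threaded through these expected values reads as the queue's minimum-user-limit-percent (presumably 25% in this test's configuration) now being applied to headroom per partition. A quick arithmetic check of the constants above, as a self-contained sketch; the class and printout are illustrative, not part of the test.

    public class HeadroomCheck {
      public static void main(String[] args) {
        final int GB = 1024;  // MB per GB, matching the tests' convention
        // default partition: 50% queue cap * 90% partition share * 160 GB * 25% UL
        int defaultHeadroomMb = (int) (0.5 * 0.9 * 160 * 0.25) * GB;  // 18 GB
        // partition y: 25% UL * 50% queue cap * 160 GB
        int yHeadroomMb = (int) (0.25 * 0.5 * 160) * GB;              // 20 GB
        // apps that also requested in partition y see the sum of both headrooms
        System.out.printf("default=%d MB, y=%d MB, combined=%d MB%n",
            defaultHeadroomMb, yHeadroomMb, defaultHeadroomMb + yHeadroomMb);
      }
    }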

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
index 0a864fd..732b5d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
@@ -184,7 +184,7 @@ public class TestCapacitySchedulerNodeLabelUpdate {
       String userName, String partition, int memory) {
     CapacityScheduler scheduler = (CapacityScheduler) rm.getResourceScheduler();
     LeafQueue queue = (LeafQueue) scheduler.getQueue(queueName);
-    LeafQueue.User user = queue.getUser(userName);
+    UsersManager.User user = queue.getUser(userName);
     Assert.assertEquals(memory,
         user.getResourceUsage().getUsed(partition).getMemorySize());
   }
@@ -241,7 +241,7 @@ public class TestCapacitySchedulerNodeLabelUpdate {
     LeafQueue queue =
         (LeafQueue) ((CapacityScheduler) rm.getResourceScheduler())
             .getQueue("a");
-    ArrayList<UserInfo> users = queue.getUsers();
+    ArrayList<UserInfo> users = queue.getUsersManager().getUsersInfo();
     for (UserInfo userInfo : users) {
       if (userInfo.getUsername().equals("user")) {
         ResourceInfo resourcesUsed = userInfo.getResourcesUsed();
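
For callers migrating off the old LeafQueue accessors, the replacement path is uniform: per-user state now hangs off the queue's UsersManager. A condensed sketch of the new lookups used above (queue and user names are this test's; the surrounding code is illustrative, not self-contained):

    LeafQueue queue = (LeafQueue) scheduler.getQueue("a");
    // User is now the UsersManager.User type, not LeafQueue.User
    UsersManager.User user = queue.getUser("user");
    long usedOnX = user.getResourceUsage().getUsed("x").getMemorySize();
    // was: queue.getUsers()
    for (UserInfo info : queue.getUsersManager().getUsersInfo()) {
      if (info.getUsername().equals("user")) {
        ResourceInfo used = info.getResourcesUsed();
      }
    }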

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index bd038e8..ec1b84d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
@@ -77,10 +78,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueStateManager
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
-
-
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UsersManager.User;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue.User;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
@@ -522,19 +521,22 @@ public class TestLeafQueue {
     // Users
     final String user_0 = "user_0";
 
+    // Active Users Manager
+    AbstractUsersManager activeUserManager = a.getAbstractUsersManager();
+
     // Submit applications
     final ApplicationAttemptId appAttemptId_0 = 
         TestUtils.getMockApplicationAttemptId(0, 0); 
     FiCaSchedulerApp app_0 = 
         new FiCaSchedulerApp(appAttemptId_0, user_0, a, 
-            mock(ActiveUsersManager.class), spyRMContext);
+            activeUserManager, spyRMContext);
     a.submitApplicationAttempt(app_0, user_0);
 
     final ApplicationAttemptId appAttemptId_1 = 
         TestUtils.getMockApplicationAttemptId(1, 0); 
     FiCaSchedulerApp app_1 = 
         new FiCaSchedulerApp(appAttemptId_1, user_0, a, 
-            mock(ActiveUsersManager.class), spyRMContext);
+            activeUserManager, spyRMContext);
     a.submitApplicationAttempt(app_1, user_0);  // same user
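
Swapping mock(ActiveUsersManager.class) for the queue's real manager is load-bearing here: an app's activate/deactivate traffic is what drives the active-user count, and with this patch that count feeds the user-limit computation, so a mock would leave the limit stale. A condensed sketch of the dependency, with illustrative comments only:

    // The queue's real manager, shared by every app in the queue.
    AbstractUsersManager um = a.getAbstractUsersManager();
    FiCaSchedulerApp app = new FiCaSchedulerApp(
        appAttemptId_0, user_0, a, um, spyRMContext);
    // updateResourceRequests(...) eventually calls um.activateApplication(...),
    // changing getNumActiveUsers(), which the user-limit math divides by;
    // a Mockito mock would swallow those calls and freeze the computed limit.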
 
     
@@ -683,7 +685,7 @@ public class TestLeafQueue {
         TestUtils.getMockApplicationAttemptId(0, 0);
     FiCaSchedulerApp app0 =
         new FiCaSchedulerApp(appAttemptId0, user0, b,
-            b.getActiveUsersManager(), spyRMContext);
+            b.getAbstractUsersManager(), spyRMContext);
     b.submitApplicationAttempt(app0, user0);
 
     // Setup some nodes
@@ -747,14 +749,14 @@ public class TestLeafQueue {
         TestUtils.getMockApplicationAttemptId(0, 0);
     FiCaSchedulerApp app0 =
         new FiCaSchedulerApp(appAttemptId0, user0, b,
-            b.getActiveUsersManager(), spyRMContext);
+            b.getAbstractUsersManager(), spyRMContext);
     b.submitApplicationAttempt(app0, user0);
 
     final ApplicationAttemptId appAttemptId2 =
         TestUtils.getMockApplicationAttemptId(2, 0);
     FiCaSchedulerApp app2 =
         new FiCaSchedulerApp(appAttemptId2, user1, b,
-            b.getActiveUsersManager(), spyRMContext);
+            b.getAbstractUsersManager(), spyRMContext);
     b.submitApplicationAttempt(app2, user1);
 
     // Setup some nodes
@@ -775,6 +777,7 @@ public class TestLeafQueue {
     Resource clusterResource =
         Resources.createResource(numNodes * (8 * GB), numNodes * 100);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+    when(csContext.getClusterResource()).thenReturn(clusterResource);
 
     // Setup resource-requests so that one application is memory dominant
     // and other application is vcores dominant
@@ -798,7 +801,7 @@ public class TestLeafQueue {
     User queueUser1 = b.getUser(user1);
 
     assertEquals("There should 2 active users!", 2, b
-        .getActiveUsersManager().getNumActiveUsers());
+        .getAbstractUsersManager().getNumActiveUsers());
     // Fill both Nodes as far as we can
     CSAssignment assign;
     do {
@@ -833,7 +836,7 @@ public class TestLeafQueue {
             / (numNodes * 100.0f)
             + queueUser1.getUsed().getMemorySize()
             / (numNodes * 8.0f * GB);
-    assertEquals(expectedRatio, b.getUsageRatio(""), 0.001);
+    assertEquals(expectedRatio, b.getUsersManager().getUsageRatio(""), 0.001);
     // Add another node and make sure consumedRatio is adjusted
     // accordingly.
     numNodes = 3;
@@ -847,7 +850,7 @@ public class TestLeafQueue {
             / (numNodes * 100.0f)
             + queueUser1.getUsed().getMemorySize()
             / (numNodes * 8.0f * GB);
-    assertEquals(expectedRatio, b.getUsageRatio(""), 0.001);
+    assertEquals(expectedRatio, b.getUsersManager().getUsageRatio(""), 0.001);
   }
 
   @Test
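
The usage ratio asserted above is a dominant-resource quantity: each user contributes the larger of its memory share and vcore share against the cluster, and the queue's ratio is the sum over users (user0 is vcore-dominant and user1 memory-dominant in this test, hence the mixed terms). A hedged sketch of the per-user term; the helper name is illustrative, and the real computation lives in UsersManager behind a ResourceCalculator.

    // Dominant share of one user's usage against the cluster (sketch only).
    static float dominantShare(Resource used, Resource cluster) {
      float memShare  = (float) used.getMemorySize() / cluster.getMemorySize();
      float coreShare = (float) used.getVirtualCores() / cluster.getVirtualCores();
      return Math.max(memShare, coreShare);
    }
    // getUsageRatio("") ~= dominantShare(user0Used, cluster)
    //                    + dominantShare(user1Used, cluster)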
@@ -857,6 +860,9 @@ public class TestLeafQueue {
     //unset maxCapacity
     a.setMaxCapacity(1.0f);
 
+    when(csContext.getClusterResource())
+        .thenReturn(Resources.createResource(16 * GB, 32));
+
     // Users
     final String user_0 = "user_0";
     final String user_1 = "user_1";
@@ -866,14 +872,14 @@ public class TestLeafQueue {
         TestUtils.getMockApplicationAttemptId(0, 0); 
     FiCaSchedulerApp app_0 = 
         new FiCaSchedulerApp(appAttemptId_0, user_0, a, 
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_0, user_0);
 
     final ApplicationAttemptId appAttemptId_1 = 
         TestUtils.getMockApplicationAttemptId(1, 0); 
     FiCaSchedulerApp app_1 = 
         new FiCaSchedulerApp(appAttemptId_1, user_1, a, 
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_1, user_1); // different user
 
     // Setup some nodes
@@ -912,7 +918,7 @@ public class TestLeafQueue {
     a.setUserLimitFactor(2);
     
     // There're two active users
-    assertEquals(2, a.getActiveUsersManager().getNumActiveUsers());
+    assertEquals(2, a.getAbstractUsersManager().getNumActiveUsers());
 
     // 1 container to user_0
     applyCSAssignment(clusterResource,
@@ -947,7 +953,7 @@ public class TestLeafQueue {
 
     // app_0 doesn't have outstanding resources, there's only one active user.
     assertEquals("There should only be 1 active user!", 
-        1, a.getActiveUsersManager().getNumActiveUsers());
+        1, a.getAbstractUsersManager().getNumActiveUsers());
 
   }
 
@@ -998,7 +1004,7 @@ public class TestLeafQueue {
               TestUtils.getMockApplicationAttemptId(0, 0);
     FiCaSchedulerApp app_0 =
         new FiCaSchedulerApp(appAttemptId_0, user_0, qb,
-            qb.getActiveUsersManager(), spyRMContext);
+            qb.getAbstractUsersManager(), spyRMContext);
     Map<ApplicationAttemptId, FiCaSchedulerApp> apps = new HashMap<>();
     apps.put(app_0.getApplicationAttemptId(), app_0);
     qb.submitApplicationAttempt(app_0, user_0);
@@ -1009,7 +1015,7 @@ public class TestLeafQueue {
             u0Priority, recordFactory)));
 
     assertEquals("There should only be 1 active user!",
-        1, qb.getActiveUsersManager().getNumActiveUsers());
+        1, qb.getAbstractUsersManager().getNumActiveUsers());
     //get headroom
     applyCSAssignment(clusterResource,
         qb.assignContainers(clusterResource, node_0,
@@ -1026,7 +1032,7 @@ public class TestLeafQueue {
         TestUtils.getMockApplicationAttemptId(2, 0);
     FiCaSchedulerApp app_2 =
         new FiCaSchedulerApp(appAttemptId_2, user_1, qb,
-            qb.getActiveUsersManager(), spyRMContext);
+            qb.getAbstractUsersManager(), spyRMContext);
     apps.put(app_2.getApplicationAttemptId(), app_2);
     Priority u1Priority = TestUtils.createMockPriority(2);
     SchedulerRequestKey u1SchedKey = toSchedulerKey(u1Priority);
@@ -1068,13 +1074,13 @@ public class TestLeafQueue {
         TestUtils.getMockApplicationAttemptId(1, 0);
     FiCaSchedulerApp app_1 =
         new FiCaSchedulerApp(appAttemptId_1, user_0, qb,
-            qb.getActiveUsersManager(), spyRMContext);
+            qb.getAbstractUsersManager(), spyRMContext);
     apps.put(app_1.getApplicationAttemptId(), app_1);
     final ApplicationAttemptId appAttemptId_3 =
         TestUtils.getMockApplicationAttemptId(3, 0);
     FiCaSchedulerApp app_3 =
         new FiCaSchedulerApp(appAttemptId_3, user_1, qb,
-            qb.getActiveUsersManager(), spyRMContext);
+            qb.getAbstractUsersManager(), spyRMContext);
     apps.put(app_3.getApplicationAttemptId(), app_3);
     app_1.updateResourceRequests(Collections.singletonList(
         TestUtils.createResourceRequest(ResourceRequest.ANY, 2*GB, 1, true,
@@ -1103,7 +1109,7 @@ public class TestLeafQueue {
               TestUtils.getMockApplicationAttemptId(4, 0);
     FiCaSchedulerApp app_4 =
               new FiCaSchedulerApp(appAttemptId_4, user_0, qb,
-                      qb.getActiveUsersManager(), spyRMContext);
+                      qb.getAbstractUsersManager(), spyRMContext);
     apps.put(app_4.getApplicationAttemptId(), app_4);
     qb.submitApplicationAttempt(app_4, user_0);
     app_4.updateResourceRequests(Collections.singletonList(
@@ -1126,9 +1132,9 @@ public class TestLeafQueue {
     //testcase3 still active - 2+2+6=10
     assertEquals(10*GB, qb.getUsedResources().getMemorySize());
     //app4 is user 0
-    //maxqueue 16G, userlimit 13G, used 8G, headroom 5G
+    //maxqueue 16G, userlimit 7G, used 8G, headroom 0G
     //(8G used is 6G from this test case - app4, 2 from last test case, app_1)
-    assertEquals(5*GB, app_4.getHeadroom().getMemorySize());
+    assertEquals(0*GB, app_4.getHeadroom().getMemorySize());
   }
 
   @Test
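
The flip from 5 GB to 0 GB headroom follows directly once the lower user limit is plugged into the usual clamp: headroom cannot go negative when a user is already over its limit. As plain arithmetic with the numbers from the comment above (a simplification of the real computation, which also accounts for required resources and other partitions):

    int queueMaxGb = 16, userLimitGb = 7, userUsedGb = 8;  // from the comment above
    // headroom = max(0, min(userLimit, queueMax) - used) = max(0, 7 - 8) = 0
    int headroomGb = Math.max(0, Math.min(userLimitGb, queueMaxGb) - userUsedGb);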
@@ -1147,21 +1153,21 @@ public class TestLeafQueue {
         TestUtils.getMockApplicationAttemptId(0, 0);
     FiCaSchedulerApp app_0 =
         new FiCaSchedulerApp(appAttemptId_0, user_0, a,
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_0, user_0);
 
     final ApplicationAttemptId appAttemptId_1 =
         TestUtils.getMockApplicationAttemptId(1, 0);
     FiCaSchedulerApp app_1 =
         new FiCaSchedulerApp(appAttemptId_1, user_0, a,
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_1, user_0);  // same user
 
     final ApplicationAttemptId appAttemptId_2 =
         TestUtils.getMockApplicationAttemptId(2, 0);
     FiCaSchedulerApp app_2 =
         new FiCaSchedulerApp(appAttemptId_2, user_1, a,
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_2, user_1);
 
     // Setup some nodes
@@ -1247,21 +1253,21 @@ public class TestLeafQueue {
         TestUtils.getMockApplicationAttemptId(0, 0); 
     FiCaSchedulerApp app_0 = 
         new FiCaSchedulerApp(appAttemptId_0, user_0, a, 
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_0, user_0);
 
     final ApplicationAttemptId appAttemptId_1 =
         TestUtils.getMockApplicationAttemptId(1, 0); 
     FiCaSchedulerApp app_1 = 
         new FiCaSchedulerApp(appAttemptId_1, user_0, a, 
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_1, user_0);  // same user
 
     final ApplicationAttemptId appAttemptId_2 = 
         TestUtils.getMockApplicationAttemptId(2, 0); 
     FiCaSchedulerApp app_2 = 
         new FiCaSchedulerApp(appAttemptId_2, user_1, a, 
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_2, user_1);
 
     // Setup some nodes
@@ -1301,7 +1307,7 @@ public class TestLeafQueue {
     // Now, only user_0 should be active since he is the only one with
     // outstanding requests
     assertEquals("There should only be 1 active user!", 
-        1, a.getActiveUsersManager().getNumActiveUsers());
+        1, a.getAbstractUsersManager().getNumActiveUsers());
 
     // 1 container to user_0
     applyCSAssignment(clusterResource,
@@ -1312,8 +1318,8 @@ public class TestLeafQueue {
     assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize());
     assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize());
     // TODO, fix headroom in the future patch
-    assertEquals(1*GB, app_0.getHeadroom().getMemorySize());
-      // User limit = 4G, 2 in use
+    assertEquals(0*GB, app_0.getHeadroom().getMemorySize());
+      // User limit = 2G, 2 in use
     assertEquals(0*GB, app_1.getHeadroom().getMemorySize());
       // the application is not yet active
 
@@ -1325,15 +1331,15 @@ public class TestLeafQueue {
     assertEquals(3*GB, a.getUsedResources().getMemorySize());
     assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize());
     assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize());
-    assertEquals(1*GB, app_0.getHeadroom().getMemorySize()); // 4G - 3G
-    assertEquals(1*GB, app_1.getHeadroom().getMemorySize()); // 4G - 3G
+    assertEquals(0*GB, app_0.getHeadroom().getMemorySize()); // user limit reached
+    assertEquals(0*GB, app_1.getHeadroom().getMemorySize()); // user limit reached
     
     // Submit requests for app_1 and set max-cap
     a.setMaxCapacity(.1f);
     app_2.updateResourceRequests(Collections.singletonList(
         TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, true,
             priority, recordFactory)));
-    assertEquals(2, a.getActiveUsersManager().getNumActiveUsers());
+    assertEquals(2, a.getAbstractUsersManager().getNumActiveUsers());
 
     // No more to user_0 since he is already over user-limit
     // and no more containers to queue since it's already at max-cap
@@ -1352,7 +1358,7 @@ public class TestLeafQueue {
     app_1.updateResourceRequests(Collections.singletonList(     // unset
         TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 0, true,
             priority, recordFactory)));
-    assertEquals(1, a.getActiveUsersManager().getNumActiveUsers());
+    assertEquals(1, a.getAbstractUsersManager().getNumActiveUsers());
     applyCSAssignment(clusterResource,
         a.assignContainers(clusterResource, node_1,
         new ResourceLimits(clusterResource),
@@ -1378,28 +1384,28 @@ public class TestLeafQueue {
         TestUtils.getMockApplicationAttemptId(0, 0); 
     FiCaSchedulerApp app_0 = 
         new FiCaSchedulerApp(appAttemptId_0, user_0, a, 
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_0, user_0);
 
     final ApplicationAttemptId appAttemptId_1 = 
         TestUtils.getMockApplicationAttemptId(1, 0); 
     FiCaSchedulerApp app_1 = 
         new FiCaSchedulerApp(appAttemptId_1, user_0, a, 
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_1, user_0);  // same user
 
     final ApplicationAttemptId appAttemptId_2 = 
         TestUtils.getMockApplicationAttemptId(2, 0); 
     FiCaSchedulerApp app_2 = 
         new FiCaSchedulerApp(appAttemptId_2, user_1, a, 
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_2, user_1);
 
     final ApplicationAttemptId appAttemptId_3 = 
         TestUtils.getMockApplicationAttemptId(3, 0); 
     FiCaSchedulerApp app_3 = 
         new FiCaSchedulerApp(appAttemptId_3, user_2, a, 
-            a.getActiveUsersManager(), spyRMContext);
+            a.getAbstractUsersManager(), spyRMContext);
     a.submitApplicationAttempt(app_3, user_2);
 
     Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(
@@ -1417,7 +1423,8 @@ public class TestLeafQueue {
     Resource clusterResource = 
         Resources.createResource(numNodes * (8*GB), numNodes * 16);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
-    
+    when(csContext.getClusterResource()).thenReturn(clusterResource);
+
     // Setup resource-requests
     Priority priority = TestUtils.createMockPriority(1);
     app_0.updateResourceRequests(Collections.singletonList(
@@ -1744,6 +1751,7 @@ public class TestLeafQueue {
     
     when(csContext.getNode(node_0.getNodeID())).thenReturn(node_0);
     when(csContext.getNode(node_1.getNodeID())).thenReturn(node_1);
+    when(csContext.getClusterResource()).thenReturn(Resource.newInstance(8, 1));
 
     Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(
         app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(),
@@ -3648,7 +3656,7 @@ public class TestLeafQueue {
     final String user = "user1";
     FiCaSchedulerApp app =
         new FiCaSchedulerApp(appAttId, user, queue,
-            queue.getActiveUsersManager(), rmContext);
+            queue.getAbstractUsersManager(), rmContext);
 
     // Resource request
     Resource requestedResource = Resource.newInstance(1536, 2);
@@ -3663,7 +3671,7 @@ public class TestLeafQueue {
     // child of root, its absolute capacity is also 50%.
     queue = createQueue("test2", null, 0.5f, 0.5f);
     app = new FiCaSchedulerApp(appAttId, user, queue,
-        queue.getActiveUsersManager(), rmContext);
+        queue.getAbstractUsersManager(), rmContext);
     app.getAppAttemptResourceUsage().incUsed(requestedResource);
     // In "test2" queue, 1536 used is 30% of "test2" and 15% of the cluster.
     assertEquals(30.0f, app.getResourceUsageReport().getQueueUsagePercentage(),
@@ -3675,7 +3683,7 @@ public class TestLeafQueue {
     // Therefore, "test2.1" capacity is 50% and absolute capacity is 25%.
     AbstractCSQueue qChild = createQueue("test2.1", queue, 0.5f, 0.25f);
     app = new FiCaSchedulerApp(appAttId, user, qChild,
-        qChild.getActiveUsersManager(), rmContext);
+        qChild.getAbstractUsersManager(), rmContext);
     app.getAppAttemptResourceUsage().incUsed(requestedResource);
     // In "test2.1" queue, 1536 used is 60% of "test2.1" and 15% of the cluster.
     assertEquals(60.0f, app.getResourceUsageReport().getQueueUsagePercentage(),
@@ -3699,7 +3707,7 @@ public class TestLeafQueue {
     ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics);
     AbstractCSQueue queue = mock(AbstractCSQueue.class);
     when(queue.getMetrics()).thenReturn(metrics);
-    when(queue.getActiveUsersManager()).thenReturn(activeUsersManager);
+    when(queue.getAbstractUsersManager()).thenReturn(activeUsersManager);
     when(queue.getQueueInfo(false, false)).thenReturn(queueInfo);
     QueueCapacities qCaps = mock(QueueCapacities.class);
     when(qCaps.getAbsoluteCapacity(any())).thenReturn(absCap);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb723bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index c5b3f00..fc1d284 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -1049,7 +1049,7 @@ public class TestNodeLabelContainerAllocation {
     RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b");
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);
     
-    // Each application request 5 * 1GB container
+    // Each application requests 50 * 1GB containers
     am1.allocate("*", 1 * GB, 50, new ArrayList<ContainerId>());
     
     // NM1 do 50 heartbeats
@@ -1169,12 +1169,14 @@ public class TestNodeLabelContainerAllocation {
     csConf.setAccessibleNodeLabels(A, toSet("x"));
     csConf.setCapacityByLabel(A, "x", 50);
     csConf.setMaximumCapacityByLabel(A, "x", 50);
+    csConf.setUserLimit(A, 200);
 
     final String B = CapacitySchedulerConfiguration.ROOT + ".b";
     csConf.setCapacity(B, 50);
     csConf.setAccessibleNodeLabels(B, toSet("x"));
     csConf.setCapacityByLabel(B, "x", 50);
     csConf.setMaximumCapacityByLabel(B, "x", 50);
+    csConf.setUserLimit(B, 200);
 
     // set node -> label
     mgr.addToCluserNodeLabels(ImmutableSet.of(
@@ -1207,6 +1209,7 @@ public class TestNodeLabelContainerAllocation {
 
     SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId());
 
+    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
     for (int i = 0; i < 50; i++) {
       cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
     }
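
Two supporting tweaks in this test are worth reading together: setUserLimit(..., 200) raises minimum-user-limit-percent above 100 so a single user may occupy the whole queue (meaning user limits do not interfere with the per-label allocation this test measures), and the extra NodeUpdateSchedulerEvent plausibly gives the lazily recomputed user limit one scheduling cycle to settle before the measured 50-heartbeat loop. A sketch of the config intent, reusing the constants from the hunk above:

    // Let one user consume 100% of each queue, so per-user limits do not
    // constrain what this test actually measures (per-label allocation).
    csConf.setUserLimit(A, 200);  // root.a
    csConf.setUserLimit(B, 200);  // root.b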
@@ -1250,7 +1253,7 @@ public class TestNodeLabelContainerAllocation {
       cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
     }
   }
-  
+
   private void waitSchedulerNodeJoined(MockRM rm, int expectedNodeNum)
       throws InterruptedException {
     int totalWaitTick = 100; // wait 10 sec at most.

