hadoop-yarn-issues mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From "ASF GitHub Bot (JIRA)" <j...@apache.org>
Subject [jira] [Commented] (YARN-4597) Add SCHEDULE to NM container lifecycle
Date Fri, 11 Nov 2016 19:40:09 GMT

    [ https://issues.apache.org/jira/browse/YARN-4597?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15657950#comment-15657950
] 

ASF GitHub Bot commented on YARN-4597:
--------------------------------------

Github user kambatla commented on a diff in the pull request:

    https://github.com/apache/hadoop/pull/143#discussion_r87645581
  
    --- Diff: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ResourceUtilizationManager.java
---
    @@ -0,0 +1,163 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
    +
    +import org.apache.hadoop.yarn.api.records.ExecutionType;
    +import org.apache.hadoop.yarn.api.records.Resource;
    +import org.apache.hadoop.yarn.api.records.ResourceUtilization;
    +import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
    +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
    +import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
    +
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
    +/**
    + * This class abstracts out how a container contributes to Resource Utilization.
    + * It is used by the {@link ContainerScheduler} to determine which
    + * OPPORTUNISTIC containers to kill to make room for a GUARANTEED
    + * container.
    + * It currently equates resource utilization with the total resource allocated
    + * to the container. Another implementation might choose to use the actual
    + * resource utilization.
    + */
    +
    +public class ResourceUtilizationManager {
    +
    +  private static final Logger LOG =
    +      LoggerFactory.getLogger(ResourceUtilizationManager.class);
    +
    +  private ResourceUtilization containersAllocation;
    +  private ContainerScheduler scheduler;
    +
    +  ResourceUtilizationManager(ContainerScheduler scheduler) {
    +    this.containersAllocation = ResourceUtilization.newInstance(0, 0, 0.0f);
    +    this.scheduler = scheduler;
    +  }
    +
    +  /**
    +   * Get the current accumulated utilization. Currently it is the sum of
    +   * the total resources allocated to containers.
    +   * @return ResourceUtilization Resource Utilization.
    +   */
    +  public ResourceUtilization getCurrentUtilization() {
    +    return this.containersAllocation;
    +  }
    +
    +  /**
    +   * Add Container's resources to the accumulated Utilization.
    +   * @param container Container.
    +   */
    +  public void addContainerResources(Container container) {
    +    increaseResourceUtilization(
    +        getContainersMonitor(), this.containersAllocation,
    +        container.getResource());
    +  }
    +
    +  /**
    +   * Subtract Container's resources from the accumulated Utilization.
    +   * @param container Container.
    +   */
    +  public void subtractContainerResource(Container container) {
    +    decreaseResourceUtilization(
    +        getContainersMonitor(), this.containersAllocation,
    +        container.getResource());
    +  }
    +
    +  /**
    +   * Check if NM has resources available currently to run the container.
    +   * @param container Container.
    +   * @return True, if NM has resources available currently to run the container.
    +   */
    +  public boolean hasResourcesAvailable(Container container) {
    +    long pMemBytes = container.getResource().getMemorySize() * 1024 * 1024L;
    +    return hasResourcesAvailable(pMemBytes,
    +        (long) (getContainersMonitor().getVmemRatio()* pMemBytes),
    +        container.getResource().getVirtualCores());
    +  }
    +
    +  private boolean hasResourcesAvailable(long pMemBytes, long vMemBytes,
    +      int cpuVcores) {
    +    // Check physical memory.
    +    if (LOG.isDebugEnabled()) {
    +      LOG.debug("pMemCheck [current={} + asked={} > allowed={}]",
    +          this.containersAllocation.getPhysicalMemory(),
    +          (pMemBytes >> 20),
    +          (getContainersMonitor().getPmemAllocatedForContainers() >> 20));
    +    }
    +    if (this.containersAllocation.getPhysicalMemory() +
    +        (int) (pMemBytes >> 20) >
    +        (int) (getContainersMonitor()
    +            .getPmemAllocatedForContainers() >> 20)) {
    +      return false;
    +    }
    +
    +    if (LOG.isDebugEnabled()) {
    +      LOG.debug("before vMemCheck" +
    +              "[isEnabled={}, current={} + asked={} > allowed={}]",
    +          getContainersMonitor().isVmemCheckEnabled(),
    +          this.containersAllocation.getVirtualMemory(), (vMemBytes >> 20),
    +          (getContainersMonitor().getVmemAllocatedForContainers() >> 20));
    +    }
    +    // Check virtual memory.
    +    if (getContainersMonitor().isVmemCheckEnabled() &&
    +        this.containersAllocation.getVirtualMemory() +
    +            (int) (vMemBytes >> 20) >
    +            (int) (getContainersMonitor()
    +                .getVmemAllocatedForContainers() >> 20)) {
    +      return false;
    +    }
    +
    +    float vCores = (float) cpuVcores /
    +        getContainersMonitor().getVCoresAllocatedForContainers();
    +    if (LOG.isDebugEnabled()) {
    +      LOG.debug("before cpuCheck [asked={} > allowed={}]",
    +          this.containersAllocation.getCPU(), vCores);
    +    }
    +    // Check CPU.
    +    if (this.containersAllocation.getCPU() + vCores > 1.0f) {
    +      return false;
    +    }
    +    return true;
    +  }
    +
    +  public ContainersMonitor getContainersMonitor() {
    +    return this.scheduler.getContainersMonitor();
    +  }
    +
    +  public static void increaseResourceUtilization(
    +      ContainersMonitor containersMonitor, ResourceUtilization resourceAlloc,
    +      Resource resource) {
    +    float vCores = (float) resource.getVirtualCores() /
    +        containersMonitor.getVCoresAllocatedForContainers();
    +    int vmem = (int) (resource.getMemorySize()
    +        * containersMonitor.getVmemRatio());
    +    resourceAlloc.addTo((int)resource.getMemorySize(), vmem, vCores);
    +  }
    +
    +  public static void decreaseResourceUtilization(
    --- End diff --
    
    Shouldn't this be a non-static method on ResourceUtilization instead? 


> Add SCHEDULE to NM container lifecycle
> --------------------------------------
>
>                 Key: YARN-4597
>                 URL: https://issues.apache.org/jira/browse/YARN-4597
>             Project: Hadoop YARN
>          Issue Type: New Feature
>          Components: nodemanager
>            Reporter: Chris Douglas
>            Assignee: Arun Suresh
>              Labels: oct16-hard
>         Attachments: YARN-4597.001.patch, YARN-4597.002.patch, YARN-4597.003.patch, YARN-4597.004.patch,
YARN-4597.005.patch, YARN-4597.006.patch, YARN-4597.007.patch, YARN-4597.008.patch, YARN-4597.009.patch
>
>
> Currently, the NM immediately launches containers after resource localization. Several
features could be more cleanly implemented if the NM included a separate stage for reserving
resources.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)

---------------------------------------------------------------------
To unsubscribe, e-mail: yarn-issues-unsubscribe@hadoop.apache.org
For additional commands, e-mail: yarn-issues-help@hadoop.apache.org


Mime
View raw message