hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From asur...@apache.org
Subject [44/50] [abbrv] hadoop git commit: YARN-7780. Documentation for Placement Constraints. (Konstantinos Karanasos via asuresh)
Date Tue, 30 Jan 2018 18:10:26 GMT
YARN-7780. Documentation for Placement Constraints. (Konstantinos Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ae4cc99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ae4cc99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ae4cc99

Branch: refs/heads/YARN-6592
Commit: 6ae4cc995afb6e27922f4515b68033efa544afa7
Parents: 06d22eb
Author: Arun Suresh <asuresh@apache.org>
Authored: Tue Jan 30 07:38:27 2018 -0800
Committer: Arun Suresh <asuresh@apache.org>
Committed: Tue Jan 30 07:54:37 2018 -0800

----------------------------------------------------------------------
 .../yarn/api/resource/PlacementConstraints.java |  17 ++-
 .../hadoop/yarn/conf/YarnConfiguration.java     |  11 +-
 .../site/markdown/PlacementConstraints.md.vm    | 149 +++++++++++++++++++
 3 files changed, 164 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae4cc99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
index 70a8080..c1549c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
@@ -96,8 +96,9 @@ public final class PlacementConstraints {
    * Creates a constraint that restricts the number of allocations within a
    * given scope (e.g., node or rack).
    *
-   * For example, {@code cardinality(NODE, 3, 10)}, restricts the number of
-   * allocations per node to be no less than 3 and no more than 10.
+   * For example, {@code cardinality(NODE, 3, 10, "zk")} is satisfied on nodes
+   * where there are no less than 3 allocations with tag "zk" and no more than
+   * 10.
    *
    * @param scope the scope of the constraint
    * @param minCardinality determines the minimum number of allocations within
@@ -132,7 +133,7 @@ public final class PlacementConstraints {
 
   /**
    * Similar to {@link #cardinality(String, int, int, String...)}, but
-   * determines only the maximum cardinality (the minimum can be as low as 0).
+   * determines only the maximum cardinality (the minimum cardinality is 0).
    *
    * @param scope the scope of the constraint
    * @param maxCardinality determines the maximum number of allocations within
@@ -150,7 +151,7 @@ public final class PlacementConstraints {
    *
    * Consider a set of nodes N that belongs to the scope specified in the
    * constraint. If the target expressions are satisfied at least minCardinality
-   * times and at most max-cardinality times in the node set N, then the
+   * times and at most maxCardinality times in the node set N, then the
    * constraint is satisfied.
    *
    * For example, {@code targetCardinality(RACK, 2, 10, allocationTag("zk"))},
@@ -197,7 +198,7 @@ public final class PlacementConstraints {
 
     /**
      * Constructs a target expression on a node partition. It is satisfied if
-     * the specified node partition has one of the specified nodePartitions
+     * the specified node partition has one of the specified nodePartitions.
      *
      * @param nodePartitions the set of values that the attribute should take
      *          values from
@@ -211,7 +212,7 @@ public final class PlacementConstraints {
 
     /**
      * Constructs a target expression on an allocation tag. It is satisfied if
-     * the there are allocations with one of the given tags.
+     * there are allocations with one of the given tags.
      *
      * @param allocationTags the set of tags that the attribute should take
      *          values from
@@ -224,8 +225,8 @@ public final class PlacementConstraints {
 
     /**
      * Constructs a target expression on an allocation tag. It is satisfied if
-     * the there are allocations with one of the given tags. Comparing to
-     * {@link PlacementTargets#allocationTag(String...)}, this only check tags
+     * there are allocations with one of the given tags. Comparing to
+     * {@link PlacementTargets#allocationTag(String...)}, this only checks tags
      * within the application.
      *
      * @param allocationTags the set of tags that the attribute should take

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae4cc99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f5bb2c7..118f9fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -532,6 +532,12 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_SCHEDULER = 
     RM_PREFIX + "scheduler.class";
 
+  /** Enable rich placement constraints. */
+  public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
+      RM_PREFIX + "placement-constraints.enabled";
+
+  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = false;
+
   /** Placement Algorithm. */
   public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS =
       RM_PREFIX + "placement-constraints.algorithm.class";
@@ -540,11 +546,6 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR =
       RM_PREFIX + "placement-constraints.algorithm.iterator";
 
-  public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
-      RM_PREFIX + "placement-constraints.enabled";
-
-  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = false;
-
   public static final String RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS =
       RM_PREFIX + "placement-constraints.retry-attempts";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae4cc99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
new file mode 100644
index 0000000..7926eab
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
@@ -0,0 +1,149 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+#set ( $H3 = '###' )
+#set ( $H4 = '####' )
+#set ( $H5 = '#####' )
+
+Placement Constraints
+=====================
+
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
+
+
+Overview
+--------
+
+YARN allows applications to specify placement constraints in the form of data locality (preference
to specific nodes or racks) or (non-overlapping) node labels. This document focuses on more
expressive placement constraints in YARN. Such constraints can be crucial for the performance
and resilience of applications, especially those that include long-running containers, such
as services, machine-learning and streaming workloads.
+
+For example, it may be beneficial to co-locate the allocations of a job on the same rack
(*affinity* constraints) to reduce network costs, spread allocations across machines (*anti-affinity*
constraints) to minimize resource interference, or allow up to a specific number of allocations
in a node group (*cardinality* constraints) to strike a balance between the two. Placement
decisions also affect resilience. For example, allocations placed within the same cluster
upgrade domain would go offline simultaneously.
+
+The applications can specify constraints without requiring knowledge of the underlying topology
of the cluster (e.g., one does not need to specify the specific node or rack where their containers
should be placed with constraints) or the other applications deployed. Currently **intra-application**
constraints are supported, but the design that is followed is generic and support for constraints
across applications will soon be added. Moreover, all constraints at the moment are **hard**,
that is, if the constraints for a container cannot be satisfied due to the current cluster
condition or conflicting constraints, the container request gets rejected.
+
+Note that in this document we use the notion of “allocation” to refer to a unit of resources
(e.g., CPU and memory) that gets allocated in a node. In the current implementation of YARN,
an allocation corresponds to a single container. However, in case an application uses an allocation
to spawn more than one container, an allocation could correspond to multiple containers.
+
+
+Quick Guide
+-----------
+
+We first describe how to enable scheduling with placement constraints and then provide examples
of how to experiment with this feature using the distributed shell, an application that allows
users to run a given shell command on a set of containers.
+
+$H3 Enabling placement constraints
+
+To enable placement constraints, the following property has to be set to **true** in **conf/yarn-site.xml**:
+
+| Property | Description | Default value |
+|:-------- |:----------- |:------------- |
+| `yarn.resourcemanager.placement-constraints.enabled` | Enables rich placement constraints.
| `false` |
+
+
+Further, the user can choose between the following two alternatives for placing containers
with constraints:
+
+* **Placement processor:** Following this approach, the placement of containers with constraints
is determined as a pre-processing step before the capacity or the fair scheduler is called.
Once the placement is decided, the capacity/fair scheduler is invoked to perform the actual
allocation. The advantage of this approach is that it supports all constraint types (affinity,
anti-affinity, cardinality). Moreover, it considers multiple containers at a time, which makes
it possible to satisfy more constraints than a container-at-a-time approach can achieve. As it sits outside
the main scheduler, it can be used by both the capacity and fair schedulers. Note that at
the moment it does not account for task priorities within an application, given that such
priorities might be conflicting with the placement constraints.
+* **Placement allocator in capacity scheduler:** This approach places containers with constraints
within the capacity scheduler. It currently supports anti-affinity constraints (no affinity
or cardinality) and places one container at a time. However, it supports traditional task
priorities within an application.
+
+The placement processor approach supports a wider range of constraints and can allow more
containers to be placed especially when applications have demanding constraints or the cluster
is highly-utilized (due to considering multiple containers at a time). However, if respecting
task priority within an application is important for the user and the capacity scheduler is
used, then the placement allocator in the capacity scheduler should be used instead.
+
+By default, the placement processor approach is enabled. To use the placement allocator in
the capacity scheduler instead, the following parameter has to be set to **true** in the **conf/capacity-scheduler.xml**:
+
+| Property | Description | Default value |
+|:-------- |:----------- |:------------- |
+| `yarn.scheduler.capacity.scheduling-request.allowed` | When set to false, the placement
processor is used; when set to true, the allocator inside the capacity scheduler is used.
| `false` |
+
+
+
+$H3 Experimenting with placement constraints using distributed shell
+
+Users can experiment with placement constraints by using the distributed shell application
through the following command:
+
+```
+$ yarn org.apache.hadoop.yarn.applications.distributedshell.Client -jar share/hadoop/yarn/hadoop-yarn-applications-distributedshell-${project.version}.jar
-shell_command sleep -shell_args 10 -placement_spec PlacementSpec
+```
+
+where **PlacementSpec** is of the form:
+
+```
+PlacementSpec => "" | KeyVal;PlacementSpec
+KeyVal        => SourceTag=Constraint
+SourceTag     => String
+Constraint    => NumContainers | NumContainers,"IN",Scope,TargetTag | NumContainers,"NOTIN",Scope,TargetTag
| NumContainers,"CARDINALITY",Scope,TargetTag,MinCard,MaxCard
+NumContainers => int
+Scope         => "NODE" | "RACK"
+TargetTag     => String
+MinCard       => int
+MaxCard       => int
+```
+
+Note that when the `-placement_spec` argument is specified in the distributed shell command,
the `-num-containers` argument should not be used. In case `-num-containers` argument is used
in conjunction with `-placement_spec`, the former is ignored. This is because in PlacementSpec,
we determine the number of containers per tag, making the `-num-containers` redundant and
possibly conflicting. Moreover, if `-placement_spec` is used, all containers will be requested
with GUARANTEED execution type.
+
+An example of PlacementSpec is the following:
+```
+zk=3,NOTIN,NODE,zk:hbase=5,IN,RACK,zk:spark=7,CARDINALITY,NODE,hbase,1,3
+```
+The above encodes three constraints:
+* place 3 containers with tag "zk" (standing for ZooKeeper) with node anti-affinity to each
other, i.e., do not place more than one container per node (notice that in this first constraint,
the SourceTag and the TargetTag of the constraint coincide);
+* place 5 containers with tag "hbase" with affinity to a rack on which containers with tag
"zk" are running (i.e., an "hbase" container should be placed at a rack where a "zk"
container is running, given that "zk" is the TargetTag of the second constraint);
+* place 7 containers with tag "spark" in nodes that have at least one, but no more than three,
containers with tag "hbase".
+
+
+
+Defining Placement Constraints
+------------------------------
+
+$H3 Allocation tags
+
+Allocation tags are string tags that an application can associate with (groups of) its containers.
Tags are used to identify components of applications. For example, an HBase Master allocation
can be tagged with "hbase-m", and Region Servers with "hbase-rs". Other examples are "latency-critical"
to refer to the more general demands of the allocation, or "app_0041" to denote the job ID.
Allocation tags play a key role in constraints, as they make it possible to refer to multiple allocations
that share a common tag.
+
+Note that instead of using the `ResourceRequest` object to define allocation tags, we use
the new `SchedulingRequest` object. This has many similarities with the `ResourceRequest`,
but better separates the sizing of the requested allocations (number and size of allocations,
priority, execution type, etc.), and the constraints dictating how these allocations should
be placed (resource name, relaxed locality). Applications can still use `ResourceRequest`
objects, but in order to define allocation tags and constraints, they need to use the `SchedulingRequest`
object. Within a single `AllocateRequest`, an application should use either the `ResourceRequest`
or the `SchedulingRequest` objects, but not both of them.
+
+$H4 Differences between node labels, node attributes and allocation tags
+
+The difference between allocation tags and node labels or node attributes (YARN-3409), is
that allocation tags are attached to allocations and not to nodes. When an allocation gets
allocated to a node by the scheduler, the set of tags of that allocation are automatically
added to the node for the duration of the allocation. Hence, a node inherits the tags of the
allocations that are currently allocated to the node. Likewise, a rack inherits the tags of
its nodes. Moreover, similar to node labels and unlike node attributes, allocation tags have
no value attached to them. As we show below, our constraints can refer to allocation tags,
as well as node labels and node attributes.
+
+
+$H3 Placement constraints API
+
+Applications can use the public API in the `PlacementConstraints` class to construct placement
constraints. Before describing the methods for building constraints, we describe the methods
of the `PlacementTargets` class that are used to construct the target expressions that will
then be used in constraints:
+
+| Method | Description |
+|:------ |:----------- |
+| `allocationTag(String... allocationTags)` | Constructs a target expression on an allocation
tag. It is satisfied if there are allocations with one of the given tags. |
+| `allocationTagToIntraApp(String... allocationTags)` | Similar to `allocationTag(String...)`,
but targeting only the containers of the application that will use this target (intra-application
constraints). |
+| `nodePartition(String... nodePartitions)` | Constructs a target expression on a node partition.
It is satisfied for nodes that belong to one of the `nodePartitions`. |
+| `nodeAttribute(String attributeKey, String... attributeValues)` | Constructs a target expression
on a node attribute. It is satisfied if the specified node attribute has one of the specified
values. |
+
+Note that the `nodeAttribute` method above is not yet functional, as it requires the ongoing
node attributes feature.
+
+The methods of the `PlacementConstraints` class for building constraints are the following:
+
+| Method | Description |
+|:------ |:----------- |
+| `targetIn(String scope, TargetExpression... targetExpressions)` | Creates a constraint
that requires allocations to be placed on nodes that satisfy all target expressions within
the given scope (e.g., node or rack). For example, `targetIn(RACK, allocationTag("hbase-m"))`,
allows allocations on nodes that belong to a rack that has at least one allocation with tag
"hbase-m". |
+| `targetNotIn(String scope, TargetExpression... targetExpressions)` | Creates a constraint
that requires allocations to be placed on nodes that belong to a scope (e.g., node or rack)
that does not satisfy any of the target expressions. |
+| `cardinality(String scope, int minCardinality, int maxCardinality, String... allocationTags)`
| Creates a constraint that restricts the number of allocations within a given scope (e.g.,
node or rack). For example, `cardinality(NODE, 3, 10, "zk")` is satisfied on nodes where
there are no less than 3 allocations with tag "zk" and no more than 10. |
+| `minCardinality(String scope, int minCardinality, String... allocationTags)` | Similar
to `cardinality(String, int, int, String...)`, but determines only the minimum cardinality
(the maximum cardinality is unbound). |
+| `maxCardinality(String scope, int maxCardinality, String... allocationTags)` | Similar
to `cardinality(String, int, int, String...)`, but determines only the maximum cardinality
(the minimum cardinality is 0). |
+| `targetCardinality(String scope, int minCardinality, int maxCardinality, String... allocationTags)`
| This constraint generalizes the cardinality and target constraints. Consider a set of nodes
N that belongs to the scope specified in the constraint. If the target expressions are satisfied
at least minCardinality times and at most maxCardinality times in the node set N, then the
constraint is satisfied. For example, `targetCardinality(RACK, 2, 10, allocationTag("zk"))`,
requires an allocation to be placed within a rack that has at least 2 and at most 10 other
allocations with tag "zk". |
+
+The `PlacementConstraints` class also includes methods for building compound constraints (AND/OR
expressions with multiple constraints). Adding support for compound constraints is work in
progress.
+
+
+$H3 Specifying constraints in applications
+
+Applications have to specify the containers for which each constraint will be enabled. To
this end, applications can provide a mapping from a set of allocation tags (source tags) to
a placement constraint. For example, an entry of this mapping could be "hbase"->constraint1,
which means that constraint1 will be applied when scheduling each allocation with tag "hbase".
+
+When using the placement processor approach (see [Enabling placement constraints](#Enabling_placement_constraints)),
this constraint mapping is specified within the `RegisterApplicationMasterRequest`.
+
+When using the placement allocator in the capacity scheduler, the constraints can also be
added at each `SchedulingRequest` object. Each such constraint is valid for the tag of that
scheduling request. In case constraints are specified both at the `RegisterApplicationMasterRequest`
and the scheduling requests, the latter override the former.
+


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message