From: ncole@apache.org
To: commits@ambari.apache.org
Reply-To: ambari-dev@ambari.apache.org
Date: Wed, 07 Dec 2016 15:00:39 -0000
Subject: [41/50] [abbrv] ambari git commit: AMBARI-19094. HDP 3.0 support for YARN/MR with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)

AMBARI-19094. HDP 3.0 support for YARN/MR with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)

Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a62119ab
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a62119ab
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a62119ab

Branch: refs/heads/branch-dev-patch-upgrade
Commit: a62119ab9f1abc355bc99ee0fc71afac6723d827
Parents: 2a710b3
Author: Alejandro Fernandez
Authored: Tue Dec 6 16:04:56 2016 -0800
Committer: Alejandro Fernandez
Committed: Tue Dec 6 16:54:11 2016 -0800

----------------------------------------------------------------------
 .../HDP/3.0/services/YARN/YARN_widgets.json     | 670 +++++++++++++++
 .../YARN/configuration-mapred/mapred-env.xml    |  51 ++
 .../YARN/configuration-mapred/mapred-site.xml   | 134 +++
 .../YARN/configuration/capacity-scheduler.xml   |  71 ++
 .../YARN/configuration/ranger-yarn-audit.xml    | 177 ++++
 .../ranger-yarn-plugin-properties.xml           |  82 ++
 .../configuration/ranger-yarn-policymgr-ssl.xml |  66 ++
 .../YARN/configuration/ranger-yarn-security.xml |  58 ++
 .../services/YARN/configuration/yarn-env.xml    | 200 +++++
 .../services/YARN/configuration/yarn-log4j.xml  | 103 +++
 .../services/YARN/configuration/yarn-site.xml   | 814 +++++++++++++++++++
 .../stacks/HDP/3.0/services/YARN/kerberos.json  | 278 +++++++
 .../stacks/HDP/3.0/services/YARN/metainfo.xml   | 173 ++++
 .../YARN/quicklinks-mapred/quicklinks.json      |  80 ++
 .../services/YARN/quicklinks/quicklinks.json    |  80 ++
 .../3.0/services/YARN/themes-mapred/theme.json  | 132 +++
 .../HDP/3.0/services/YARN/themes/theme.json     | 250 ++++++
 17 files changed, 3419 insertions(+)
----------------------------------------------------------------------
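The YARN_widgets.json file added first in this diff derives each displayed widget value from raw ResourceManager and NodeManager metrics with a small ${...} arithmetic expression. The sketch below shows the calculation the Memory Utilization and CPU Utilization widgets encode, using hypothetical sample values rather than Ambari's actual widget engine; the Container Failures and App Failures widgets follow the same pattern of one rate divided by the sum of all outcome metrics. (A companion consistency check over the whole descriptor appears after the diff.)

def percent_used(allocated, available):
    # allocated / (allocated + available) * 100, as in the Memory Utilization
    # and CPU Utilization widget value expressions.
    total = allocated + available
    if total == 0:
        return 0.0  # nothing reported yet; avoid division by zero
    return 100.0 * allocated / total

# Hypothetical ResourceManager queue metric samples:
allocated_mb = 24576   # yarn.QueueMetrics.Queue=root.AllocatedMB
available_mb = 8192    # yarn.QueueMetrics.Queue=root.AvailableMB
print(round(percent_used(allocated_mb, available_mb), 1))  # -> 75.0
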
http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json new file mode 100644 index 0000000..782f21d --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json @@ -0,0 +1,670 @@ +{ + "layouts": [ + { + "layout_name": "default_yarn_dashboard", + "display_name": "Standard YARN Dashboard", + "section_name": "YARN_SUMMARY", + "widgetLayoutInfo": [ + { + "widget_name": "Memory Utilization", + "description": "Percentage of total memory allocated to containers running in the cluster.", + "widget_type": "GRAPH", + "is_visible": true, + "metrics": [ + { + "name": "yarn.QueueMetrics.Queue=root.AllocatedMB", + "metric_path": "metrics/yarn/Queue/root/AllocatedMB", + "service_name": "YARN", + "component_name": "RESOURCEMANAGER", + "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE" + }, + { + "name": "yarn.QueueMetrics.Queue=root.AvailableMB", + "metric_path": "metrics/yarn/Queue/root/AvailableMB", + "service_name": "YARN", + "component_name": "RESOURCEMANAGER", + "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE" + } + ], + "values": [ + { + "name": "Memory Utilization", + "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}" + } + ], + "properties": { + "display_unit": "%", + "graph_type": "LINE", + "time_range": "1" + } + }, + { + "widget_name": "CPU Utilization", + "description": "Percentage of total virtual cores allocated to containers running in the cluster.", + "widget_type": "GRAPH", + "is_visible": true, + "metrics": [ + { + "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores", + "metric_path": "metrics/yarn/Queue/root/AllocatedVCores", + "service_name": "YARN", + "component_name": "RESOURCEMANAGER", + "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE" + }, + { + "name": "yarn.QueueMetrics.Queue=root.AvailableVCores", + "metric_path": "metrics/yarn/Queue/root/AvailableVCores", + "service_name": "YARN", + "component_name": "RESOURCEMANAGER", + "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE" + } + ], + "values": [ + { + "name": "Total Allocatable CPU Utilized across NodeManager", + "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}" + } + ], + "properties": { + "display_unit": "%", + "graph_type": "LINE", + "time_range": "1" + } + }, + { + "widget_name": "Bad Local Disks", + "description": "Number of unhealthy local disks across all NodeManagers.", + "widget_type": "NUMBER", + "is_visible": true, + "metrics": [ + { + "name": "yarn.NodeManagerMetrics.BadLocalDirs", + "metric_path": "metrics/yarn/BadLocalDirs", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.BadLogDirs", + "metric_path": "metrics/yarn/BadLogDirs", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "Number of unhealthy local disks for NodeManager", + "value": "${yarn.NodeManagerMetrics.BadLocalDirs + yarn.NodeManagerMetrics.BadLogDirs}" + } + ], + "properties": { + "display_unit": 
"" + } + }, + { + "widget_name": "Container Failures", + "description": "Percentage of all containers failing in the cluster.", + "widget_type": "GRAPH", + "is_visible": true, + "metrics": [ + { + "name": "yarn.NodeManagerMetrics.ContainersFailed._rate", + "metric_path": "metrics/yarn/ContainersFailed._rate", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate", + "metric_path": "metrics/yarn/ContainersCompleted._rate", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate", + "metric_path": "metrics/yarn/ContainersLaunched._rate", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.ContainersIniting._sum", + "metric_path": "metrics/yarn/ContainersIniting._sum", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.ContainersKilled._rate", + "metric_path": "metrics/yarn/ContainersKilled._rate", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.ContainersRunning._sum", + "metric_path": "metrics/yarn/ContainersRunning._sum", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "Container Failures", + "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}" + } + ], + "properties": { + "display_unit": "%", + "graph_type": "LINE", + "time_range": "1" + } + }, + { + "widget_name": "App Failures", + "description": "Percentage of all launched applications failing in the cluster.", + "widget_type": "GRAPH", + "is_visible": true, + "metrics": [ + { + "name": "yarn.QueueMetrics.Queue=root.AppsFailed._rate", + "metric_path": "metrics/yarn/Queue/root/AppsFailed._rate", + "service_name": "YARN", + "component_name": "RESOURCEMANAGER", + "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE" + }, + { + "name": "yarn.QueueMetrics.Queue=root.AppsKilled._rate", + "metric_path": "metrics/yarn/Queue/root/AppsKilled._rate", + "service_name": "YARN", + "component_name": "RESOURCEMANAGER", + "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE" + }, + { + "name": "yarn.QueueMetrics.Queue=root.AppsPending", + "metric_path": "metrics/yarn/Queue/root/AppsPending", + "service_name": "YARN", + "component_name": "RESOURCEMANAGER", + "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE" + }, + { + "name": "yarn.QueueMetrics.Queue=root.AppsRunning", + "metric_path": "metrics/yarn/Queue/root/AppsRunning", + "service_name": "YARN", + "component_name": "RESOURCEMANAGER", + "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE" + }, + { + "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted._rate", + "metric_path": "metrics/yarn/Queue/root/AppsSubmitted._rate", + "service_name": "YARN", + "component_name": "RESOURCEMANAGER", + "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE" + }, + { + "name": "yarn.QueueMetrics.Queue=root.AppsCompleted._rate", + "metric_path": "metrics/yarn/Queue/root/AppsCompleted._rate", + "service_name": "YARN", + "component_name": "RESOURCEMANAGER", + 
"host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE" + } + ], + "values": [ + { + "name": "App Failures", + "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed._rate/(yarn.QueueMetrics.Queue=root.AppsFailed._rate + yarn.QueueMetrics.Queue=root.AppsKilled._rate + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted._rate + yarn.QueueMetrics.Queue=root.AppsCompleted._rate)) * 100}" + } + ], + "properties": { + "display_unit": "%", + "graph_type": "LINE", + "time_range": "1" + } + }, + { + "widget_name": "Pending Apps", + "description": "Count of applications waiting for cluster resources to become available.", + "widget_type": "GRAPH", + "is_visible": true, + "metrics": [ + { + "name": "yarn.QueueMetrics.Queue=root.AppsPending", + "metric_path": "metrics/yarn/Queue/root/AppsPending", + "service_name": "YARN", + "component_name": "RESOURCEMANAGER", + "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE" + } + ], + "values": [ + { + "name": "Pending Apps", + "value": "${yarn.QueueMetrics.Queue=root.AppsPending}" + } + ], + "properties": { + "display_unit": "Apps", + "graph_type": "LINE", + "time_range": "1" + } + }, + { + "widget_name": "Cluster Memory", + "description": "Percentage of memory used across all NodeManager hosts.", + "widget_type": "GRAPH", + "is_visible": true, + "metrics": [ + { + "name": "mem_total._sum", + "metric_path": "metrics/memory/mem_total._avg", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "mem_free._sum", + "metric_path": "metrics/memory/mem_free._avg", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "Memory utilization", + "value": "${((mem_total._sum - mem_free._sum)/mem_total._sum) * 100}" + } + ], + "properties": { + "display_unit": "%", + "graph_type": "LINE", + "time_range": "1" + } + }, + { + "widget_name": "Cluster Disk", + "description": "Sum of disk throughput for all NodeManager hosts.", + "widget_type": "GRAPH", + "is_visible": true, + "metrics": [ + { + "name": "read_bps._sum", + "metric_path": "metrics/disk/read_bps._sum", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "write_bps._sum", + "metric_path": "metrics/disk/write_bps._sum", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "Read throughput", + "value": "${read_bps._sum/1048576}" + }, + { + "name": "Write throughput", + "value": "${write_bps._sum/1048576}" + } + ], + "properties": { + "display_unit": "Mbps", + "graph_type": "LINE", + "time_range": "1" + } + }, + { + "widget_name": "Cluster Network", + "description": "Average of Network utilized across all NodeManager hosts.", + "default_section_name": "YARN_SUMMARY", + "widget_type": "GRAPH", + "is_visible": true, + "metrics": [ + { + "name": "pkts_in._avg", + "metric_path": "metrics/network/pkts_in._avg", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "pkts_out._avg", + "metric_path": "metrics/network/pkts_out._avg", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "Packets In", + "value": "${pkts_in._avg}" + }, + { + "name": "Packets Out", + "value": "${pkts_out._avg}" + } + ], + "properties": { + "graph_type": "LINE", + "time_range": "1" + } + }, + { + "widget_name": "Cluster CPU", + "description": "Percentage of CPU utilized across all NodeManager hosts.", + "default_section_name": 
"YARN_SUMMARY", + "widget_type": "GRAPH", + "is_visible": true, + "metrics": [ + { + "name": "cpu_system._sum", + "metric_path": "metrics/cpu/cpu_system._sum", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "cpu_user._sum", + "metric_path": "metrics/cpu/cpu_user._sum", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "cpu_nice._sum", + "metric_path": "metrics/cpu/cpu_nice._sum", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "cpu_idle._sum", + "metric_path": "metrics/cpu/cpu_idle._sum", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "cpu_wio._sum", + "metric_path": "metrics/cpu/cpu_wio._sum", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "CPU utilization", + "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}" + } + ], + "properties": { + "graph_type": "LINE", + "time_range": "1", + "display_unit": "%" + } + } + ] + }, + { + "layout_name": "default_yarn_heatmap", + "display_name": "YARN Heatmaps", + "section_name": "YARN_HEATMAPS", + "widgetLayoutInfo": [ + { + "widget_name": "YARN local disk space utilization per NodeManager", + "description": "", + "widget_type": "HEATMAP", + "is_visible": true, + "metrics": [ + { + "name": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc", + "metric_path": "metrics/yarn/GoodLocalDirsDiskUtilizationPerc", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc", + "metric_path": "metrics/yarn/GoodLogDirsDiskUtilizationPerc", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "YARN local disk space utilization per NodeManager", + "value": "${(yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc + yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc)/2}" + } + ], + "properties": { + "display_unit": "%", + "max_limit": "100" + } + }, + { + "widget_name": "Total Allocatable RAM Utilized per NodeManager", + "description": "", + "widget_type": "HEATMAP", + "is_visible": false, + "metrics": [ + { + "name": "yarn.NodeManagerMetrics.AllocatedGB", + "metric_path": "metrics/yarn/AllocatedGB", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.AvailableGB", + "metric_path": "metrics/yarn/AvailableGB", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "Total Allocatable RAM Utilized per NodeManager", + "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}" + } + ], + "properties": { + "display_unit": "%", + "max_limit": "100" + } + }, + { + "widget_name": "Total Allocatable CPU Utilized per NodeManager", + "description": "", + "widget_type": "HEATMAP", + "is_visible": false, + "metrics": [ + { + "name": "yarn.NodeManagerMetrics.AllocatedVCores", + "metric_path": "metrics/yarn/AllocatedVCores", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.AvailableVCores", + "metric_path": "metrics/yarn/AvailableVCores", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "Total Allocatable CPU Utilized per NodeManager", + "value": 
"${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}" + } + ], + "properties": { + "display_unit": "%", + "max_limit": "100" + } + }, + { + "widget_name": "Container Failures", + "description": "", + "widget_type": "HEATMAP", + "is_visible": false, + "metrics": [ + { + "name": "yarn.NodeManagerMetrics.ContainersFailed._rate", + "metric_path": "metrics/yarn/ContainersFailed._rate", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate", + "metric_path": "metrics/yarn/ContainersCompleted._rate", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate", + "metric_path": "metrics/yarn/ContainersLaunched._rate", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.ContainersIniting", + "metric_path": "metrics/yarn/ContainersIniting", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.ContainersKilled._rate", + "metric_path": "metrics/yarn/ContainersKilled._rate", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.ContainersRunning", + "metric_path": "metrics/yarn/ContainersRunning", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "Container Failures", + "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}" + } + ], + "properties": { + "display_unit": "%", + "max_limit": "100" + } + }, + { + "widget_name": "NodeManager GC Time", + "description": "", + "widget_type": "HEATMAP", + "is_visible": false, + "metrics": [ + { + "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis", + "metric_path": "metrics/jvm/gcTimeMillis", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "NodeManager Garbage Collection Time", + "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}" + } + ], + "properties": { + "display_unit": "ms", + "max_limit": "10000" + } + }, + { + "widget_name": "NodeManager JVM Heap Memory Used", + "description": "", + "widget_type": "HEATMAP", + "is_visible": false, + "metrics": [ + { + "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM", + "metric_path": "metrics/jvm/memHeapUsedM", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "NodeManager JVM Heap Memory Used", + "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}" + } + ], + "properties": { + "display_unit": "MB", + "max_limit": "512" + } + }, + { + "widget_name": "Allocated Containers", + "description": "", + "widget_type": "HEATMAP", + "is_visible": false, + "metrics": [ + { + "name": "yarn.NodeManagerMetrics.AllocatedContainers", + "metric_path": "metrics/yarn/AllocatedContainers", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "Allocated Containers", + "value": "${yarn.NodeManagerMetrics.AllocatedContainers}" + } + ], + "properties": { + "display_unit": "", + "max_limit": "100" + } + }, + { + 
"widget_name": "NodeManager RAM Utilized", + "description": "", + "widget_type": "HEATMAP", + "is_visible": false, + "metrics": [ + { + "name": "yarn.NodeManagerMetrics.AllocatedGB", + "metric_path": "metrics/yarn/AllocatedGB", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "NodeManager RAM Utilized", + "value": "${yarn.NodeManagerMetrics.AllocatedGB}" + } + ], + "properties": { + "display_unit": "", + "max_limit": "100" + } + }, + { + "widget_name": "NodeManager CPU Utilized", + "description": "", + "widget_type": "HEATMAP", + "is_visible": false, + "metrics": [ + { + "name": "yarn.NodeManagerMetrics.AllocatedVCores", + "metric_path": "metrics/yarn/AllocatedVCores", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "NodeManager CPU Utilized", + "value": "${yarn.NodeManagerMetrics.AllocatedVCores}" + } + ], + "properties": { + "display_unit": "", + "max_limit": "100" + } + } + ] + } + ] +} http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml new file mode 100644 index 0000000..a70fad3 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml @@ -0,0 +1,51 @@ + + + + + + + + content + mapred-env template + This is the jinja template for mapred-env.sh file + +# export JAVA_HOME=/home/y/libexec/jdk1.6.0/ + +export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}} + +export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA + +#export HADOOP_JOB_HISTORYSERVER_OPTS= +#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default. +#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger. +#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default. +#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default +#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0. +export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS" +export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS" +export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}" + + + content + + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml new file mode 100644 index 0000000..cef2b14 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml @@ -0,0 +1,134 @@ + + + + + + + + mapreduce.admin.user.env + LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64 + + Additional execution environment entries for map and reduce task processes. + This is not an additive property. 
You must preserve the original value if + you want your map and reduce tasks to have access to native libraries (compression, etc) + + + + + mapreduce.application.classpath + $PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure + + CLASSPATH for MR applications. A comma-separated list of CLASSPATH + entries. + + + + + mapreduce.application.framework.path + /hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework + + + + + yarn.app.mapreduce.am.admin-command-opts + -Dhdp.version=${hdp.version} + + Java opts for the MR App Master processes. + The following symbol, if present, will be interpolated: @taskid@ is replaced + by current TaskID. Any other occurrences of '@' will go unchanged. + For example, to enable verbose gc logging to a file named for the taskid in + /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: + -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc + + Usage of -Djava.library.path can cause programs to no longer function if + hadoop native libraries are used. These values should instead be set as part + of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and + mapreduce.reduce.env config settings. + + + + + mapreduce.admin.map.child.java.opts + -server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version} + + + + + mapreduce.admin.reduce.child.java.opts + -server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version} + + + + + mapreduce.reduce.shuffle.fetch.retry.enabled + 1 + + + + + mapreduce.reduce.shuffle.fetch.retry.interval-ms + 1000 + + + + + mapreduce.reduce.shuffle.fetch.retry.timeout-ms + 30000 + + + + + mapreduce.job.emit-timeline-data + false + + + + + mapreduce.jobhistory.bind-host + 0.0.0.0 + + + + + + + mapreduce.jobhistory.recovery.enable + true + Enable the history server to store server state and recover + server state upon startup. If enabled then + mapreduce.jobhistory.recovery.store.class must be specified. + + + + + mapreduce.jobhistory.recovery.store.class + org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService + The HistoryServerStateStoreService class to store history server + state for recovery. + + + + + mapreduce.jobhistory.recovery.store.leveldb.path + /hadoop/mapreduce/jhs + The URI where history server state will be stored if HistoryServerLeveldbSystemStateStoreService + is configured as the recovery storage class. 
+ + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml new file mode 100644 index 0000000..4768e46 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml @@ -0,0 +1,71 @@ + + + + + + yarn.scheduler.capacity.resource-calculator + + The ResourceCalculator implementation to be used to compare Resources in the scheduler. + The default i.e. org.apache.hadoop.yarn.util.resource.DefaultResourseCalculator only uses + Memory while DominantResourceCalculator uses Dominant-resource to compare multi-dimensional + resources such as Memory, CPU etc. A Java ResourceCalculator class name is expected. + + org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator + CPU Scheduling + + value-list + + + org.apache.hadoop.yarn.util.resource.DominantResourceCalculator + + + + org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator + + + + 1 + + + + + yarn.scheduler.capacity.root.accessible-node-labels + * + + + + + + + + + capacity-scheduler + Enter key=value (one per line) for all properties of capacity-scheduler.xml + + + hive-interactive-env + enable_hive_interactive + + + hive-interactive-env + llap_queue_capacity + + + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml new file mode 100644 index 0000000..a6b1baa --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml @@ -0,0 +1,177 @@ + + + + + xasecure.audit.is.enabled + true + Is Audit enabled? + + + + xasecure.audit.destination.db + false + Audit to DB + Is Audit to DB enabled? + + boolean + + + + ranger-env + xasecure.audit.destination.db + + + + + + xasecure.audit.destination.db.jdbc.url + {{audit_jdbc_url}} + Audit DB JDBC URL + + + + xasecure.audit.destination.db.user + {{xa_audit_db_user}} + Audit DB JDBC User + + + + xasecure.audit.destination.db.password + crypted + PASSWORD + Audit DB JDBC Password + + password + + + + + xasecure.audit.destination.db.jdbc.driver + {{jdbc_driver}} + Audit DB JDBC Driver + + + + xasecure.audit.credential.provider.file + jceks://file{{credential_file}} + Credential file store + + + + xasecure.audit.destination.db.batch.filespool.dir + /var/log/hadoop/yarn/audit/db/spool + /var/log/hadoop/yarn/audit/db/spool + + + + xasecure.audit.destination.hdfs + true + Audit to HDFS + Is Audit to HDFS enabled? 
+ + boolean + + + + ranger-env + xasecure.audit.destination.hdfs + + + + + + xasecure.audit.destination.hdfs.dir + hdfs://NAMENODE_HOSTNAME:8020/ranger/audit + HDFS folder to write audit to, make sure the service user has requried permissions + + + ranger-env + xasecure.audit.destination.hdfs.dir + + + + + + xasecure.audit.destination.hdfs.batch.filespool.dir + /var/log/hadoop/yarn/audit/hdfs/spool + /var/log/hadoop/yarn/audit/hdfs/spool + + + + xasecure.audit.destination.solr + false + Audit to SOLR + Is Solr audit enabled? + + boolean + + + + ranger-env + xasecure.audit.destination.solr + + + + + + xasecure.audit.destination.solr.urls + + Solr URL + + true + + + + ranger-admin-site + ranger.audit.solr.urls + + + + + + xasecure.audit.destination.solr.zookeepers + NONE + Solr Zookeeper string + + + ranger-admin-site + ranger.audit.solr.zookeepers + + + + + + xasecure.audit.destination.solr.batch.filespool.dir + /var/log/hadoop/yarn/audit/solr/spool + /var/log/hadoop/yarn/audit/solr/spool + + + + xasecure.audit.provider.summary.enabled + false + Audit provider summary enabled + Enable Summary audit? + + boolean + + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml new file mode 100644 index 0000000..97867cc --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml @@ -0,0 +1,82 @@ + + + + + policy_user + ambari-qa + Policy user for YARN + This user must be system user and also present at Ranger admin portal + + + + hadoop.rpc.protection + + Used for repository creation on ranger admin + + true + + + + + common.name.for.certificate + + Common name for certificate, this value should match what is specified in repo within ranger admin + + true + + + + + ranger-yarn-plugin-enabled + No + Enable Ranger for YARN + Enable ranger yarn plugin ? 
+ + + ranger-env + ranger-yarn-plugin-enabled + + + + boolean + false + + + + + REPOSITORY_CONFIG_USERNAME + yarn + Ranger repository config user + Used for repository creation on ranger admin + + + + REPOSITORY_CONFIG_PASSWORD + yarn + Ranger repository config password + PASSWORD + Used for repository creation on ranger admin + + password + + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml new file mode 100644 index 0000000..5410104 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml @@ -0,0 +1,66 @@ + + + + + xasecure.policymgr.clientssl.keystore + /usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks + Java Keystore files + + + + xasecure.policymgr.clientssl.keystore.password + myKeyFilePassword + PASSWORD + password for keystore + + password + + + + + xasecure.policymgr.clientssl.truststore + /usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks + java truststore file + + + + xasecure.policymgr.clientssl.truststore.password + changeit + PASSWORD + java truststore password + + password + + + + + xasecure.policymgr.clientssl.keystore.credential.file + jceks://file{{credential_file}} + java keystore credential file + + + + xasecure.policymgr.clientssl.truststore.credential.file + jceks://file{{credential_file}} + java truststore credential file + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml new file mode 100644 index 0000000..5f69962 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml @@ -0,0 +1,58 @@ + + + + + ranger.plugin.yarn.service.name + {{repo_name}} + Name of the Ranger service containing policies for this Yarn instance + + + + ranger.plugin.yarn.policy.source.impl + org.apache.ranger.admin.client.RangerAdminRESTClient + Class to retrieve policies from the source + + + + ranger.plugin.yarn.policy.rest.url + {{policymgr_mgr_url}} + URL to Ranger Admin + + + + ranger.plugin.yarn.policy.rest.ssl.config.file + /etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml + Path to the file containing SSL details to contact Ranger Admin + + + + ranger.plugin.yarn.policy.pollIntervalMs + 30000 + How often to poll for changes in policies? 
+ + + + ranger.plugin.yarn.policy.cache.dir + /etc/ranger/{{repo_name}}/policycache + Directory where Ranger policies are cached after successful retrieval from the source + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml new file mode 100644 index 0000000..bbc2930 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml @@ -0,0 +1,200 @@ + + + + + + apptimelineserver_heapsize + 1024 + AppTimelineServer Java heap size + Max heapsize for AppTimelineServer using a numerical value in the scale of MB + + false + MB + int + + + + + + + yarn_cgroups_enabled + false + You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster. + CPU Isolation + + value-list + + + true + + + + false + + + + 1 + + + + + + + is_supported_yarn_ranger + true + Set to false by default, needs to be set to true in stacks that use Ranger Yarn Plugin + + + + + content + yarn-env template + This is the jinja template for yarn-env.sh file + + export HADOOP_YARN_HOME={{hadoop_yarn_home}} + export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER + export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER + export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}} + export JAVA_HOME={{java64_home}} + export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}" + + # We need to add the EWMA appender for the yarn daemons only; + # however, YARN_ROOT_LOGGER is shared by the yarn client and the + # daemons. This is restrict the EWMA appender to daemons only. + INVOKER="${0##*/}" + if [ "$INVOKER" == "yarn-daemon.sh" ]; then + export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA} + fi + + # User for YARN daemons + export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} + + # resolve links - $0 may be a softlink + export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}" + + # some Java parameters + # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ + if [ "$JAVA_HOME" != "" ]; then + #echo "run java in $JAVA_HOME" + JAVA_HOME=$JAVA_HOME + fi + + if [ "$JAVA_HOME" = "" ]; then + echo "Error: JAVA_HOME is not set." + exit 1 + fi + + JAVA=$JAVA_HOME/bin/java + JAVA_HEAP_MAX=-Xmx1000m + + # For setting YARN specific HEAP sizes please use this + # Parameter and set appropriately + YARN_HEAPSIZE={{yarn_heapsize}} + + # check envvars which might override default args + if [ "$YARN_HEAPSIZE" != "" ]; then + JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m" + fi + + # Resource Manager specific parameters + + # Specify the max Heapsize for the ResourceManager using a numerical value + # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set + # the value to 1000. + # This value will be overridden by an Xmx setting specified in either YARN_OPTS + # and/or YARN_RESOURCEMANAGER_OPTS. + # If not specified, the default value will be picked from either YARN_HEAPMAX + # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. + export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}} + + # Specify the JVM options to be used when starting the ResourceManager. 
+ # These options will be appended to the options specified as YARN_OPTS + # and therefore may override any similar flags set in YARN_OPTS + #export YARN_RESOURCEMANAGER_OPTS= + + # Node Manager specific parameters + + # Specify the max Heapsize for the NodeManager using a numerical value + # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set + # the value to 1000. + # This value will be overridden by an Xmx setting specified in either YARN_OPTS + # and/or YARN_NODEMANAGER_OPTS. + # If not specified, the default value will be picked from either YARN_HEAPMAX + # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. + export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}} + + # Specify the max Heapsize for the timeline server using a numerical value + # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set + # the value to 1024. + # This value will be overridden by an Xmx setting specified in either YARN_OPTS + # and/or YARN_TIMELINESERVER_OPTS. + # If not specified, the default value will be picked from either YARN_HEAPMAX + # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. + export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}} + + # Specify the JVM options to be used when starting the NodeManager. + # These options will be appended to the options specified as YARN_OPTS + # and therefore may override any similar flags set in YARN_OPTS + #export YARN_NODEMANAGER_OPTS= + + # so that filenames w/ spaces are handled correctly in loops below + IFS= + + + # default log directory and file + if [ "$YARN_LOG_DIR" = "" ]; then + YARN_LOG_DIR="$HADOOP_YARN_HOME/logs" + fi + if [ "$YARN_LOGFILE" = "" ]; then + YARN_LOGFILE='yarn.log' + fi + + # default policy file for service-level authorization + if [ "$YARN_POLICYFILE" = "" ]; then + YARN_POLICYFILE="hadoop-policy.xml" + fi + + # restore ordinary behaviour + unset IFS + + + YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR" + YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR" + YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE" + YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE" + YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME" + YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING" + YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" + YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" + export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT" + export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT" + if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then + YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" + fi + YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" + YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}" + + + content + + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml new file mode 100644 index 0000000..9ac34f3 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml @@ -0,0 +1,103 @@ + + + + + + content + yarn-log4j template + Custom log4j.properties + 
+#Relative to Yarn Log Dir Prefix +yarn.log.dir=. +# +# Job Summary Appender +# +# Use following logger to send summary to separate file defined by +# hadoop.mapreduce.jobsummary.log.file rolled daily: +# hadoop.mapreduce.jobsummary.logger=INFO,JSA +# +hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger} +hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log +log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender +# Set the ResourceManager summary log filename +yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log +# Set the ResourceManager summary log level and appender +yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger} +#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY + +# To enable AppSummaryLogging for the RM, +# set yarn.server.resourcemanager.appsummary.logger to +# LEVEL,RMSUMMARY in hadoop-env.sh + +# Appender for ResourceManager Application Summary Log +# Requires the following properties to be set +# - hadoop.log.dir (Hadoop Log directory) +# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename) +# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender) +log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender +log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file} +log4j.appender.RMSUMMARY.MaxFileSize=256MB +log4j.appender.RMSUMMARY.MaxBackupIndex=20 +log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout +log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n +log4j.appender.JSA.layout=org.apache.log4j.PatternLayout +log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n +log4j.appender.JSA.DatePattern=.yyyy-MM-dd +log4j.appender.JSA.layout=org.apache.log4j.PatternLayout +log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger} +log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false + +# Appender for viewing information for errors and warnings +yarn.ewma.cleanupInterval=300 +yarn.ewma.messageAgeLimitSeconds=86400 +yarn.ewma.maxUniqueMessages=250 +log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender +log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval} +log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds} +log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages} + +# Audit logging for ResourceManager +rm.audit.logger=${hadoop.root.logger} +log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger} +log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false +log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender +log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log +log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout +log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n +log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd + +# Audit logging for NodeManager +nm.audit.logger=${hadoop.root.logger} +log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger} +log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false +log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender +log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log 
+log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout +log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n +log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd + + + content + false + + + +
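
A quick consistency check for a widget descriptor such as the YARN_widgets.json added above is to confirm that every metric referenced inside a ${...} value expression is also declared in that widget's "metrics" list. The sketch below is an illustrative assumption, not part of Ambari's build or test suite, and the local file path in the usage comment is hypothetical.

import json
import re

def undeclared_metrics(widgets_path):
    # Report (widget_name, metric) pairs where a value expression references
    # a metric that is not listed in the widget's "metrics" section.
    with open(widgets_path) as f:
        layouts = json.load(f)["layouts"]
    problems = []
    for layout in layouts:
        for widget in layout["widgetLayoutInfo"]:
            declared = {m["name"] for m in widget["metrics"]}
            for value in widget["values"]:
                expr = re.search(r"\$\{(.*)\}", value["value"]).group(1)
                # Metric names may contain '=', ':' and ',', so split only on
                # arithmetic operators, parentheses and whitespace.
                for token in re.split(r"[\s()+/*-]+", expr):
                    if token and not token.replace(".", "").isdigit() and token not in declared:
                        problems.append((widget["widget_name"], token))
    return problems

# Hypothetical usage against a local checkout:
# print(undeclared_metrics("ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json"))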