ambari-commits mailing list archives

From: jlun...@apache.org
Subject: [3/3] ambari git commit: AMBARI-16293: Spark service fails to start (jluniya)
Date: Fri, 06 May 2016 22:59:39 GMT
AMBARI-16293: Spark service fails to start (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bd6eecce
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bd6eecce
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bd6eecce

Branch: refs/heads/trunk
Commit: bd6eecce681c9bce75d76e3d589f56bccb7ef8db
Parents: 766ad0a
Author: Jayush Luniya <jluniya@hortonworks.com>
Authored: Fri May 6 15:59:28 2016 -0700
Committer: Jayush Luniya <jluniya@hortonworks.com>
Committed: Fri May 6 15:59:28 2016 -0700

----------------------------------------------------------------------
 .../common-services/SPARK/1.2.0.2.2/alerts.json |  32 ---
 .../1.2.0.2.2/configuration/spark-defaults.xml  | 152 --------------
 .../SPARK/1.2.0.2.2/configuration/spark-env.xml | 127 ------------
 .../configuration/spark-log4j-properties.xml    |  46 -----
 .../configuration/spark-metrics-properties.xml  | 164 ---------------
 .../SPARK/1.2.0.2.2/kerberos.json               |  53 -----
 .../SPARK/1.2.0.2.2/metainfo.xml                | 149 --------------
 .../package/scripts/job_history_server.py       | 103 ----------
 .../SPARK/1.2.0.2.2/package/scripts/params.py   | 198 -------------------
 .../1.2.0.2.2/package/scripts/service_check.py  |  43 ----
 .../1.2.0.2.2/package/scripts/setup_spark.py    | 114 -----------
 .../1.2.0.2.2/package/scripts/spark_client.py   |  61 ------
 .../1.2.0.2.2/package/scripts/spark_service.py  | 122 ------------
 .../package/scripts/spark_thrift_server.py      |  87 --------
 .../1.2.0.2.2/package/scripts/status_params.py  |  39 ----
 .../common-services/SPARK/1.2.1/alerts.json     |  32 +++
 .../1.2.1/configuration/spark-defaults.xml      | 152 ++++++++++++++
 .../SPARK/1.2.1/configuration/spark-env.xml     | 127 ++++++++++++
 .../configuration/spark-log4j-properties.xml    |  46 +++++
 .../configuration/spark-metrics-properties.xml  | 164 +++++++++++++++
 .../common-services/SPARK/1.2.1/kerberos.json   |  53 +++++
 .../common-services/SPARK/1.2.1/metainfo.xml    | 180 +++++++++++++++++
 .../1.2.1/package/scripts/job_history_server.py | 103 ++++++++++
 .../SPARK/1.2.1/package/scripts/params.py       | 198 +++++++++++++++++++
 .../1.2.1/package/scripts/service_check.py      |  43 ++++
 .../SPARK/1.2.1/package/scripts/setup_spark.py  | 114 +++++++++++
 .../SPARK/1.2.1/package/scripts/spark_client.py |  61 ++++++
 .../1.2.1/package/scripts/spark_service.py      | 122 ++++++++++++
 .../package/scripts/spark_thrift_server.py      |  87 ++++++++
 .../1.2.1/package/scripts/status_params.py      |  39 ++++
 .../SPARK/1.2.1/quicklinks/quicklinks.json      |  27 +++
 .../SPARK/1.3.1.2.3/metainfo.xml                | 141 -------------
 .../common-services/SPARK/1.3.1/metainfo.xml    | 141 +++++++++++++
 .../SPARK/1.4.1.2.3/kerberos.json               |  64 ------
 .../SPARK/1.4.1.2.3/metainfo.xml                |  93 ---------
 .../common-services/SPARK/1.4.1/kerberos.json   |  64 ++++++
 .../common-services/SPARK/1.4.1/metainfo.xml    |  93 +++++++++
 .../configuration/spark-hive-site-override.xml  |  55 ++++++
 .../configuration/spark-thrift-sparkconf.xml    | 100 ++++++++++
 .../common-services/SPARK/1.5.2/metainfo.xml    |  42 ++++
 .../1.6.0/configuration/spark-defaults.xml      |  53 +++++
 .../spark-thrift-fairscheduler.xml              |  37 ++++
 .../configuration/spark-thrift-sparkconf.xml    | 193 ++++++++++++++++++
 .../common-services/SPARK/1.6.0/metainfo.xml    |  44 +++++
 .../stacks/HDP/2.2/services/SPARK/metainfo.xml  |  32 +--
 .../services/SPARK/quicklinks/quicklinks.json   |  27 ---
 .../configuration/spark-hive-site-override.xml  |  55 ------
 .../configuration/spark-thrift-sparkconf.xml    | 100 ----------
 .../stacks/HDP/2.3/services/SPARK/metainfo.xml  |  20 +-
 .../services/SPARK/quicklinks/quicklinks.json   |  27 ---
 .../SPARK/configuration/spark-defaults.xml      |  53 -----
 .../spark-thrift-fairscheduler.xml              |  37 ----
 .../configuration/spark-thrift-sparkconf.xml    | 193 ------------------
 .../stacks/HDP/2.4/services/SPARK/metainfo.xml  |  15 +-
 .../stacks/HDP/2.5/services/SPARK/metainfo.xml  |   2 +-
 .../server/stack/KerberosDescriptorTest.java    |   2 +-
 .../stacks/2.2/SPARK/test_job_history_server.py |   2 +-
 .../stacks/2.2/SPARK/test_spark_client.py       |   2 +-
 .../2.2/SPARK/test_spark_service_check.py       |   4 +-
 .../2.2/configs/spark-job-history-server.json   |   2 +-
 .../2.3/SPARK/test_spark_thrift_server.py       |   2 +-
 61 files changed, 2381 insertions(+), 2352 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/alerts.json b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/alerts.json
deleted file mode 100644
index 0e38f16..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/alerts.json
+++ /dev/null
@@ -1,32 +0,0 @@
-{
-  "SPARK": {
-    "service": [],
-    "SPARK_JOBHISTORYSERVER": [
-      {
-        "name": "SPARK_JOBHISTORYSERVER_PROCESS",
-        "label": "Spark History Server",
-        "description": "This host-level alert is triggered if the Spark History Server cannot be determined to be up.",
-        "interval": 1,
-        "scope": "HOST",
-        "source": {
-          "type": "PORT",
-          "uri": "{{spark-defaults/spark.history.ui.port}}",
-          "default_port": 18080,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5
-            }
-          }
-        }
-      }
-    ]
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-defaults.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-defaults.xml b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-defaults.xml
deleted file mode 100644
index b507d5e..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-defaults.xml
+++ /dev/null
@@ -1,152 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="true">
-
-  <property>
-    <name>spark.yarn.executor.memoryOverhead</name>
-    <value>384</value>
-    <description>
-      The amount of off heap memory (in megabytes) to be allocated per executor.
-      This is memory that accounts for things like VM overheads, interned strings,
-      other native overheads, etc.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.yarn.driver.memoryOverhead</name>
-    <value>384</value>
-    <description>
-      The amount of off heap memory (in megabytes) to be allocated per driver.
-      This is memory that accounts for things like VM overheads, interned strings,
-      other native overheads, etc.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.yarn.scheduler.heartbeat.interval-ms</name>
-    <value>5000</value>
-    <description>
-      The interval in ms in which the Spark application master heartbeats into the YARN ResourceManager.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.yarn.max.executor.failures</name>
-    <value>3</value>
-    <description>
-      The maximum number of executor failures before failing the application.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.yarn.queue</name>
-    <value>default</value>
-    <description>
-      The name of the YARN queue to which the application is submitted.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.yarn.containerLauncherMaxThreads</name>
-    <value>25</value>
-    <description>
-      The maximum number of threads to use in the application master for launching executor containers.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.yarn.submit.file.replication</name>
-    <value>3</value>
-    <description>
-      HDFS replication level for the files uploaded into HDFS for the application.
-      These include things like the Spark jar, the app jar, and any distributed cache files/archives.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.yarn.preserve.staging.files</name>
-    <value>false</value>
-    <description>
-      Set to true to preserve the staged files (Spark jar, app jar, distributed cache files) at the
-      end of the job rather than delete them.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.yarn.services</name>
-    <value>org.apache.spark.deploy.yarn.history.YarnHistoryService</value>
-    <description>
-      Service required for publishing events to the YARN Application Timeline Service.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.history.provider</name>
-    <value>org.apache.spark.deploy.history.yarn.server.YarnHistoryProvider</value>
-    <description>
-      Name of the class implementing the application history backend which publishes to YARN Application Timeline Service.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.history.ui.port</name>
-    <value>18080</value>
-    <description>
-      The port to which the web interface of the History Server binds.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.history.kerberos.principal</name>
-    <value>none</value>
-    <description>
-      Kerberos principal name for the Spark History Server.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.history.kerberos.keytab</name>
-    <value>none</value>
-    <description>
-      Location of the kerberos keytab file for the Spark History Server.
-    </description>
-  </property>
-
-  <property>
-    <name>spark.history.provider</name>
-    <value>org.apache.spark.deploy.yarn.history.YarnHistoryProvider</value>
-    <description>Name of history provider class</description>
-  </property>
-
-  <property>
-    <name>spark.yarn.historyServer.address</name>
-    <value>{{spark_history_server_host}}:{{spark_history_ui_port}}</value>
-    <description>The address of the Spark history server (i.e. host.com:18080). The address should not contain a scheme (http://). Defaults to not being set since the history server is an optional service. This address is given to the YARN ResourceManager when the Spark application finishes to link the application from the ResourceManager UI to the Spark history server UI.</description>
-  </property>
-
-  <property>
-    <name>spark.yarn.max.executor.failures</name>
-    <value>3</value>
-    <description>The maximum number of executor failures before failing the application.</description>
-  </property>
-
-</configuration>
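
The spark-defaults dictionary above is rendered into spark-defaults.conf by setup_spark.py (further down in this patch) via PropertiesFile with a space as the key/value delimiter. A minimal sketch of the rendered file, assuming the stock defaults above (the host in the last line is a placeholder for {{spark_history_server_host}}):

  spark.history.ui.port 18080
  spark.history.kerberos.principal none
  spark.history.kerberos.keytab none
  spark.yarn.queue default
  spark.yarn.max.executor.failures 3
  spark.yarn.historyServer.address historyserver.example.com:18080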

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-env.xml b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-env.xml
deleted file mode 100644
index 8a5117a..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-env.xml
+++ /dev/null
@@ -1,127 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>spark_user</name>
-    <display-name>Spark User</display-name>
-    <value>spark</value>
-    <property-type>USER</property-type>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>spark_group</name>
-    <display-name>Spark Group</display-name>
-    <value>spark</value>
-    <property-type>GROUP</property-type>
-    <description>spark group</description>
-    <value-attributes>
-      <type>user</type>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>spark_log_dir</name>
-    <value>/var/log/spark</value>
-    <description>Spark Log Dir</description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>spark_pid_dir</name>
-    <value>/var/run/spark</value>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-  </property>
-
-  <!-- spark-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for spark-env.sh file</description>
-    <value>
-#!/usr/bin/env bash
-
-# This file is sourced when running various Spark programs.
-# Copy it as spark-env.sh and edit that to configure Spark for your site.
-
-# Options read in YARN client mode
-#SPARK_EXECUTOR_INSTANCES="2" #Number of workers to start (Default: 2)
-#SPARK_EXECUTOR_CORES="1" #Number of cores for the workers (Default: 1).
-#SPARK_EXECUTOR_MEMORY="1G" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)
-#SPARK_DRIVER_MEMORY="512 Mb" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)
-#SPARK_YARN_APP_NAME="spark" #The name of your application (Default: Spark)
-#SPARK_YARN_QUEUE="~@~Xdefault~@~Y" #The hadoop queue to use for allocation requests (Default: @~Xdefault~@~Y)
-#SPARK_YARN_DIST_FILES="" #Comma separated list of files to be distributed with the job.
-#SPARK_YARN_DIST_ARCHIVES="" #Comma separated list of archives to be distributed with the job.
-
-# Generic options for the daemons used in the standalone deploy mode
-
-# Alternate conf dir. (Default: ${SPARK_HOME}/conf)
-export SPARK_CONF_DIR=${SPARK_CONF_DIR:-{{spark_home}}/conf}
-
-# Where log files are stored.(Default:${SPARK_HOME}/logs)
-#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs
-export SPARK_LOG_DIR={{spark_log_dir}}
-
-# Where the pid file is stored. (Default: /tmp)
-export SPARK_PID_DIR={{spark_pid_dir}}
-
-# A string representing this instance of spark.(Default: $USER)
-SPARK_IDENT_STRING=$USER
-
-# The scheduling priority for daemons. (Default: 0)
-SPARK_NICENESS=0
-
-export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
-
-# The java implementation to use.
-export JAVA_HOME={{java_home}}
-
-if [ -d "/etc/tez/conf/" ]; then
-  export TEZ_CONF_DIR=/etc/tez/conf
-else
-  export TEZ_CONF_DIR=
-fi
-
-</value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>spark_thrift_cmd_opts</name>
-    <description>additional spark thrift server commandline options</description>
-    <value></value>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-  </property>
-</configuration>
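
The content property above is the template for spark-env.sh; its {{...}} placeholders are filled in from spark-env and from params.py (further down in this patch). A minimal sketch of the rendered exports, assuming the default spark_log_dir and spark_pid_dir values above (the spark_home and java_home paths are placeholders):

  export SPARK_CONF_DIR=${SPARK_CONF_DIR:-/usr/hdp/current/spark-client/conf}
  export SPARK_LOG_DIR=/var/log/spark
  export SPARK_PID_DIR=/var/run/spark
  export JAVA_HOME=/usr/jdk64/jdk1.8.0_77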

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-log4j-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-log4j-properties.xml b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-log4j-properties.xml
deleted file mode 100644
index 9eca965..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-log4j-properties.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>content</name>
-    <description>Spark-log4j-Properties</description>
-    <value>
-# Set everything to be logged to the console
-log4j.rootCategory=INFO, console
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
-
-# Settings to quiet third party logs that are too verbose
-log4j.logger.org.eclipse.jetty=WARN
-log4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR
-log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
-log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
-
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-metrics-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-metrics-properties.xml b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-metrics-properties.xml
deleted file mode 100644
index a8e9f69..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/configuration/spark-metrics-properties.xml
+++ /dev/null
@@ -1,164 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <property>
-    <name>content</name>
-    <description>Spark-metrics-properties</description>
-    <value>
-# syntax: [instance].sink|source.[name].[options]=[value]
-
-# This file configures Spark's internal metrics system. The metrics system is
-# divided into instances which correspond to internal components.
-# Each instance can be configured to report its metrics to one or more sinks.
-# Accepted values for [instance] are "master", "worker", "executor", "driver",
-# and "applications". A wild card "*" can be used as an instance name, in
-# which case all instances will inherit the supplied property.
-#
-# Within an instance, a "source" specifies a particular set of grouped metrics.
-# there are two kinds of sources:
-# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will
-# collect a Spark component's internal state. Each instance is paired with a
-# Spark source that is added automatically.
-# 2. Common sources, like JvmSource, which will collect low level state.
-# These can be added through configuration options and are then loaded
-# using reflection.
-#
-# A "sink" specifies where metrics are delivered to. Each instance can be
-# assigned one or more sinks.
-#
-# The sink|source field specifies whether the property relates to a sink or
-# source.
-#
-# The [name] field specifies the name of source or sink.
-#
-# The [options] field is the specific property of this source or sink. The
-# source or sink is responsible for parsing this property.
-#
-# Notes:
-# 1. To add a new sink, set the "class" option to a fully qualified class
-# name (see examples below).
-# 2. Some sinks involve a polling period. The minimum allowed polling period
-# is 1 second.
-# 3. Wild card properties can be overridden by more specific properties.
-# For example, master.sink.console.period takes precedence over
-# *.sink.console.period.
-# 4. A metrics specific configuration
-# "spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties" should be
-# added to Java properties using -Dspark.metrics.conf=xxx if you want to
-# customize metrics system. You can also put the file in ${SPARK_HOME}/conf
-# and it will be loaded automatically.
-# 5. MetricsServlet is added by default as a sink in master, worker and client
-# driver, you can send http request "/metrics/json" to get a snapshot of all the
-# registered metrics in json format. For master, requests "/metrics/master/json" and
-# "/metrics/applications/json" can be sent seperately to get metrics snapshot of
-# instance master and applications. MetricsServlet may not be configured by self.
-#
-
-## List of available sinks and their properties.
-
-# org.apache.spark.metrics.sink.ConsoleSink
-# Name: Default: Description:
-# period 10 Poll period
-# unit seconds Units of poll period
-
-# org.apache.spark.metrics.sink.CSVSink
-# Name: Default: Description:
-# period 10 Poll period
-# unit seconds Units of poll period
-# directory /tmp Where to store CSV files
-
-# org.apache.spark.metrics.sink.GangliaSink
-# Name: Default: Description:
-# host NONE Hostname or multicast group of Ganglia server
-# port NONE Port of Ganglia server(s)
-# period 10 Poll period
-# unit seconds Units of poll period
-# ttl 1 TTL of messages sent by Ganglia
-# mode multicast Ganglia network mode ('unicast' or 'multicast')
-
-# org.apache.spark.metrics.sink.JmxSink
-
-# org.apache.spark.metrics.sink.MetricsServlet
-# Name: Default: Description:
-# path VARIES* Path prefix from the web server root
-# sample false Whether to show entire set of samples for histograms ('false' or 'true')
-#
-# * Default path is /metrics/json for all instances except the master. The master has two paths:
-# /metrics/applications/json # App information
-# /metrics/master/json # Master information
-
-# org.apache.spark.metrics.sink.GraphiteSink
-# Name: Default: Description:
-# host NONE Hostname of Graphite server
-# port NONE Port of Graphite server
-# period 10 Poll period
-# unit seconds Units of poll period
-# prefix EMPTY STRING Prefix to prepend to metric name
-
-## Examples
-# Enable JmxSink for all instances by class name
-#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink
-
-# Enable ConsoleSink for all instances by class name
-#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink
-
-# Polling period for ConsoleSink
-#*.sink.console.period=10
-
-#*.sink.console.unit=seconds
-
-# Master instance overlap polling period
-#master.sink.console.period=15
-
-#master.sink.console.unit=seconds
-
-# Enable CsvSink for all instances
-#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink
-
-# Polling period for CsvSink
-#*.sink.csv.period=1
-
-#*.sink.csv.unit=minutes
-
-# Polling directory for CsvSink
-#*.sink.csv.directory=/tmp/
-
-# Worker instance overlap polling period
-#worker.sink.csv.period=10
-
-#worker.sink.csv.unit=minutes
-
-# Enable jvm source for instance master, worker, driver and executor
-#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource
-
-#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSource
-
-#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource
-
-#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource
-
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-  </property>
-</configuration>
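
All of the sinks listed above are enabled with the same [instance].sink.[name].[options]=[value] pattern shown in the Examples block. As a sketch, routing metrics from every instance to the GraphiteSink (host and port are placeholders for a real Graphite endpoint):

  *.sink.graphite.class=org.apache.spark.metrics.sink.GraphiteSink
  *.sink.graphite.host=graphite.example.com
  *.sink.graphite.port=2003
  *.sink.graphite.period=10
  *.sink.graphite.unit=seconds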

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/kerberos.json b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/kerberos.json
deleted file mode 100644
index fa6af33..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/kerberos.json
+++ /dev/null
@@ -1,53 +0,0 @@
-{
-  "services": [
-    {
-      "name": "SPARK",
-      "identities": [
-        {
-          "name": "/smokeuser"
-        },
-        {
-          "name": "sparkuser",
-          "principal": {
-            "value": "${spark-env/spark_user}-${cluster_name|toLower()}@${realm}",
-            "type" : "user",
-            "configuration": "spark-defaults/spark.history.kerberos.principal",
-            "local_username" : "${spark-env/spark_user}"
-          },
-          "keytab": {
-            "file": "${keytab_dir}/spark.headless.keytab",
-            "owner": {
-              "name": "${spark-env/spark_user}",
-              "access": "r"
-            },
-            "group": {
-              "name": "${cluster-env/user_group}",
-               "access": ""
-            },
-            "configuration": "spark-defaults/spark.history.kerberos.keytab"
-           }
-        }
-      ],
-      "configurations": [
-        {
-          "spark-defaults": {
-            "spark.history.kerberos.enabled": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "SPARK_JOBHISTORYSERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        },
-        {
-          "name": "SPARK_CLIENT"
-        }
-      ]
-    }
-  ]
-}
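
The ${...} expressions above are resolved when Kerberos is enabled. A minimal sketch of the resolved values, assuming the default spark_user of "spark", a hypothetical cluster named "MyCluster" (lower-cased by the |toLower() filter), realm EXAMPLE.COM, and /etc/security/keytabs as the keytab_dir (all placeholders):

  spark.history.kerberos.principal = spark-mycluster@EXAMPLE.COM
  spark.history.kerberos.keytab = /etc/security/keytabs/spark.headless.keytab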

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/metainfo.xml b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/metainfo.xml
deleted file mode 100644
index 61016ab..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/metainfo.xml
+++ /dev/null
@@ -1,149 +0,0 @@
-<?xml version="1.0"?>
-<!--Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SPARK</name>
-      <displayName>Spark</displayName>
-      <comment>Apache Spark is a fast and general engine for large-scale data processing.</comment>
-      <version>1.2.0.2.2</version>
-      <components>
-        <component>
-          <name>SPARK_JOBHISTORYSERVER</name>
-          <displayName>Spark History Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-               <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-               <scope>host</scope>
-               <auto-deploy>
-                 <enabled>true</enabled>
-               </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-             </auto-deploy>
-           </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/job_history_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-        <component>
-          <name>SPARK_CLIENT</name>
-          <displayName>Spark Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-               <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-               <scope>host</scope>
-               <auto-deploy>
-                 <enabled>true</enabled>
-               </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-             </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/spark_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>spark-log4j.properties</fileName>
-              <dictionaryName>spark-log4j-properties</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>spark-env.sh</fileName>
-              <dictionaryName>spark-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>spark-metrics.properties</fileName>
-              <dictionaryName>spark-metrics-properties</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>properties</type>
-              <fileName>spark-defaults.conf</fileName>
-              <dictionaryName>spark-defaults</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-
-      <configuration-dependencies>
-        <config-type>spark-defaults</config-type>
-        <config-type>spark-env</config-type>
-        <config-type>spark-log4j-properties</config-type>
-        <config-type>spark-metrics-properties</config-type>
-      </configuration-dependencies>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>YARN</service>
-        <service>TEZ</service>
-      </requiredServices>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
deleted file mode 100644
index bccd714..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-import os
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import StackFeature
-from resource_management.core.logger import Logger
-from resource_management.core import shell
-from setup_spark import *
-from spark_service import spark_service
-
-
-class JobHistoryServer(Script):
-
-  def install(self, env):
-    import params
-    env.set_params(params)
-    
-    self.install_packages(env)
-    
-  def configure(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    
-    setup_spark(env, 'server', upgrade_type=upgrade_type, action = 'config')
-    
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    
-    self.configure(env)
-    spark_service('jobhistoryserver', upgrade_type=upgrade_type, action='start')
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    
-    spark_service('jobhistoryserver', upgrade_type=upgrade_type, action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    check_process_status(status_params.spark_history_server_pid_file)
-    
-
-  def get_component_name(self):
-    return "spark-historyserver"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      Logger.info("Executing Spark Job History Server Stack Upgrade pre-restart")
-      conf_select.select(params.stack_name, "spark", params.version)
-      stack_select.select("spark-historyserver", params.version)
-
-      # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
-      # need to copy the tarball, otherwise, copy it.
-      if params.version and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version):
-        resource_created = copy_to_hdfs(
-          "tez",
-          params.user_group,
-          params.hdfs_user,
-          host_sys_prepped=params.host_sys_prepped)
-        if resource_created:
-          params.HdfsResource(None, action="execute")
-          
-  def get_log_folder(self):
-    import params
-    return params.spark_log_dir
-  
-  def get_user(self):
-    import params
-    return params.spark_user
-
-if __name__ == "__main__":
-  JobHistoryServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
deleted file mode 100644
index c5f3eb6..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
+++ /dev/null
@@ -1,198 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-
-import status_params
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import StackFeature
-from setup_spark import *
-
-import resource_management.libraries.functions
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.get_stack_version import get_stack_version
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-
-from resource_management.libraries.script.script import Script
-
-# a map of the Ambari role to the component name
-# for use with <stack-root>/current/<component>
-SERVER_ROLE_DIRECTORY_MAP = {
-  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
-  'SPARK_CLIENT' : 'spark-client',
-  'SPARK_THRIFTSERVER' : 'spark-thriftserver'
-}
-
-component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK_CLIENT")
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_name = status_params.stack_name
-stack_root = Script.get_stack_root()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
-version = default("/commandParams/version", None)
-
-spark_conf = '/etc/spark/conf'
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-
-if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
-  hadoop_home = stack_select.get_hadoop_dir("home")
-  spark_conf = format("{stack_root}/current/{component_directory}/conf")
-  spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
-  spark_pid_dir = status_params.spark_pid_dir
-  spark_home = format("{stack_root}/current/{component_directory}")
-
-spark_thrift_server_conf_file = spark_conf + "/spark-thrift-sparkconf.conf"
-java_home = config['hostLevelParams']['java_home']
-
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-user_group = config['configurations']['cluster-env']['user_group']
-
-spark_user = status_params.spark_user
-hive_user = status_params.hive_user
-spark_group = status_params.spark_group
-user_group = status_params.user_group
-spark_hdfs_user_dir = format("/user/{spark_user}")
-spark_history_dir = default('/configurations/spark-defaults/spark.history.fs.logDirectory', "hdfs:///spark-history")
-
-spark_history_server_pid_file = status_params.spark_history_server_pid_file
-spark_thrift_server_pid_file = status_params.spark_thrift_server_pid_file
-
-spark_history_server_start = format("{spark_home}/sbin/start-history-server.sh")
-spark_history_server_stop = format("{spark_home}/sbin/stop-history-server.sh")
-
-spark_thrift_server_start = format("{spark_home}/sbin/start-thriftserver.sh")
-spark_thrift_server_stop = format("{spark_home}/sbin/stop-thriftserver.sh")
-spark_logs_dir = format("{spark_home}/logs")
-
-spark_submit_cmd = format("{spark_home}/bin/spark-submit")
-spark_smoke_example = "org.apache.spark.examples.SparkPi"
-spark_service_check_cmd = format(
-  "{spark_submit_cmd} --class {spark_smoke_example}  --master yarn-cluster  --num-executors 1 --driver-memory 256m  --executor-memory 256m   --executor-cores 1  {spark_home}/lib/spark-examples*.jar 1")
-
-spark_jobhistoryserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", [])
-
-if len(spark_jobhistoryserver_hosts) > 0:
-  spark_history_server_host = spark_jobhistoryserver_hosts[0]
-else:
-  spark_history_server_host = "localhost"
-
-# spark-defaults params
-spark_yarn_historyServer_address = default(spark_history_server_host, "localhost")
-
-spark_history_ui_port = config['configurations']['spark-defaults']['spark.history.ui.port']
-
-spark_env_sh = config['configurations']['spark-env']['content']
-spark_log4j_properties = config['configurations']['spark-log4j-properties']['content']
-spark_metrics_properties = config['configurations']['spark-metrics-properties']['content']
-
-hive_server_host = default("/clusterHostInfo/hive_server_host", [])
-is_hive_installed = not len(hive_server_host) == 0
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-spark_kerberos_keytab =  config['configurations']['spark-defaults']['spark.history.kerberos.keytab']
-spark_kerberos_principal =  config['configurations']['spark-defaults']['spark.history.kerberos.principal']
-
-spark_thriftserver_hosts = default("/clusterHostInfo/spark_thriftserver_hosts", [])
-has_spark_thriftserver = not len(spark_thriftserver_hosts) == 0
-
-# hive-site params
-spark_hive_properties = {
-  'hive.metastore.uris': config['configurations']['hive-site']['hive.metastore.uris']
-}
-
-# security settings
-if security_enabled:
-  spark_principal = spark_kerberos_principal.replace('_HOST',spark_history_server_host.lower())
-
-  if is_hive_installed:
-    spark_hive_properties.update({
-      'hive.metastore.sasl.enabled': str(config['configurations']['hive-site']['hive.metastore.sasl.enabled']).lower(),
-      'hive.metastore.kerberos.keytab.file': config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file'],
-      'hive.server2.authentication.spnego.principal': config['configurations']['hive-site']['hive.server2.authentication.spnego.principal'],
-      'hive.server2.authentication.spnego.keytab': config['configurations']['hive-site']['hive.server2.authentication.spnego.keytab'],
-      'hive.metastore.kerberos.principal': config['configurations']['hive-site']['hive.metastore.kerberos.principal'],
-      'hive.server2.authentication.kerberos.principal': config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal'],
-      'hive.server2.authentication.kerberos.keytab': config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab'],
-      'hive.server2.authentication': config['configurations']['hive-site']['hive.server2.authentication'],
-    })
-
-    hive_kerberos_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
-    hive_kerberos_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
-
-# thrift server support - available on HDP 2.3 or higher
-spark_thrift_sparkconf = None
-spark_thrift_cmd_opts_properties = ''
-spark_thrift_fairscheduler_content = None
-spark_thrift_master = "yarn-client"
-if 'nm_hosts' in config['clusterHostInfo'] and len(config['clusterHostInfo']['nm_hosts']) == 1:
-  # use local mode when there's only one nodemanager
-  spark_thrift_master = "local[4]"
-
-if has_spark_thriftserver and 'spark-thrift-sparkconf' in config['configurations']:
-  spark_thrift_sparkconf = config['configurations']['spark-thrift-sparkconf']
-  spark_thrift_cmd_opts_properties = config['configurations']['spark-env']['spark_thrift_cmd_opts']
-  if is_hive_installed:
-    # update default metastore client properties (async wait for the metastore component); this is useful in case of
-    # blueprint provisioning when hive-metastore and spark-thriftserver are not on the same host.
-    spark_hive_properties.update({
-      'hive.metastore.client.socket.timeout' : config['configurations']['hive-site']['hive.metastore.client.socket.timeout']
-    })
-    spark_hive_properties.update(config['configurations']['spark-hive-site-override'])
-
-  if 'spark-thrift-fairscheduler' in config['configurations'] and 'fairscheduler_content' in config['configurations']['spark-thrift-fairscheduler']:
-    spark_thrift_fairscheduler_content = config['configurations']['spark-thrift-fairscheduler']['fairscheduler_content']
-
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-hdfs_site = config['configurations']['hdfs-site']
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
- )

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/service_check.py
deleted file mode 100644
index 694f046..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/service_check.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-import subprocess
-import time
-
-from resource_management import *
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.format import format
-from resource_management.core.resources.system import Execute
-from resource_management.core.logger import Logger
-
-class SparkServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    if params.security_enabled:
-      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
-      Execute(spark_kinit_cmd, user=params.spark_user)
-
-    Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k http://{spark_history_server_host}:{spark_history_ui_port} | grep 200"),
-      tries = 10,
-      try_sleep=3,
-      logoutput=True
-    )
-
-if __name__ == "__main__":
-  SparkServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py
deleted file mode 100644
index eca8534..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-import fileinput
-import shutil
-import os
-from resource_management import *
-from resource_management.core.exceptions import ComponentIsNotRunning
-from resource_management.core.logger import Logger
-from resource_management.core import shell
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import StackFeature
-
-def setup_spark(env, type, upgrade_type = None, action = None):
-  import params
-
-  Directory([params.spark_pid_dir, params.spark_log_dir],
-            owner=params.spark_user,
-            group=params.user_group,
-            mode=0775,
-            create_parents = True
-  )
-  if type == 'server' and action == 'config':
-    params.HdfsResource(params.spark_hdfs_user_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.spark_user,
-                       mode=0775
-    )
-    params.HdfsResource(None, action="execute")
-
-  PropertiesFile(format("{spark_conf}/spark-defaults.conf"),
-    properties = params.config['configurations']['spark-defaults'],
-    key_value_delimiter = " ",
-    owner=params.spark_user,
-    group=params.spark_group,
-  )
-
-  # create spark-env.sh in etc/conf dir
-  File(os.path.join(params.spark_conf, 'spark-env.sh'),
-       owner=params.spark_user,
-       group=params.spark_group,
-       content=InlineTemplate(params.spark_env_sh),
-       mode=0644,
-  )
-
-  #create log4j.properties in etc/conf dir
-  File(os.path.join(params.spark_conf, 'log4j.properties'),
-       owner=params.spark_user,
-       group=params.spark_group,
-       content=params.spark_log4j_properties,
-       mode=0644,
-  )
-
-  #create metrics.properties in etc/conf dir
-  File(os.path.join(params.spark_conf, 'metrics.properties'),
-       owner=params.spark_user,
-       group=params.spark_group,
-       content=InlineTemplate(params.spark_metrics_properties)
-  )
-  
-  Directory(params.spark_logs_dir,
-       owner=params.spark_user,
-       group=params.spark_group,
-       mode=0755,   
-  )
-
-  if params.is_hive_installed:
-    XmlConfig("hive-site.xml",
-          conf_dir=params.spark_conf,
-          configurations=params.spark_hive_properties,
-          owner=params.spark_user,
-          group=params.spark_group,
-          mode=0644)
-
-  if params.has_spark_thriftserver:
-    PropertiesFile(params.spark_thrift_server_conf_file,
-      properties = params.config['configurations']['spark-thrift-sparkconf'],
-      owner = params.hive_user,
-      group = params.user_group,
-      key_value_delimiter = " ",
-    )
-
-  effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
-  if effective_version:
-    effective_version = format_stack_version(effective_version)
-
-  if params.spark_thrift_fairscheduler_content and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
-    # create spark-thrift-fairscheduler.xml
-    File(os.path.join(params.spark_conf,"spark-thrift-fairscheduler.xml"),
-      owner=params.spark_user,
-      group=params.spark_group,
-      mode=0755,
-      content=InlineTemplate(params.spark_thrift_fairscheduler_content)
-    )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_client.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_client.py
deleted file mode 100644
index 3838061..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_client.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import StackFeature
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-from resource_management.core.logger import Logger
-from resource_management.core import shell
-from setup_spark import setup_spark
-
-
-class SparkClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    
-    setup_spark(env, 'client', upgrade_type=upgrade_type, action = 'config')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-  
-  def get_component_name(self):
-    return "spark-client"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      Logger.info("Executing Spark Client Stack Upgrade pre-restart")
-      conf_select.select(params.stack_name, "spark", params.version)
-      stack_select.select("spark-client", params.version)
-
-if __name__ == "__main__":
-  SparkClient().execute()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_service.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_service.py
deleted file mode 100644
index c3784d6..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_service.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import socket
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions import format
-from resource_management.core.resources.system import File, Execute
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.show_logs import show_logs
-
-def spark_service(name, upgrade_type=None, action=None):
-  import params
-
-  if action == 'start':
-
-    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
-    if effective_version:
-      effective_version = format_stack_version(effective_version)
-
-    if effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
-      # copy spark-hdp-assembly.jar to hdfs
-      copy_to_hdfs("spark", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
-      # create spark history directory
-      params.HdfsResource(params.spark_history_dir,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.spark_user,
-                          group=params.user_group,
-                          mode=0777,
-                          recursive_chmod=True
-                          )
-      params.HdfsResource(None, action="execute")
-
-    if params.security_enabled:
-      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
-      Execute(spark_kinit_cmd, user=params.spark_user)
-
-    # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
-    # need to copy the tarball, otherwise, copy it.
-    if params.stack_version_formatted and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.stack_version_formatted):
-      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
-      if resource_created:
-        params.HdfsResource(None, action="execute")
-
-    if name == 'jobhistoryserver':
-      historyserver_no_op_test = format(
-      'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1')
-      try:
-        Execute(format('{spark_history_server_start}'),
-                user=params.spark_user,
-                environment={'JAVA_HOME': params.java_home},
-                not_if=historyserver_no_op_test)
-      except:
-        show_logs(params.spark_log_dir, user=params.spark_user)
-        raise
-
-    elif name == 'sparkthriftserver':
-      if params.security_enabled:
-        hive_principal = params.hive_kerberos_principal.replace('_HOST', socket.getfqdn().lower())
-        hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
-        Execute(hive_kinit_cmd, user=params.hive_user)
-
-      thriftserver_no_op_test = format(
-      'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
-      try:
-        Execute(format('{spark_thrift_server_start} --properties-file {spark_thrift_server_conf_file} {spark_thrift_cmd_opts_properties}'),
-                user=params.hive_user,
-                environment={'JAVA_HOME': params.java_home},
-                not_if=thriftserver_no_op_test
-        )
-      except:
-        show_logs(params.spark_log_dir, user=params.hive_user)
-        raise
-  elif action == 'stop':
-    if name == 'jobhistoryserver':
-      try:
-        Execute(format('{spark_history_server_stop}'),
-                user=params.spark_user,
-                environment={'JAVA_HOME': params.java_home}
-        )
-      except:
-        show_logs(params.spark_log_dir, user=params.spark_user)
-        raise
-      File(params.spark_history_server_pid_file,
-        action="delete"
-      )
-
-    elif name == 'sparkthriftserver':
-      try:
-        Execute(format('{spark_thrift_server_stop}'),
-                user=params.hive_user,
-                environment={'JAVA_HOME': params.java_home}
-        )
-      except:
-        show_logs(params.spark_log_dir, user=params.hive_user)
-        raise
-      File(params.spark_thrift_server_pid_file,
-        action="delete"
-      )
-
-

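A note on the start/stop logic above (not part of the diff): each Execute is guarded by a not_if shell test that treats the component as running only when its pid file exists and the recorded process is alive. Below is a plain-Python sketch of that check, assuming the spark-env defaults added in this commit (spark_pid_dir=/var/run/spark, spark_user=spark) and the pid file name defined in status_params.py below.

# Sketch only: the liveness guard spark_service() expresses as
# "ls <pid_file> && ps -p `cat <pid_file>`", written out in Python.
import os
import subprocess

PID_FILE = "/var/run/spark/spark-spark-org.apache.spark.deploy.history.HistoryServer-1.pid"

def history_server_running(pid_file=PID_FILE):
    if not os.path.isfile(pid_file):
        return False
    with open(pid_file) as f:
        pid = f.read().strip()
    # 'ps -p <pid>' exits 0 only when the process exists, matching the not_if expression.
    with open(os.devnull, "w") as devnull:
        return subprocess.call(["ps", "-p", pid], stdout=devnull, stderr=devnull) == 0
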
http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_thrift_server.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_thrift_server.py
deleted file mode 100644
index 9311454..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_thrift_server.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-import os
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.core.logger import Logger
-from resource_management.core import shell
-from setup_spark import setup_spark
-from spark_service import spark_service
-
-
-class SparkThriftServer(Script):
-
-  def install(self, env):
-    import params
-    env.set_params(params)
-
-    self.install_packages(env)
-
-  def configure(self, env ,upgrade_type=None):
-    import params
-    env.set_params(params)
-    setup_spark(env, 'server', upgrade_type = upgrade_type, action = 'config')
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    self.configure(env)
-    spark_service('sparkthriftserver', upgrade_type=upgrade_type, action='start')
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    spark_service('sparkthriftserver', upgrade_type=upgrade_type, action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.spark_thrift_server_pid_file)
-
-  def get_component_name(self):
-    return "spark-thriftserver"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.SPARK_THRIFTSERVER, params.version):
-      Logger.info("Executing Spark Thrift Server Stack Upgrade pre-restart")
-      conf_select.select(params.stack_name, "spark", params.version)
-      stack_select.select("spark-thriftserver", params.version)
-      
-  def get_log_folder(self):
-    import params
-    return params.spark_log_dir
-  
-  def get_user(self):
-    import params
-    return params.hive_user
-
-if __name__ == "__main__":
-  SparkThriftServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/status_params.py
deleted file mode 100644
index 86e7f7d..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/status_params.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.functions import format
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.default import default
-
-config = Script.get_config()
-
-spark_user = config['configurations']['spark-env']['spark_user']
-spark_group = config['configurations']['spark-env']['spark_group']
-user_group = config['configurations']['cluster-env']['user_group']
-
-if 'hive-env' in config['configurations']:
-  hive_user = config['configurations']['hive-env']['hive_user']
-else:
-  hive_user = "hive"
-
-spark_pid_dir = config['configurations']['spark-env']['spark_pid_dir']
-spark_history_server_pid_file = format("{spark_pid_dir}/spark-{spark_user}-org.apache.spark.deploy.history.HistoryServer-1.pid")
-spark_thrift_server_pid_file = format("{spark_pid_dir}/spark-{hive_user}-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1.pid")
-stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.1/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/alerts.json b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/alerts.json
new file mode 100644
index 0000000..0e38f16
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/alerts.json
@@ -0,0 +1,32 @@
+{
+  "SPARK": {
+    "service": [],
+    "SPARK_JOBHISTORYSERVER": [
+      {
+        "name": "SPARK_JOBHISTORYSERVER_PROCESS",
+        "label": "Spark History Server",
+        "description": "This host-level alert is triggered if the Spark History Server cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{spark-defaults/spark.history.ui.port}}",
+          "default_port": 18080,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5
+            }
+          }
+        }
+      }
+    ]
+  }
+}

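For context (not part of the diff): the alert above is a host-level PORT check against spark.history.ui.port (18080 by default), reporting WARNING past 1.5 seconds and CRITICAL past 5 seconds or on connection failure. A minimal sketch of what such a check measures; the thresholds simply restate the alert definition, and the host argument is whatever host runs the history server.

# Sketch only: a TCP connect-time probe mirroring the PORT alert semantics.
import socket
import time

def check_history_server_port(host, port=18080, timeout=5.0):
    start = time.time()
    try:
        sock = socket.create_connection((host, port), timeout=timeout)
        sock.close()
    except (socket.error, socket.timeout) as exc:
        # Connection failure, including exceeding the 5s critical threshold.
        return ("CRITICAL", str(exc))
    elapsed = time.time() - start
    # Up to 1.5s is OK; anything slower (but connected) is WARNING.
    return ("OK" if elapsed <= 1.5 else "WARNING", elapsed)
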
http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-defaults.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-defaults.xml b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-defaults.xml
new file mode 100644
index 0000000..b507d5e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-defaults.xml
@@ -0,0 +1,152 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>spark.yarn.executor.memoryOverhead</name>
+    <value>384</value>
+    <description>
+      The amount of off heap memory (in megabytes) to be allocated per executor.
+      This is memory that accounts for things like VM overheads, interned strings,
+      other native overheads, etc.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.driver.memoryOverhead</name>
+    <value>384</value>
+    <description>
+      The amount of off heap memory (in megabytes) to be allocated per driver.
+      This is memory that accounts for things like VM overheads, interned strings,
+      other native overheads, etc.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.scheduler.heartbeat.interval-ms</name>
+    <value>5000</value>
+    <description>
+      The interval in ms in which the Spark application master heartbeats into the YARN ResourceManager.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.max.executor.failures</name>
+    <value>3</value>
+    <description>
+      The maximum number of executor failures before failing the application.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.queue</name>
+    <value>default</value>
+    <description>
+      The name of the YARN queue to which the application is submitted.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.containerLauncherMaxThreads</name>
+    <value>25</value>
+    <description>
+      The maximum number of threads to use in the application master for launching executor containers.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.submit.file.replication</name>
+    <value>3</value>
+    <description>
+      HDFS replication level for the files uploaded into HDFS for the application.
+      These include things like the Spark jar, the app jar, and any distributed cache files/archives.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.preserve.staging.files</name>
+    <value>false</value>
+    <description>
+      Set to true to preserve the staged files (Spark jar, app jar, distributed cache files) at the
+      end of the job rather than delete them.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.services</name>
+    <value>org.apache.spark.deploy.yarn.history.YarnHistoryService</value>
+    <description>
+      Service required for publishing events to the YARN Application Timeline Service.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.history.provider</name>
+    <value>org.apache.spark.deploy.history.yarn.server.YarnHistoryProvider</value>
+    <description>
+      Name of the class implementing the application history backend which publishes to YARN Application Timeline Service.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.history.ui.port</name>
+    <value>18080</value>
+    <description>
+      The port to which the web interface of the History Server binds.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.history.kerberos.principal</name>
+    <value>none</value>
+    <description>
+      Kerberos principal name for the Spark History Server.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.history.kerberos.keytab</name>
+    <value>none</value>
+    <description>
+      Location of the kerberos keytab file for the Spark History Server.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.history.provider</name>
+    <value>org.apache.spark.deploy.yarn.history.YarnHistoryProvider</value>
+    <description>Name of history provider class</description>
+  </property>
+
+  <property>
+    <name>spark.yarn.historyServer.address</name>
+    <value>{{spark_history_server_host}}:{{spark_history_ui_port}}</value>
+    <description>The address of the Spark history server (i.e. host.com:18080). The address should not contain a scheme (http://). Defaults to not being set since the history server is an optional service. This address is given to the YARN ResourceManager when the Spark application finishes to link the application from the ResourceManager UI to the Spark history server UI.</description>
+  </property>
+
+  <property>
+    <name>spark.yarn.max.executor.failures</name>
+    <value>3</value>
+    <description>The maximum number of executor failures before failing the application.</description>
+  </property>
+
+</configuration>

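For reference (not part of the diff): setup_spark.py earlier in this commit writes the spark-defaults configuration above to {spark_conf}/spark-defaults.conf via PropertiesFile with key_value_delimiter=" ", i.e. the space-separated "key value" form that spark-defaults.conf expects rather than "key=value". A small sketch of that rendering, using a few of the values defined above:

# Sketch only: space-delimited rendering of spark-defaults properties.
spark_defaults = {
    "spark.history.ui.port": "18080",
    "spark.yarn.queue": "default",
    "spark.yarn.max.executor.failures": "3",
}

def render_spark_defaults(props):
    # One "key value" line per property, sorted for a stable file.
    return "".join("{0} {1}\n".format(key, value) for key, value in sorted(props.items()))

print(render_spark_defaults(spark_defaults))
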
http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-env.xml b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-env.xml
new file mode 100644
index 0000000..8a5117a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-env.xml
@@ -0,0 +1,127 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>spark_user</name>
+    <display-name>Spark User</display-name>
+    <value>spark</value>
+    <property-type>USER</property-type>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>spark_group</name>
+    <display-name>Spark Group</display-name>
+    <value>spark</value>
+    <property-type>GROUP</property-type>
+    <description>spark group</description>
+    <value-attributes>
+      <type>user</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>spark_log_dir</name>
+    <value>/var/log/spark</value>
+    <description>Spark Log Dir</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>spark_pid_dir</name>
+    <value>/var/run/spark</value>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+  </property>
+
+  <!-- spark-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for spark-env.sh file</description>
+    <value>
+#!/usr/bin/env bash
+
+# This file is sourced when running various Spark programs.
+# Copy it as spark-env.sh and edit that to configure Spark for your site.
+
+# Options read in YARN client mode
+#SPARK_EXECUTOR_INSTANCES="2" #Number of workers to start (Default: 2)
+#SPARK_EXECUTOR_CORES="1" #Number of cores for the workers (Default: 1).
+#SPARK_EXECUTOR_MEMORY="1G" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)
+#SPARK_DRIVER_MEMORY="512 Mb" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)
+#SPARK_YARN_APP_NAME="spark" #The name of your application (Default: Spark)
+#SPARK_YARN_QUEUE="‘default’" #The hadoop queue to use for allocation requests (Default: ‘default’)
+#SPARK_YARN_DIST_FILES="" #Comma separated list of files to be distributed with the job.
+#SPARK_YARN_DIST_ARCHIVES="" #Comma separated list of archives to be distributed with the job.
+
+# Generic options for the daemons used in the standalone deploy mode
+
+# Alternate conf dir. (Default: ${SPARK_HOME}/conf)
+export SPARK_CONF_DIR=${SPARK_CONF_DIR:-{{spark_home}}/conf}
+
+# Where log files are stored.(Default:${SPARK_HOME}/logs)
+#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs
+export SPARK_LOG_DIR={{spark_log_dir}}
+
+# Where the pid file is stored. (Default: /tmp)
+export SPARK_PID_DIR={{spark_pid_dir}}
+
+# A string representing this instance of spark.(Default: $USER)
+SPARK_IDENT_STRING=$USER
+
+# The scheduling priority for daemons. (Default: 0)
+SPARK_NICENESS=0
+
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+# The java implementation to use.
+export JAVA_HOME={{java_home}}
+
+if [ -d "/etc/tez/conf/" ]; then
+  export TEZ_CONF_DIR=/etc/tez/conf
+else
+  export TEZ_CONF_DIR=
+fi
+
+</value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>spark_thrift_cmd_opts</name>
+    <description>additional spark thrift server commandline options</description>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+</configuration>

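One more note (not part of the diff): the content block above is a Jinja-style template; setup_spark.py renders it through InlineTemplate, substituting placeholders such as {{spark_log_dir}} and {{spark_pid_dir}} from params before writing {spark_conf}/spark-env.sh. A tiny sketch of that substitution, assuming InlineTemplate behaves like plain Jinja2 here and using the default values declared above:

# Sketch only: rendering two lines of the spark-env.sh template with Jinja2.
from jinja2 import Template

snippet = (
    "export SPARK_LOG_DIR={{spark_log_dir}}\n"
    "export SPARK_PID_DIR={{spark_pid_dir}}\n"
)
# Defaults come from spark-env.xml above: /var/log/spark and /var/run/spark.
print(Template(snippet).render(spark_log_dir="/var/log/spark", spark_pid_dir="/var/run/spark"))
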
http://git-wip-us.apache.org/repos/asf/ambari/blob/bd6eecce/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-log4j-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-log4j-properties.xml b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-log4j-properties.xml
new file mode 100644
index 0000000..9eca965
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-log4j-properties.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <description>Spark-log4j-Properties</description>
+    <value>
+# Set everything to be logged to the console
+log4j.rootCategory=INFO, console
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
+
+# Settings to quiet third party logs that are too verbose
+log4j.logger.org.eclipse.jetty=WARN
+log4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR
+log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
+
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+</configuration>

