ambari-commits mailing list archives

From alejan...@apache.org
Subject [05/13] ambari git commit: AMBARI-18928. Perf: Add Hadoop Core services to PERF stack (alejandro)
Date Fri, 18 Nov 2016 22:48:43 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py
new file mode 100644
index 0000000..1e69def
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+OK_MESSAGE = 'Ok'
+
+HDFS_SITE_KEY = '{{hdfs-site}}'
+
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+NN_CHECKPOINT_TX_KEY = '{{hdfs-site/dfs.namenode.checkpoint.txns}}'
+NN_CHECKPOINT_PERIOD_KEY = '{{hdfs-site/dfs.namenode.checkpoint.period}}'
+
+KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
+KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = "{{cluster-env/smokeuser}}"
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+
+logger = logging.getLogger('ambari_alerts')
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HDFS_SITE_KEY, NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, EXECUTABLE_SEARCH_PATHS,
+      NN_CHECKPOINT_TX_KEY, NN_CHECKPOINT_PERIOD_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY, SMOKEUSER_KEY)
+  
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  result_code = RESULT_CODE_OK
+  label = OK_MESSAGE
+  return (result_code, [label])
\ No newline at end of file
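
For context on how these alert stubs get driven: the agent calls get_tokens(), resolves each {{site/property}} token against the cluster configuration, and passes the resulting dictionary to execute(). A minimal sketch of that handshake, with made-up token values (the resolver below is illustrative, not Ambari's actual agent code):

# Illustrative driver for an alert script such as alert_checkpoint_time.py.
import alert_checkpoint_time as alert

resolved = {
  alert.HDFS_SITE_KEY: {'dfs.namenode.checkpoint.period': '21600'},
  alert.NN_HTTP_ADDRESS_KEY: 'nn01.example.com:50070',
  alert.SECURITY_ENABLED_KEY: 'false',
}
configurations = dict((token, resolved[token])
                      for token in alert.get_tokens() if token in resolved)

result_code, labels = alert.execute(configurations=configurations)
assert (result_code, labels) == ('OK', ['Ok'])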

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py
new file mode 100644
index 0000000..8c122ae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+RESULT_STATE_OK = 'OK'
+RESULT_STATE_WARNING = 'WARNING'
+RESULT_STATE_CRITICAL = 'CRITICAL'
+RESULT_STATE_UNKNOWN = 'UNKNOWN'
+
+OK_MESSAGE = 'Ok'
+
+DFS_DATA_DIR = '{{hdfs-site/dfs.datanode.data.dir}}'
+DATA_DIR_MOUNT_FILE = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
+
+logger = logging.getLogger()
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (DFS_DATA_DIR, DATA_DIR_MOUNT_FILE)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+
+  DataNode directories can be of the following formats and each needs to be supported:
+    /grid/dn/archive0
+    [SSD]/grid/dn/archive0
+    [ARCHIVE]file:///grid/dn/archive0
+  """
+  warnings = []
+  errors = []
+
+  if configurations is None:
+    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  # Check required properties
+  if DFS_DATA_DIR not in configurations:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(DFS_DATA_DIR)])
+
+  dfs_data_dir = configurations[DFS_DATA_DIR]
+
+  if dfs_data_dir is None:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script and the value is null'.format(DFS_DATA_DIR)])
+
+  result_code = RESULT_STATE_OK
+  label = OK_MESSAGE
+  return (result_code, [label])
\ No newline at end of file
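
The three dfs.datanode.data.dir formats called out in the docstring above are what the full version of this alert has to parse. A hedged sketch of normalizing one entry (the helper name is ours, not part of the script):

import re

def normalize_data_dir(entry):
  # Split an optional [STORAGE_TYPE] prefix and strip a file:// scheme.
  entry = entry.strip()
  match = re.match(r'^\[([A-Za-z_]+)\](.*)$', entry)
  storage_type = match.group(1) if match else None
  path = match.group(2) if match else entry
  if path.startswith('file://'):
    path = path[len('file://'):]
  return storage_type, path

assert normalize_data_dir('/grid/dn/archive0') == (None, '/grid/dn/archive0')
assert normalize_data_dir('[SSD]/grid/dn/archive0') == ('SSD', '/grid/dn/archive0')
assert normalize_data_dir('[ARCHIVE]file:///grid/dn/archive0') == ('ARCHIVE', '/grid/dn/archive0')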

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_ha_namenode_health.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_ha_namenode_health.py
new file mode 100644
index 0000000..7f03d88
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_ha_namenode_health.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+RESULT_STATE_OK = 'OK'
+RESULT_STATE_CRITICAL = 'CRITICAL'
+RESULT_STATE_UNKNOWN = 'UNKNOWN'
+RESULT_STATE_SKIPPED = 'SKIPPED'
+
+OK_MESSAGE = 'Ok'
+
+HDFS_SITE_KEY = '{{hdfs-site}}'
+NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+
+KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
+KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+
+logger = logging.getLogger('ambari_alerts')
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS,
+          NN_HTTPS_ADDRESS_KEY, DFS_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY)
+  
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+  if configurations is None:
+    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  # if not in HA mode, then SKIP
+  if NAMESERVICE_KEY not in configurations:
+    return (RESULT_STATE_SKIPPED, ['NameNode HA is not enabled'])
+
+  # hdfs-site is required
+  if HDFS_SITE_KEY not in configurations:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
+
+  result_code = RESULT_STATE_OK
+  label = OK_MESSAGE
+  return (result_code, [label])
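
The early returns above give this stub three observable outcomes; for illustration, using the token strings exactly as defined in the script:

# hdfs-site resolved but no nameservice token -> HA is not enabled:
execute(configurations={'{{hdfs-site}}': {}})
# -> ('SKIPPED', ['NameNode HA is not enabled'])

# Both tokens present -> the stub reports healthy:
execute(configurations={
  '{{hdfs-site}}': {},
  '{{hdfs-site/dfs.internal.nameservices}}': 'mycluster',
})
# -> ('OK', ['Ok'])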

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_metrics_deviation.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_metrics_deviation.py
new file mode 100644
index 0000000..0946c85
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_metrics_deviation.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+RESULT_STATE_OK = 'OK'
+RESULT_STATE_CRITICAL = 'CRITICAL'
+RESULT_STATE_WARNING = 'WARNING'
+RESULT_STATE_UNKNOWN = 'UNKNOWN'
+RESULT_STATE_SKIPPED = 'SKIPPED'
+
+OK_MESSAGE = 'Ok'
+
+HDFS_SITE_KEY = '{{hdfs-site}}'
+NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+
+KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
+KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+
+METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY = '{{ams-site/timeline.metrics.service.webapp.address}}'
+METRICS_COLLECTOR_VIP_HOST_KEY = '{{cluster-env/metrics_collector_vip_host}}'
+METRICS_COLLECTOR_VIP_PORT_KEY = '{{cluster-env/metrics_collector_vip_port}}'
+
+logger = logging.getLogger()
+
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY, DFS_POLICY_KEY,
+          EXECUTABLE_SEARCH_PATHS, NN_HTTPS_ADDRESS_KEY, SMOKEUSER_KEY,
+          KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY,
+          METRICS_COLLECTOR_VIP_HOST_KEY, METRICS_COLLECTOR_VIP_PORT_KEY,
+          METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY)
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations : a mapping of configuration key to value
+  parameters : a mapping of script parameter key to value
+  host_name : the name of this host where the alert is running
+
+  :type configurations: dict
+  :type parameters: dict
+  :type host_name: str
+  """
+
+  #parse configuration
+  if configurations is None:
+    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  # hdfs-site is required
+  if HDFS_SITE_KEY not in configurations:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
+
+  result_code = RESULT_STATE_OK
+  label = OK_MESSAGE
+  return (result_code, [label])
\ No newline at end of file
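
This stub short-circuits to OK; the production alert of the same name pulls a metric series from the Metrics Collector (hence the ams-site and collector VIP tokens above) and flags values that stray too far from their mean. A rough sketch of that idea, ours rather than the stub's:

import math

def relative_deviation(values):
  # Sample standard deviation expressed as a fraction of the mean.
  mean = sum(values) / float(len(values))
  variance = sum((v - mean) ** 2 for v in values) / float(len(values) - 1)
  return math.sqrt(variance) / mean if mean else float('inf')

# A flat series barely deviates; a spiky one trips a threshold:
assert relative_deviation([100, 101, 99, 100]) < 0.05
assert relative_deviation([100, 100, 100, 400]) > 0.5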

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py
new file mode 100644
index 0000000..0dff200
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import logging
+
+RESULT_STATE_OK = 'OK'
+
+OK_MESSAGE = 'Ok'
+
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+
+HDFS_SITE_KEY = '{{hdfs-site}}'
+KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
+KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = "{{cluster-env/smokeuser}}"
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+
+logger = logging.getLogger('ambari_alerts')
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+
+  :rtype: tuple
+  """
+  return (HDFS_SITE_KEY, NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, EXECUTABLE_SEARCH_PATHS,
+          KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY, SMOKEUSER_KEY)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations : a mapping of configuration key to value
+  parameters : a mapping of script parameter key to value
+  host_name : the name of this host where the alert is running
+
+  :type configurations: dict
+  :type parameters: dict
+  :type host_name: str
+  """
+
+  if configurations is None:
+    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+  # hdfs-site is required
+  if HDFS_SITE_KEY not in configurations:
+    return ('UNKNOWN', ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
+
+  result_code = RESULT_STATE_OK
+  label = OK_MESSAGE
+  return (result_code, [label])
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
new file mode 100644
index 0000000..36edc31
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class DataNode(Dummy):
+  """
+  Dummy script that simulates a slave component.
+  """
+
+  def __init__(self):
+    super(DataNode, self).__init__()
+    self.component_name = "DATANODE"
+
+if __name__ == "__main__":
+  DataNode().execute()
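
Each of the component scripts in this commit leans on resource_management.libraries.script.dummy.Dummy. Roughly, it is a Script subclass whose lifecycle commands succeed without doing real work; a hypothetical sketch of the shape these scripts assume (not the actual source):

from resource_management.libraries.script.script import Script

class Dummy(Script):
  """Hypothetical outline: every lifecycle command is a logged no-op."""

  def __init__(self):
    self.component_name = "UNKNOWN"

  def install(self, env):
    print("Install %s" % self.component_name)

  def start(self, env):
    print("Start %s" % self.component_name)

  def stop(self, env):
    print("Stop %s" % self.component_name)

  def status(self, env):
    pass  # a real simulator would also maintain a fake pid file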

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/hdfs_client.py
new file mode 100644
index 0000000..4280c6c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/hdfs_client.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class HdfsClient(Dummy):
+  """
+  Dummy script that simulates a client component.
+  """
+
+  def __init__(self):
+    super(HdfsClient, self).__init__()
+    self.component_name = "HDFS_CLIENT"
+
+if __name__ == "__main__":
+  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
new file mode 100644
index 0000000..1ad13b7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class JournalNode(Dummy):
+  """
+  Dummy script that simulates a master component.
+  """
+
+  def __init__(self):
+    super(JournalNode, self).__init__()
+    self.component_name = "JOURNALNODE"
+
+if __name__ == "__main__":
+  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
new file mode 100644
index 0000000..ded09cb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
@@ -0,0 +1,54 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+import json
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class NameNode(Dummy):
+  """
+  Dummy script that simulates a master component.
+  """
+
+  def __init__(self):
+    super(NameNode, self).__init__()
+    self.component_name = "NAMENODE"
+
+  def rebalancehdfs(self, env):
+    print "Rebalance HDFS"
+
+    threshold = 10
+    if "namenode" in self.config["commandParams"]:
+      name_node_params = self.config["commandParams"]["namenode"]
+      if name_node_params is not None:
+        name_node_parameters = json.loads(name_node_params)
+        threshold = name_node_parameters['threshold']
+
+    print "Threshold: %s" % str(threshold)
+
+  def decommission(self, env):
+    print "Decommission"
+
+if __name__ == "__main__":
+  NameNode().execute()
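
rebalancehdfs() above digs its threshold out of commandParams, where the "namenode" value is itself a JSON string rather than a dict. An illustrative payload (values made up):

# self.config as rebalancehdfs() expects to find it:
config = {
  "commandParams": {
    "namenode": '{"threshold": 25}'  # note: a JSON string, not a dict
  }
}
# json.loads(config["commandParams"]["namenode"])["threshold"] -> 25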

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
new file mode 100644
index 0000000..ab9855d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class NFSGateway(Dummy):
+  """
+  Dummy script that simulates a slave component.
+  """
+
+  def __init__(self):
+    super(NFSGateway, self).__init__()
+    self.component_name = "NFS_GATEWAY"
+
+if __name__ == "__main__":
+  NFSGateway().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/service_check.py
new file mode 100644
index 0000000..270b082
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/service_check.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+
+class ServiceCheck(Script):
+
+  def service_check(self, env):
+    print "Service Check"
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
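
For completeness: Script.execute() dispatches to the method named after the incoming command, which is why defining service_check(self, env) is all this class needs. A toy stand-in for that dispatch (simplified; the real Script also parses the command JSON handed over by the agent):

class MiniScript(object):
  """Toy model of Script's dispatch-by-command-name behaviour."""
  def execute(self, command, env=None):
    getattr(self, command)(env)

class ToyServiceCheck(MiniScript):
  def service_check(self, env):
    print("Service Check")

ToyServiceCheck().execute("service_check")  # prints: Service Check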

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
new file mode 100644
index 0000000..8815aa3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class SNameNode(Dummy):
+  """
+  Dummy script that simulates a slave component.
+  """
+
+  def __init__(self):
+    super(SNameNode, self).__init__()
+    self.component_name = "SECONDARY_NAMENODE"
+
+if __name__ == "__main__":
+  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/zkfc_slave.py
new file mode 100644
index 0000000..b431072
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/zkfc_slave.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class ZkfcSlave(Dummy):
+  """
+  Dummy script that simulates a slave component.
+  """
+
+  def __init__(self):
+    super(ZkfcSlave, self).__init__()
+    self.component_name = "ZKFC"
+
+if __name__ == "__main__":
+  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/quicklinks/quicklinks.json
new file mode 100644
index 0000000..a4216e3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/quicklinks/quicklinks.json
@@ -0,0 +1,76 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"dfs.http.policy",
+          "desired":"HTTPS_ONLY",
+          "site":"hdfs-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "namenode_ui",
+        "label": "NameNode UI",
+        "url":"%@://%@:%@",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "namenode_logs",
+        "label": "NameNode Logs",
+        "url":"%@://%@:%@/logs",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "namenode_jmx",
+        "label": "NameNode JMX",
+        "url":"%@://%@:%@/jmx",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "Thread Stacks",
+        "label": "Thread Stacks",
+        "url":"%@://%@:%@/stacks",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
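
The "%@://%@:%@" templates above are positional placeholders that the quick links UI fills, in order, with protocol, host, and port (the port being extracted from the chosen *-address property via the "regex" field). Schematically, as our illustration rather than Ambari UI code:

url = "%@://%@:%@/logs"
for piece in ("https", "nn01.example.com", "50470"):  # example values only
  url = url.replace("%@", piece, 1)
# url is now "https://nn01.example.com:50470/logs"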

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/themes/theme.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/themes/theme.json
new file mode 100644
index 0000000..6f2b797
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/themes/theme.json
@@ -0,0 +1,179 @@
+{
+  "name": "default",
+  "description": "Default theme for HDFS service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-namenode",
+                  "display-name": "NameNode",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-namenode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-datanode",
+                  "display-name": "DataNode",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-datanode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hdfs-site/dfs.namenode.name.dir",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hadoop-env/namenode_heapsize",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.handler.count",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.data.dir",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hadoop-env/dtnode_heapsize",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+          "subsection-name": "subsection-datanode-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hdfs-site/dfs.namenode.name.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hadoop-env/namenode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.data.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hadoop-env/dtnode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/widgets.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/widgets.json
new file mode 100644
index 0000000..4a645b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/widgets.json
@@ -0,0 +1,649 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_hdfs_dashboard",
+      "display_name": "Standard HDFS Dashboard",
+      "section_name": "HDFS_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "NameNode GC count",
+          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcCount._rate",
+              "metric_path": "metrics/jvm/gcCount._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC total count",
+              "value": "${jvm.JvmMetrics.GcCount._rate}"
+            },
+            {
+              "name": "GC count of type major collection",
+              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode GC time",
+          "description": "Total time taken by major type garbage collections in milliseconds.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC time in major collection",
+              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NN Connection Load",
+          "description": "Number of open RPC connections being managed by NameNode.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.client.NumOpenConnections",
+              "metric_path": "metrics/rpc/client/NumOpenConnections",
+              "category": "",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.NumOpenConnections",
+              "metric_path": "metrics/rpc/datanode/NumOpenConnections",
+              "category": "",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Open Client Connections",
+              "value": "${rpc.rpc.client.NumOpenConnections}"
+            },
+            {
+              "name": "Open Datanode Connections",
+              "value": "${rpc.rpc.datanode.NumOpenConnections}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Heap",
+          "description": "Heap memory committed and Heap memory used with respect to time.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "JVM heap committed",
+              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
+            },
+            {
+              "name": "JVM heap used",
+              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Host Load",
+          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system",
+              "metric_path": "metrics/cpu/cpu_system",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_user",
+              "metric_path": "metrics/cpu/cpu_user",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_nice",
+              "metric_path": "metrics/cpu/cpu_nice",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_idle",
+              "metric_path": "metrics/cpu/cpu_idle",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
+            },
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total - mem_free)/mem_total) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        },
+        {
+          "widget_name": "NameNode RPC",
+          "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.client.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/client/RpcQueueTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.client.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/client/RpcProcessingTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/datanode/RpcQueueTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/datanode/RpcProcessingTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Client RPC Queue Wait time",
+              "value": "${rpc.rpc.client.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "Client RPC Processing time",
+              "value": "${rpc.rpc.client.RpcProcessingTimeAvgTime}"
+            },
+            {
+              "name": "Datanode RPC Queue Wait time",
+              "value": "${rpc.rpc.datanode.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "Datanode RPC Processing time",
+              "value": "${rpc.rpc.datanode.RpcProcessingTimeAvgTime}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "ms"
+          }
+        },
+        {
+          "widget_name": "NameNode Operations",
+          "description": "Rate per second of number of file operation over time.",
+          "widget_type": "GRAPH",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.namenode.TotalFileOps._rate",
+              "metric_path": "metrics/dfs/namenode/TotalFileOps._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "NameNode File Operations",
+              "value": "${dfs.namenode.TotalFileOps._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Failed disk volumes",
+          "description": "Number of Failed disk volumes across all DataNodes. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum",
+              "metric_path": "metrics/dfs/datanode/NumFailedVolumes",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Failed disk volumes",
+              "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum}"
+            }
+          ],
+          "properties": {
+            "display_unit": ""
+          }
+        },
+        {
+          "widget_name": "Blocks With Corrupted Replicas",
+          "description": "Number represents data blocks with at least one corrupted replica (but not all of them). Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Blocks With Corrupted Replicas",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "Under Replicated Blocks",
+          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Under Replicated Blocks",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "description": "Percentage of available space used in the DFS.",
+          "widget_type": "GAUGE",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
+              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0.75",
+            "error_threshold": "0.9"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_hdfs_heatmap",
+      "section_name": "HDFS_HEATMAPS",
+      "display_name": "HDFS Heatmaps",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "HDFS Bytes Read",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Read",
+              "value": "${dfs.datanode.BytesRead._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "HDFS Bytes Written",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Written",
+              "value": "${dfs.datanode.BytesWritten._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "DataNode Garbage Collection Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Garbage Collection Time",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Used",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Committed",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Committed",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Disk I/O Utilization",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.BytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalReadTime._rate",
+              "metric_path": "metrics/dfs/datanode/TotalReadTime._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalWriteTime._rate",
+              "metric_path": "metrics/dfs/datanode/TotalWriteTime._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Disk I/O Utilization",
+              "value": "${((dfs.datanode.BytesRead._rate/dfs.datanode.TotalReadTime._rate)+(dfs.datanode.BytesWritten._rate/dfs.datanode.TotalWriteTime._rate))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Network I/O Utilization",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.RemoteBytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/RemoteBytesRead._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.ReadsFromRemoteClient._rate",
+              "metric_path": "metrics/dfs/datanode/reads_from_remote_client._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.RemoteBytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/RemoteBytesWritten._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.WritesFromRemoteClient._rate",
+              "metric_path": "metrics/dfs/datanode/writes_from_remote_client._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Network I/O Utilization",
+              "value": "${((dfs.datanode.RemoteBytesRead._rate/dfs.datanode.ReadsFromRemoteClient._rate)+(dfs.datanode.RemoteBytesWritten._rate/dfs.datanode.WritesFromRemoteClient._rate))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
+              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${((FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
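
The "value" fields in this file are template expressions evaluated over the declared metric names: the GAUGE form of HDFS Space Utilization computes (Capacity - Remaining)/Capacity as a fraction compared against its 0.75 warning_threshold, while the HEATMAP form multiplies by 100 for a percent display. A minimal Python sketch of that arithmetic (an illustrative helper, not Ambari's actual expression engine):

# A minimal sketch of the arithmetic behind the HDFS Space Utilization
# widgets; illustrative only, not Ambari's expression evaluator.
def hdfs_space_utilization(capacity_bytes, remaining_bytes):
  """Return DFS space used as a fraction of capacity, or None if unknown."""
  if capacity_bytes <= 0:
    return None  # avoid division by zero when no capacity is reported
  return float(capacity_bytes - remaining_bytes) / capacity_bytes

# 8 TB capacity with 2 TB remaining -> 0.75, tripping the GAUGE
# warning_threshold; the HEATMAP variant reports 0.75 * 100 = 75%.
print(hdfs_space_utilization(8e12, 2e12))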

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml
new file mode 100644
index 0000000..7d521b8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/configuration/sleepy-site.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>success.percentage</name>
+    <value>100</value>
+    <description>The success percentage of any operation.</description>
+    <display-name>Success percentage</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>100</maximum>
+      <increment-step>10</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
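
The success.percentage property is the knob behind SLEEPY's simulated failures: some fraction of operations, determined by this value, is allowed to succeed. A hedged sketch of how such a knob could be consumed, assuming a simple uniform draw (the helper below is hypothetical, not the stack's actual logic):

import random

def operation_succeeds(success_percentage):
  """Return True with the configured probability (0-100); hypothetical helper."""
  return random.uniform(0, 100) < success_percentage

# With the default value of 100 an operation essentially always succeeds;
# lowering the slider makes the simulated component fail proportionally often.
print(operation_succeeds(100))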

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml
new file mode 100644
index 0000000..470d809
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml
@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SLEEPY</name>
+      <displayName>Sleepy</displayName>
+      <comment>Times out with some percentage</comment>
+      <version>1.0</version>
+      <components>
+
+        <component>
+          <name>SLEEPY</name>
+          <displayName>Sleepy</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/dwarf.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <restartRequiredAfterChange>true</restartRequiredAfterChange>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+    </service>
+  </services>
+</metainfo>
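
The metainfo above registers SLEEPY as a SLAVE component with cardinality 0+, backed by scripts/dwarf.py for lifecycle commands and scripts/service_check.py for the service check. A small sketch, assuming the file is on disk as metainfo.xml, of reading those component definitions with the standard library (illustrative; not how Ambari itself loads stack definitions):

import xml.etree.ElementTree as ET

tree = ET.parse("metainfo.xml")
for component in tree.getroot().iter("component"):
  print("%s (%s) -> %s" % (component.findtext("name"),
                           component.findtext("category"),
                           component.findtext("commandScript/script")))
# Expected output for the file above:
# SLEEPY (SLAVE) -> scripts/dwarf.py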

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py
new file mode 100644
index 0000000..370d03d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/dwarf.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+
+# Local Imports
+from resource_management.libraries.script.dummy import Dummy
+
+
+class Sleepy(Dummy):
+  """
+  Dummy script that simulates a slave component.
+  """
+
+  def __init__(self):
+    super(Sleepy, self).__init__()
+    self.component_name = "SLEEPY"
+
+if __name__ == "__main__":
+  Sleepy().execute()
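
Sleepy does nothing beyond naming its component; all lifecycle behavior comes from the Dummy base class, which is what lets the PERF stack simulate many components without real daemons. A conceptual sketch of that pattern, whose method bodies are assumptions for illustration rather than the real resource_management.libraries.script.dummy.Dummy implementation:

# Conceptual sketch: every lifecycle command becomes a cheap no-op so
# thousands of simulated components can run on a handful of hosts.
# The bodies below are assumptions, not the actual Dummy class.
class DummySketch(object):

  def __init__(self):
    self.component_name = None

  def install(self, env):
    print("Installing %s (no-op)" % self.component_name)

  def start(self, env):
    print("Starting %s (no-op)" % self.component_name)

  def stop(self, env):
    print("Stopping %s (no-op)" % self.component_name)

  def status(self, env):
    pass  # a real simulated component would report status here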

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/service_check.py
new file mode 100644
index 0000000..270b082
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/package/scripts/service_check.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+
+class ServiceCheck(Script):
+
+  def service_check(self, env):
+    print "Service Check"
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
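
Script.execute() resolves the command Ambari sends (for example SERVICE_CHECK) to the method of the same name on the script class, which is why defining service_check alone is enough here. An illustrative sketch of that dispatch convention (Ambari's real Script.execute also parses the command JSON, configures logging, and reports structured results):

# Illustrative stand-in for the Script dispatch convention; not the
# actual resource_management implementation.
class MiniScript(object):

  def execute(self, command_name):
    # Resolve SERVICE_CHECK -> self.service_check and invoke it.
    method = getattr(self, command_name.lower(), None)
    if method is None:
      raise NotImplementedError(command_name)
    method(env=None)

class MiniServiceCheck(MiniScript):

  def service_check(self, env):
    print("Service Check")

MiniServiceCheck().execute("SERVICE_CHECK")  # prints "Service Check"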

http://git-wip-us.apache.org/repos/asf/ambari/blob/08ca96e3/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/themes/theme.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/themes/theme.json
new file mode 100644
index 0000000..ae78184
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/themes/theme.json
@@ -0,0 +1,65 @@
+{
+  "name": "default",
+  "description": "Default theme",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-general",
+                  "display-name": "General",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-general",
+                      "display-name": "Features",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "sleepy-site/success.percentage",
+          "subsection-name": "subsection-general"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "sleepy-site/success.percentage",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
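
The theme binds sleepy-site/success.percentage to a slider widget in the Features subsection; together with the property's value-attributes (int, 0 to 100, increment-step 10) the UI can render and validate a bounded slider. A small hypothetical helper showing the validation those attributes imply (not Ambari UI code):

def valid_slider_value(value, minimum=0, maximum=100, step=10):
  """Check a proposed value against the value-attributes above; hypothetical."""
  return (isinstance(value, int)
          and minimum <= value <= maximum
          and (value - minimum) % step == 0)

print(valid_slider_value(70))  # True
print(valid_slider_value(75))  # False: not on a 10-unit increment-step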

