ambari-commits mailing list archives

From dmitriu...@apache.org
Subject [05/46] ambari git commit: AMBARI-18739. Perf: Create Rolling and Express Upgrade Packs (dlysnichenko)
Date Fri, 20 Jan 2017 10:19:07 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py
deleted file mode 100644
index 73bc168..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import logging
-
-from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
-
-ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
-
-ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
-
-ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
-ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
-
-ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
-
-logger = logging.getLogger('ambari_alerts')
-
-alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
-                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
-                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
-          ALERT_FLIP_INTERVAL_MINS)
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file
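For reference, this file and the four identical alert scripts removed below all follow the same contract: Ambari resolves the {{hdfs-alert-config/...}} tokens returned by get_tokens() into a configurations dict and then calls execute(), which delegates to simulate_perf_cluster_alert_behaviour. A minimal standalone harness, sketched under the assumption of a hypothetical "percentage" behaviour type (the token keys are the real ones above; the values and the stubbed execute body are illustrative only, not Ambari's actual behaviour):

ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"

# Assumed values -- not defaults shipped with Ambari.
configurations = {
    ALERT_BEHAVIOUR_TYPE: "percentage",
    ALERT_SUCCESS_PERCENTAGE: "80",
}

def execute(configurations=None, parameters=None, host_name=None):
    # Stand-in for the deleted script's execute(); the real one delegates to
    # simulate_perf_cluster_alert_behaviour from resource_management.
    import random
    configurations = configurations or {}
    if configurations.get(ALERT_BEHAVIOUR_TYPE) == "percentage":
        pct = float(configurations[ALERT_SUCCESS_PERCENTAGE])
        state = "OK" if random.uniform(0, 100) < pct else "CRITICAL"
        return state, ["Simulated alert ({}% success target)".format(pct)]
    return "UNKNOWN", ["Unsupported behaviour type"]

print(execute(configurations))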

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py
deleted file mode 100644
index 73bc168..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import logging
-
-from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
-
-ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
-
-ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
-
-ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
-ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
-
-ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
-
-logger = logging.getLogger('ambari_alerts')
-
-alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
-                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
-                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
-          ALERT_FLIP_INTERVAL_MINS)
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py
deleted file mode 100644
index 73bc168..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import logging
-
-from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
-
-ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
-
-ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
-
-ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
-ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
-
-ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
-
-logger = logging.getLogger('ambari_alerts')
-
-alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
-                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
-                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
-          ALERT_FLIP_INTERVAL_MINS)
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py
deleted file mode 100644
index 73bc168..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import logging
-
-from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
-
-ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
-
-ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
-
-ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
-ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
-
-ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
-
-logger = logging.getLogger('ambari_alerts')
-
-alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
-                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
-                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
-          ALERT_FLIP_INTERVAL_MINS)
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py
deleted file mode 100644
index 73bc168..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import logging
-
-from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
-
-ALERT_BEHAVIOUR_TYPE = "{{hdfs-alert-config/alert.behavior.type}}"
-
-ALERT_SUCCESS_PERCENTAGE = "{{hdfs-alert-config/alert.success.percentage}}"
-
-ALERT_TIMEOUT_RETURN_VALUE = "{{hdfs-alert-config/alert.timeout.return.value}}"
-ALERT_TIMEOUT_SECS = "{{hdfs-alert-config/alert.timeout.secs}}"
-
-ALERT_FLIP_INTERVAL_MINS = "{{hdfs-alert-config/alert.flip.interval.mins}}"
-
-logger = logging.getLogger('ambari_alerts')
-
-alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
-                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
-                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
-          ALERT_FLIP_INTERVAL_MINS)
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
deleted file mode 100644
index 6fc338b..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/datanode.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
-
-
-class DataNode(Dummy):
-  """
-  Dummy script that simulates a slave component.
-  """
-
-  def __init__(self):
-    super(DataNode, self).__init__()
-    self.component_name = "DATANODE"
-    self.principal_conf_name = "hdfs-site"
-    self.principal_name = "dfs.datanode.kerberos.principal"
-    self.keytab_conf_name = "hdfs-site"
-    self.keytab_name = "dfs.datanode.keytab.file"
-
-if __name__ == "__main__":
-  DataNode().execute()
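Every PERF component script in this package follows the pattern above: subclass Dummy, set component_name, and (for daemon components) point the Kerberos principal/keytab lookups at hdfs-site properties; client-style components such as HdfsClient and ZkfcSlave below set only component_name. A stripped-down, hypothetical base class showing how those four fields could be consumed -- MinimalDummy and kerberos_identity are inventions for illustration, not the real resource_management.libraries.script.dummy API:

class MinimalDummy(object):
    """Hypothetical stand-in for resource_management's Dummy base class."""

    def __init__(self):
        self.component_name = None
        self.principal_conf_name = None
        self.principal_name = None
        self.keytab_conf_name = None
        self.keytab_name = None

    def kerberos_identity(self, configurations):
        """Resolve (principal, keytab) from the configured site/property keys."""
        if self.principal_conf_name is None:
            return None  # client-style components declare no identity
        principal_site = configurations.get(self.principal_conf_name, {})
        keytab_site = configurations.get(self.keytab_conf_name, {})
        return (principal_site.get(self.principal_name),
                keytab_site.get(self.keytab_name))

# Assumed hdfs-site values, mirroring the DataNode fields above:
configs = {"hdfs-site": {
    "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
    "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
}}
dn = MinimalDummy()
dn.component_name = "DATANODE"
dn.principal_conf_name = dn.keytab_conf_name = "hdfs-site"
dn.principal_name = "dfs.datanode.kerberos.principal"
dn.keytab_name = "dfs.datanode.keytab.file"
print(dn.kerberos_identity(configs))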

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/hdfs_client.py
deleted file mode 100644
index 4280c6c..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/hdfs_client.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
-
-
-class HdfsClient(Dummy):
-  """
-  Dummy script that simulates a client component.
-  """
-
-  def __init__(self):
-    super(HdfsClient, self).__init__()
-    self.component_name = "HDFS_CLIENT"
-
-if __name__ == "__main__":
-  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
deleted file mode 100644
index 96be630..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/journalnode.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
-
-
-class JournalNode(Dummy):
-  """
-  Dummy script that simulates a master component.
-  """
-
-  def __init__(self):
-    super(JournalNode, self).__init__()
-    self.component_name = "JOURNALNODE"
-    self.principal_conf_name = "hdfs-site"
-    self.principal_name = "dfs.journalnode.kerberos.principal"
-    self.keytab_conf_name = "hdfs-site"
-    self.keytab_name = "dfs.journalnode.keytab.file"
-
-if __name__ == "__main__":
-  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
deleted file mode 100644
index c3488e8..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/namenode.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-import json
-
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
-
-
-class NameNode(Dummy):
-  """
-  Dummy script that simulates a master component.
-  """
-
-  def __init__(self):
-    super(NameNode, self).__init__()
-    self.component_name = "NAMENODE"
-    self.principal_conf_name = "hdfs-site"
-    self.principal_name = "dfs.namenode.kerberos.principal"
-    self.keytab_conf_name = "hdfs-site"
-    self.keytab_name = "dfs.namenode.keytab.file"
-
-  def rebalancehdfs(self, env):
-    print "Rebalance HDFS"
-
-    threshold = 10
-    if "namenode" in self.config["commandParams"]:
-      name_node_params = self.config["commandParams"]["namenode"]
-      if name_node_params is not None:
-        name_node_parameters = json.loads(name_node_params)
-        threshold = name_node_parameters['threshold']
-
-    print "Threshold: %s" % str(threshold)
-
-  def decommission(self):
-    print "Rebalance HDFS"
-
-if __name__ == "__main__":
-  NameNode().execute()
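rebalancehdfs above reads an optional JSON payload from commandParams to override the default balancer threshold of 10 (the deleted decommission() also prints "Rebalance HDFS", apparently a copy-paste slip). A self-contained Python 3 sketch of the same parsing logic, with a made-up config payload standing in for self.config:

import json

def rebalance_threshold(config, default=10):
    """Extract the balancer threshold from a custom command's parameters.

    Mirrors the parsing in the deleted NameNode.rebalancehdfs: the caller may
    pass a JSON object under commandParams/namenode carrying a 'threshold'.
    """
    threshold = default
    name_node_params = config.get("commandParams", {}).get("namenode")
    if name_node_params is not None:
        threshold = json.loads(name_node_params).get("threshold", default)
    return threshold

# Illustrative command payload (values assumed, not from the commit):
config = {"commandParams": {"namenode": '{"threshold": 15}'}}
print("Threshold: %s" % rebalance_threshold(config))  # Threshold: 15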

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
deleted file mode 100644
index b750522..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/nfsgateway.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
-
-
-class NFSGateway(Dummy):
-  """
-  Dummy script that simulates a slave component.
-  """
-
-  def __init__(self):
-    super(NFSGateway, self).__init__()
-    self.component_name = "NFS_GATEWAY"
-    self.principal_conf_name = "hdfs-site"
-    self.principal_name = "nfs.kerberos.principal"
-    self.keytab_conf_name = "hdfs-site"
-    self.keytab_name = "nfs.keytab.file"
-
-if __name__ == "__main__":
-  NFSGateway().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/service_check.py
deleted file mode 100644
index 270b082..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/service_check.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-
-class ServiceCheck(Script):
-
-  def service_check(self, env):
-    print "Service Check"
-
-if __name__ == "__main__":
-  ServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
deleted file mode 100644
index 91ce7da..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/snamenode.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
-
-
-class SNameNode(Dummy):
-  """
-  Dummy script that simulates a slave component.
-  """
-
-  def __init__(self):
-    super(SNameNode, self).__init__()
-    self.component_name = "SECONDARY_NAMENODE"
-    self.principal_conf_name = "hdfs-site"
-    self.principal_name = "dfs.secondary.namenode.kerberos.principal"
-    self.keytab_conf_name = "hdfs-site"
-    self.keytab_name = "dfs.secondary.namenode.keytab.file"
-
-if __name__ == "__main__":
-  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/zkfc_slave.py
deleted file mode 100644
index b431072..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/package/scripts/zkfc_slave.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
-
-
-class ZkfcSlave(Dummy):
-  """
-  Dummy script that simulates a slave component.
-  """
-
-  def __init__(self):
-    super(ZkfcSlave, self).__init__()
-    self.component_name = "ZKFC"
-
-if __name__ == "__main__":
-  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/quicklinks/quicklinks.json
deleted file mode 100644
index a4216e3..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,76 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"https",
-      "checks":[
-        {
-          "property":"dfs.http.policy",
-          "desired":"HTTPS_ONLY",
-          "site":"hdfs-site"
-        }
-      ]
-    },
-
-    "links": [
-      {
-        "name": "namenode_ui",
-        "label": "NameNode UI",
-        "url":"%@://%@:%@",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "namenode_logs",
-        "label": "NameNode Logs",
-        "url":"%@://%@:%@/logs",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "namenode_jmx",
-        "label": "NameNode JMX",
-        "url":"%@://%@:%@/jmx",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "Thread Stacks",
-        "label": "Thread Stacks",
-        "url":"%@://%@:%@/stacks",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file
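The quicklinks definition above drives URL assembly: the protocol check compares hdfs-site's dfs.http.policy against HTTPS_ONLY, and each link's port is pulled from the matching address property with the regex \w*:(\d+), falling back to the declared default port. A sketch of that resolution; the site values and host name below are invented examples:

import re

def resolve_quicklink(hdfs_site, host, path=""):
    """Assemble a NameNode quicklink URL the way quicklinks.json describes."""
    https = hdfs_site.get("dfs.http.policy") == "HTTPS_ONLY"
    prop = "dfs.namenode.https-address" if https else "dfs.namenode.http-address"
    default_port = "50470" if https else "50070"
    match = re.search(r"\w*:(\d+)", hdfs_site.get(prop, ""))
    port = match.group(1) if match else default_port
    return "%s://%s:%s%s" % ("https" if https else "http", host, port, path)

# Example with assumed site values:
site = {"dfs.http.policy": "HTTP_ONLY",
        "dfs.namenode.http-address": "0.0.0.0:50070"}
print(resolve_quicklink(site, "nn-host.example.com", "/jmx"))
# -> http://nn-host.example.com:50070/jmx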

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/themes/theme.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/themes/theme.json
deleted file mode 100644
index 6f2b797..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/themes/theme.json
+++ /dev/null
@@ -1,179 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for HDFS service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "settings",
-            "display-name": "Settings",
-            "layout": {
-              "tab-columns": "2",
-              "tab-rows": "1",
-              "sections": [
-                {
-                  "name": "section-namenode",
-                  "display-name": "NameNode",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-namenode-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-datanode",
-                  "display-name": "DataNode",
-                  "row-index": "0",
-                  "column-index": "1",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-datanode-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "hdfs-site/dfs.namenode.name.dir",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hadoop-env/namenode_heapsize",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.namenode.handler.count",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.data.dir",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hadoop-env/dtnode_heapsize",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
-          "subsection-name": "subsection-datanode-col1"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "hdfs-site/dfs.namenode.name.dir",
-        "widget": {
-          "type": "directories"
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "percent"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.namenode.handler.count",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hadoop-env/namenode_heapsize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.data.dir",
-        "widget": {
-          "type": "directories"
-        }
-      },
-      {
-        "config": "hadoop-env/dtnode_heapsize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      }
-    ]
-  }
-}
-
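The theme wires each placement entry to a subsection declared under layouts; a subsection-name that matches nothing would leave that config with no home on the Settings tab. A small consistency check over a parsed theme.json (structure assumed to match the file above; the inline theme literal is abbreviated for illustration):

def check_theme_placements(theme):
    """Return placement configs whose subsection-name is not declared."""
    cfg = theme["configuration"]
    declared = {
        sub["name"]
        for layout in cfg["layouts"]
        for tab in layout["tabs"]
        for section in tab["layout"]["sections"]
        for sub in section["subsections"]
    }
    return [p["config"] for p in cfg["placement"]["configs"]
            if p["subsection-name"] not in declared]

theme = {"configuration": {
    "layouts": [{"tabs": [{"layout": {"sections": [
        {"subsections": [{"name": "subsection-namenode-col1"}]}]}}]}],
    "placement": {"configs": [
        {"config": "hdfs-site/dfs.namenode.name.dir",
         "subsection-name": "subsection-namenode-col1"}]},
}}
print(check_theme_placements(theme))  # [] -> consistent

Against the full theme above this also returns an empty list: every placement targets subsection-namenode-col1 or subsection-datanode-col1, both of which are declared.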

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/widgets.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/widgets.json
deleted file mode 100644
index 4a645b0..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/widgets.json
+++ /dev/null
@@ -1,649 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_hdfs_dashboard",
-      "display_name": "Standard HDFS Dashboard",
-      "section_name": "HDFS_SUMMARY",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "NameNode GC count",
-          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.GcCount._rate",
-              "metric_path": "metrics/jvm/gcCount._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
-              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "GC total count",
-              "value": "${jvm.JvmMetrics.GcCount._rate}"
-            },
-            {
-              "name": "GC count of type major collection",
-              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode GC time",
-          "description": "Total time taken by major type garbage collections in milliseconds.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
-              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "GC time in major collection",
-              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NN Connection Load",
-          "description": "Number of open RPC connections being managed by NameNode.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "rpc.rpc.client.NumOpenConnections",
-              "metric_path": "metrics/rpc/client/NumOpenConnections",
-              "category": "",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.NumOpenConnections",
-              "metric_path": "metrics/rpc/datanode/NumOpenConnections",
-              "category": "",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Open Client Connections",
-              "value": "${rpc.rpc.client.NumOpenConnections}"
-            },
-            {
-              "name": "Open Datanode Connections",
-              "value": "${rpc.rpc.datanode.NumOpenConnections}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode Heap",
-          "description": "Heap memory committed and Heap memory used with respect to time.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.MemHeapCommittedM",
-              "metric_path": "metrics/jvm/memHeapCommittedM",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "jvm.JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "JVM heap committed",
-              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
-            },
-            {
-              "name": "JVM heap used",
-              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode Host Load",
-          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "cpu_system",
-              "metric_path": "metrics/cpu/cpu_system",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_user",
-              "metric_path": "metrics/cpu/cpu_user",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_nice",
-              "metric_path": "metrics/cpu/cpu_nice",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_idle",
-              "metric_path": "metrics/cpu/cpu_idle",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "CPU utilization",
-              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
-            },
-            {
-              "name": "Memory utilization",
-              "value": "${((mem_total - mem_free)/mem_total) * 100}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "%"
-          }
-        },
-        {
-          "widget_name": "NameNode RPC",
-          "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "rpc.rpc.client.RpcQueueTimeAvgTime",
-              "metric_path": "metrics/rpc/client/RpcQueueTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.client.RpcProcessingTimeAvgTime",
-              "metric_path": "metrics/rpc/client/RpcProcessingTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
-              "metric_path": "metrics/rpc/datanode/RpcQueueTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
-              "metric_path": "metrics/rpc/datanode/RpcProcessingTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Client RPC Queue Wait time",
-              "value": "${rpc.rpc.client.RpcQueueTimeAvgTime}"
-            },
-            {
-              "name": "Client RPC Processing time",
-              "value": "${rpc.rpc.client.RpcProcessingTimeAvgTime}"
-            },
-            {
-              "name": "Datanode RPC Queue Wait time",
-              "value": "${rpc.rpc.datanode.RpcQueueTimeAvgTime}"
-            },
-            {
-              "name": "Datanode RPC Processing time",
-              "value": "${rpc.rpc.datanode.RpcProcessingTimeAvgTime}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "ms"
-          }
-        },
-        {
-          "widget_name": "NameNode Operations",
-          "description": "Rate per second of number of file operation over time.",
-          "widget_type": "GRAPH",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.namenode.TotalFileOps._rate",
-              "metric_path": "metrics/dfs/namenode/TotalFileOps._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "NameNode File Operations",
-              "value": "${dfs.namenode.TotalFileOps._rate}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Failed disk volumes",
-          "description": "Number of Failed disk volumes across all DataNodes. Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum",
-              "metric_path": "metrics/dfs/datanode/NumFailedVolumes",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Failed disk volumes",
-              "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum}"
-            }
-          ],
-          "properties": {
-            "display_unit": ""
-          }
-        },
-        {
-          "widget_name": "Blocks With Corrupted Replicas",
-          "description": "Number represents data blocks with at least one corrupted replica (but not all of them). Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
-              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Blocks With Corrupted Replicas",
-              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0",
-            "error_threshold": "50"
-          }
-        },
-        {
-          "widget_name": "Under Replicated Blocks",
-          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
-              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Under Replicated Blocks",
-              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0",
-            "error_threshold": "50"
-          }
-        },
-        {
-          "widget_name": "HDFS Space Utilization",
-          "description": "Percentage of available space used in the DFS.",
-          "widget_type": "GAUGE",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
-              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
-              "metric_path": "metrics/dfs/datanode/Capacity",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Space Utilization",
-              "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0.75",
-            "error_threshold": "0.9"
-          }
-        }
-      ]
-    },
-    {
-      "layout_name": "default_hdfs_heatmap",
-      "section_name": "HDFS_HEATMAPS",
-      "display_name": "HDFS Heatmaps",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "HDFS Bytes Read",
-          "default_section_name": "HDFS_HEATMAPS",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Bytes Read",
-              "value": "${dfs.datanode.BytesRead._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "1024"
-          }
-        },
-        {
-          "widget_name": "HDFS Bytes Written",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Bytes Written",
-              "value": "${dfs.datanode.BytesWritten._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "1024"
-          }
-        },
-        {
-          "widget_name": "DataNode Garbage Collection Time",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
-              "metric_path": "metrics/jvm/gcTimeMillis",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Garbage Collection Time",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "max_limit": "10000"
-          }
-        },
-        {
-          "widget_name": "DataNode JVM Heap Memory Used",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode JVM Heap Memory Used",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "DataNode JVM Heap Memory Committed",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
-              "metric_path": "metrics/jvm/memHeapCommittedM",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode JVM Heap Memory Committed",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "DataNode Process Disk I/O Utilization",
-          "default_section_name": "HDFS_HEATMAPS",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.BytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.TotalReadTime._rate",
-              "metric_path": "metrics/dfs/datanode/TotalReadTime._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.TotalWriteTime._rate",
-              "metric_path": "metrics/dfs/datanode/TotalWriteTime._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Process Disk I/O Utilization",
-              "value": "${((dfs.datanode.BytesRead._rate/dfs.datanode.TotalReadTime._rate)+(dfs.datanode.BytesWritten._rate/dfs.datanode.TotalWriteTime._rate))*50}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "DataNode Process Network I/O Utilization",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.RemoteBytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/RemoteBytesRead._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.ReadsFromRemoteClient._rate",
-              "metric_path": "metrics/dfs/datanode/reads_from_remote_client._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.RemoteBytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/RemoteBytesWritten._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.WritesFromRemoteClient._rate",
-              "metric_path": "metrics/dfs/datanode/writes_from_remote_client._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Process Network I/O Utilization",
-              "value": "${((dfs.datanode.RemoteBytesRead._rate/dfs.datanode.ReadsFromRemoteClient._rate)+(dfs.datanode.RemoteBytesWritten._rate/dfs.datanode.WritesFromRemoteClient._rate))*50}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "HDFS Space Utilization",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
-              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
-              "metric_path": "metrics/dfs/datanode/Capacity",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Space Utilization",
-              "value": "${((FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}
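
The widgets.json deleted above defined the PERF stack's HDFS dashboard: each widget's value is a small arithmetic expression over metric names (the ${...} strings), checked against warning/error thresholds. Note that the GAUGE variant of "HDFS Space Utilization" keeps the raw 0-1 fraction (hence warning_threshold 0.75 and error_threshold 0.9), while the HEATMAP variant scales the same ratio by 100 with max_limit 100. A minimal Python sketch of that arithmetic follows; the function name and sample numbers are illustrative assumptions, not values from the commit.

def hdfs_space_utilization(capacity_bytes, remaining_bytes, as_percent=True):
    # Mirrors the widget expression (Capacity - Remaining) / Capacity.
    # The HEATMAP widget multiplies the fraction by 100; the GAUGE
    # widget consumes the bare fraction.
    fraction = (capacity_bytes - remaining_bytes) / capacity_bytes
    return fraction * 100.0 if as_percent else fraction

# Illustrative numbers only:
print(hdfs_space_utilization(10 * 1024**4, 2 * 1024**4))         # 80.0 (% scale, as in the HEATMAP)
print(hdfs_space_utilization(10 * 1024**4, 2 * 1024**4, False))  # 0.8, past the GAUGE warning_threshold of 0.75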

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json
index f0bf38c..f22274f 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/kerberos.json
@@ -38,7 +38,7 @@
           "name": "SLEEPY",
           "identities": [
             {
-              "name": "/HDFS/NAMENODE/hdfs"
+              "name": "/FAKEHDFS/FAKENAMENODE/hdfs"
             },
             {
               "name": "sleepy_sleepy",

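The SLEEPY kerberos.json hunk above (and the identical SNOW hunk below) retargets a path-style identity reference of the form /<service>/<component>/<identity> from /HDFS/NAMENODE/hdfs to /FAKEHDFS/FAKENAMENODE/hdfs, presumably tracking the PERF stack's renamed HDFS clone. A rough sketch of how such a reference could be looked up in a Kerberos descriptor (an illustrative helper, not Ambari's actual resolver):

import json

def resolve_identity(descriptor, reference):
    # Split "/FAKEHDFS/FAKENAMENODE/hdfs" into service, component, identity.
    service, component, identity = reference.strip("/").split("/")
    for svc in descriptor.get("services", []):
        if svc.get("name") != service:
            continue
        for comp in svc.get("components", []):
            if comp.get("name") != component:
                continue
            for ident in comp.get("identities", []):
                if ident.get("name") == identity:
                    return ident
    raise KeyError("unresolved identity reference: " + reference)

# e.g. resolve_identity(json.load(open("kerberos.json")),
#                       "/FAKEHDFS/FAKENAMENODE/hdfs")
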
http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml
index 5b4dbd4..06a7cdf 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SLEEPY/metainfo.xml
@@ -30,6 +30,7 @@
           <displayName>Sleepy</displayName>
           <category>SLAVE</category>
           <cardinality>0+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/dwarf.py</script>
             <scriptType>PYTHON</scriptType>

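The <versionAdvertised>false</versionAdvertised> line added here (and to SNOW below) marks the dummy component as not reporting a component version, presumably so the rolling/express upgrade packs this commit introduces do not wait for a version from it. A minimal sketch of reading that flag (assuming the usual <name> child on each <component>, which the hunk elides; this is illustrative, not Ambari's stack parser):

import xml.etree.ElementTree as ET

def advertises_version(metainfo_path, component_name):
    root = ET.parse(metainfo_path).getroot()
    for comp in root.iter("component"):
        if comp.findtext("name") == component_name:
            # A missing tag is treated as "true" here; the diff adds the
            # tag explicitly to turn advertising off.
            return comp.findtext("versionAdvertised", "true").lower() == "true"
    raise KeyError(component_name)
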
http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json
index 6ee0af6..7be70f7 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/kerberos.json
@@ -38,7 +38,7 @@
           "name": "SNOW_WHITE",
           "identities": [
             {
-              "name": "/HDFS/NAMENODE/hdfs"
+              "name": "/FAKEHDFS/FAKENAMENODE/hdfs"
             },
             {
               "name": "snow_white_snow",

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/metainfo.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/metainfo.xml
index 21e685a..ff4b62d 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/SNOW/metainfo.xml
@@ -30,6 +30,7 @@
           <displayName>Snow White</displayName>
           <category>MASTER</category>
           <cardinality>1</cardinality>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/snow_white.py</script>
             <scriptType>PYTHON</scriptType>

