ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From dmitriu...@apache.org
Subject [32/46] ambari git commit: AMBARI-18739. Perf: Create Rolling and Express Upgrade Packs (dlysnichenko)
Date Fri, 20 Jan 2017 10:19:34 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_master_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_master_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_master_process.py
deleted file mode 100644
index 7662c1f..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_master_process.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import logging
-
-from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
-
-ALERT_BEHAVIOUR_TYPE = "{{hbase-alert-config/alert.behavior.type}}"
-
-ALERT_SUCCESS_PERCENTAGE = "{{hbase-alert-config/alert.success.percentage}}"
-
-ALERT_TIMEOUT_RETURN_VALUE = "{{hbase-alert-config/alert.timeout.return.value}}"
-ALERT_TIMEOUT_SECS = "{{hbase-alert-config/alert.timeout.secs}}"
-
-ALERT_FLIP_INTERVAL_MINS = "{{hbase-alert-config/alert.flip.interval.mins}}"
-
-logger = logging.getLogger('ambari_alerts')
-
-alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
-                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
-                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
-          ALERT_FLIP_INTERVAL_MINS)
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_regionserver_process.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_regionserver_process.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_regionserver_process.py
deleted file mode 100644
index 7662c1f..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/alerts/hbase_regionserver_process.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import logging
-
-from resource_management.libraries.functions.simulate_perf_cluster_alert_behaviour import simulate_perf_cluster_alert_behaviour
-
-ALERT_BEHAVIOUR_TYPE = "{{hbase-alert-config/alert.behavior.type}}"
-
-ALERT_SUCCESS_PERCENTAGE = "{{hbase-alert-config/alert.success.percentage}}"
-
-ALERT_TIMEOUT_RETURN_VALUE = "{{hbase-alert-config/alert.timeout.return.value}}"
-ALERT_TIMEOUT_SECS = "{{hbase-alert-config/alert.timeout.secs}}"
-
-ALERT_FLIP_INTERVAL_MINS = "{{hbase-alert-config/alert.flip.interval.mins}}"
-
-logger = logging.getLogger('ambari_alerts')
-
-alert_behaviour_properties = {"alert_behaviour_type" : ALERT_BEHAVIOUR_TYPE, "alert_success_percentage" : ALERT_SUCCESS_PERCENTAGE,
-                              "alert_timeout_return_value" : ALERT_TIMEOUT_RETURN_VALUE, "alert_timeout_secs" : ALERT_TIMEOUT_SECS,
-                              "alert_flip_interval_mins" : ALERT_FLIP_INTERVAL_MINS}
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (ALERT_BEHAVIOUR_TYPE, ALERT_SUCCESS_PERCENTAGE, ALERT_TIMEOUT_RETURN_VALUE, ALERT_TIMEOUT_SECS,
-          ALERT_FLIP_INTERVAL_MINS)
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  return simulate_perf_cluster_alert_behaviour(alert_behaviour_properties, configurations)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_client.py
deleted file mode 100644
index c4943fe..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_client.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
-
-
-class HBaseClient(Dummy):
-  """
-  Dummy script that simulates a client component.
-  """
-
-  def __init__(self):
-    super(HBaseClient, self).__init__()
-    self.component_name = "HBASE_CLIENT"
-
-if __name__ == "__main__":
-  HBaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py
deleted file mode 100644
index 3761fcf..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_master.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
-
-
-class HBaseMaster(Dummy):
-  """
-  Dummy script that simulates a master component.
-  """
-
-  def __init__(self):
-    super(HBaseMaster, self).__init__()
-    self.component_name = "HBASE_MASTER"
-    self.principal_conf_name = "hbase-site"
-    self.principal_name = "hbase.master.kerberos.principal"
-    self.keytab_conf_name = "hbase-site"
-    self.keytab_name = "hbase.master.keytab.file"
-
-  def decommission(self, env):
-    print "Decommission"
-
-if __name__ == "__main__":
-  HBaseMaster().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py
deleted file mode 100644
index 101c36f..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/hbase_regionserver.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
-
-
-class HBaseRegionServer(Dummy):
-  """
-  Dummy script that simulates a slave component.
-  """
-
-  def __init__(self):
-    super(HBaseRegionServer, self).__init__()
-    self.component_name = "HBASE_REGIONSERVER"
-    self.principal_conf_name = "hbase-site"
-    self.principal_name = "hbase.regionserver.kerberos.principal"
-    self.keytab_conf_name = "hbase-site"
-    self.keytab_name = "hbase.regionserver.keytab.file"
-
-  def decommission(self, env):
-    print "Decommission"
-
-if __name__ == "__main__":
-  HBaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py
deleted file mode 100644
index 76a49d9..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/phoenix_queryserver.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-
-# Local Imports
-from resource_management.libraries.script.dummy import Dummy
-
-
-class PhoenixQueryServer(Dummy):
-  """
-  Dummy script that simulates a slave component.
-  """
-
-  def __init__(self):
-    super(PhoenixQueryServer, self).__init__()
-    self.component_name = "PHOENIX_QUERY_SERVER"
-    self.principal_conf_name = "hbase-site"
-    self.principal_name = "phoenix.queryserver.kerberos.principal"
-    self.keytab_conf_name = "hbase-site"
-    self.keytab_name = "phoenix.queryserver.keytab.file"
-
-if __name__ == "__main__":
-  PhoenixQueryServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/service_check.py
deleted file mode 100644
index 66571f8..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/package/scripts/service_check.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-
-class ServiceCheck(Script):
-
-  def service_check(self, env):
-    print "Service Check"
-
-if __name__ == "__main__":
-  ServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/quicklinks/quicklinks.json
deleted file mode 100644
index 5568122..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,97 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"http"
-    },
-
-    "links": [
-      {
-        "name": "hbase_master_ui",
-        "label": "HBase Master UI",
-        "url":"%@://%@:%@/master-status",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "hbase.master.info.port",
-          "http_default_port": "60010",
-          "https_property": "hbase.master.info.port",
-          "https_default_port": "60443",
-          "regex": "",
-          "site": "hbase-site"
-        }
-      },
-      {
-        "name": "hbase_logs",
-        "label": "HBase Logs",
-        "url":"%@://%@:%@/logs",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "hbase.master.info.port",
-          "http_default_port": "60010",
-          "https_property": "hbase.master.info.port",
-          "https_default_port": "60443",
-          "regex": "",
-          "site": "hbase-site"
-        }
-      },
-      {
-        "name": "zookeeper_info",
-        "label": "Zookeeper Info",
-        "url":"%@://%@:%@/zk.jsp",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "hbase.master.info.port",
-          "http_default_port": "60010",
-          "https_property": "hbase.master.info.port",
-          "https_default_port": "60443",
-          "regex": "",
-          "site": "hbase-site"
-        }
-      },
-      {
-        "name": "hbase_master_jmx",
-        "label": "HBase Master JMX",
-        "url":"%@://%@:%@/jmx",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "hbase.master.info.port",
-          "http_default_port": "60010",
-          "https_property": "hbase.master.info.port",
-          "https_default_port": "60443",
-          "regex": "",
-          "site": "hbase-site"
-        }
-      },
-      {
-        "name": "debug_dump",
-        "label": "Debug Dump",
-        "url":"%@://%@:%@/dump",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "hbase.master.info.port",
-          "http_default_port": "60010",
-          "https_property": "hbase.master.info.port",
-          "https_default_port": "60443",
-          "regex": "",
-          "site": "hbase-site"
-        }
-      },
-      {
-        "name": "thread_stacks",
-        "label": "Thread Stacks",
-        "url":"%@://%@:%@/stacks",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "hbase.master.info.port",
-          "http_default_port": "60010",
-          "https_property": "hbase.master.info.port",
-          "https_default_port": "60443",
-          "regex": "",
-          "site": "hbase-site"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/themes/theme.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/themes/theme.json
deleted file mode 100644
index 7a5732b..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/themes/theme.json
+++ /dev/null
@@ -1,411 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for HBASE service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "settings",
-            "display-name": "Settings",
-            "layout": {
-              "tab-columns": "3",
-              "tab-rows": "3",
-              "sections": [
-                {
-                  "name": "section-hbase-memory",
-                  "display-name": "Server",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "2",
-                  "section-columns": "2",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-hbase-memory-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-hbase-memory-col2",
-                      "row-index": "0",
-                      "column-index": "1",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-hbase-client",
-                  "display-name": "Client",
-                  "row-index": "0",
-                  "column-index": "2",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-hbase-client-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-hbase-disk",
-                  "display-name": "Disk",
-                  "row-index": "1",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "3",
-                  "section-columns": "3",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-hbase-disk-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-hbase-disk-col2",
-                      "row-index": "0",
-                      "column-index": "1",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-hbase-disk-col3",
-                      "row-index": "0",
-                      "column-index": "2",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-hbase-timeouts",
-                  "display-name": "Timeouts",
-                  "row-index": "2",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-hbase-timeouts-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-hbase-security",
-                  "display-name": "Security",
-                  "row-index": "2",
-                  "column-index": "1",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-hbase-security-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-hbase-phoenix",
-                  "display-name": "Phoenix SQL",
-                  "row-index": "2",
-                  "column-index": "2",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-hbase-phoenix-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "hbase-env/hbase_master_heapsize",
-          "subsection-name": "subsection-hbase-memory-col1"
-        },
-        {
-          "config": "hbase-env/hbase_regionserver_heapsize",
-          "subsection-name": "subsection-hbase-memory-col1"
-        },
-        {
-          "config": "hbase-site/hfile.block.cache.size",
-          "subsection-name": "subsection-hbase-memory-col1"
-        },
-        {
-          "config": "hbase-site/hbase.regionserver.global.memstore.upperLimit",
-          "subsection-name": "subsection-hbase-memory-col1"
-        },
-        {
-          "config": "hbase-site/hbase.hregion.memstore.flush.size",
-          "subsection-name": "subsection-hbase-memory-col2"
-        },
-        {
-          "config": "hbase-site/hbase.hregion.memstore.block.multiplier",
-          "subsection-name": "subsection-hbase-memory-col2"
-        },
-        {
-          "config": "hbase-site/hbase.regionserver.handler.count",
-          "subsection-name": "subsection-hbase-memory-col2"
-        },
-        {
-          "config": "hbase-site/hbase.client.retries.number",
-          "subsection-name": "subsection-hbase-client-col1"
-        },
-        {
-          "config": "hbase-site/hbase.client.keyvalue.maxsize",
-          "subsection-name": "subsection-hbase-client-col1"
-        },
-        {
-          "config": "hbase-site/hbase.hregion.max.filesize",
-          "subsection-name": "subsection-hbase-disk-col1"
-        },
-        {
-          "config": "hbase-site/hbase.hregion.majorcompaction",
-          "subsection-name": "subsection-hbase-disk-col2"
-        },
-        {
-          "config": "hbase-site/hbase.hstore.compaction.max",
-          "subsection-name": "subsection-hbase-disk-col3"
-        },
-        {
-          "config": "hbase-site/zookeeper.session.timeout",
-          "subsection-name": "subsection-hbase-timeouts-col1"
-        },
-        {
-          "config": "hbase-site/hbase.rpc.timeout",
-          "subsection-name": "subsection-hbase-timeouts-col1"
-        },
-        {
-          "config": "hbase-site/hbase.security.authentication",
-          "subsection-name": "subsection-hbase-security-col1"
-        },
-        {
-          "config": "hbase-site/hbase.security.authorization",
-          "subsection-name": "subsection-hbase-security-col1"
-        },
-        {
-          "config": "hbase-env/phoenix_sql_enabled",
-          "subsection-name": "subsection-hbase-phoenix-col1"
-        },
-        {
-          "config": "hbase-site/phoenix.query.timeoutMs",
-          "subsection-name": "subsection-hbase-phoenix-col1"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "hbase-env/hbase_master_heapsize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-env/hbase_regionserver_heapsize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-site/hfile.block.cache.size",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "percent"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-site/hbase.regionserver.global.memstore.upperLimit",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "percent"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-site/hbase.hregion.memstore.flush.size",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "MB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-site/hbase.hregion.memstore.block.multiplier",
-        "widget": {
-          "type": "combo"
-        }
-      },
-      {
-        "config": "hbase-site/hbase.regionserver.handler.count",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-site/hbase.client.retries.number",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-site/hbase.client.keyvalue.maxsize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "MB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-site/hbase.hregion.max.filesize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-site/hbase.hregion.majorcompaction",
-        "widget": {
-          "type": "time-interval-spinner",
-          "units": [
-            {
-              "unit-name": "days,hours"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-site/hbase.hstore.compaction.max",
-        "widget": {
-          "type": "combo"
-        }
-      },
-      {
-        "config": "hbase-site/zookeeper.session.timeout",
-        "widget": {
-          "type": "time-interval-spinner",
-          "units": [
-            {
-              "unit-name": "minutes,seconds"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-site/hbase.rpc.timeout",
-        "widget": {
-          "type": "time-interval-spinner",
-          "units": [
-            {
-              "unit-name": "minutes,seconds"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hbase-site/hbase.security.authentication",
-        "widget": {
-          "type": "toggle"
-        }
-      },
-      {
-        "config": "hbase-site/hbase.security.authorization",
-        "widget": {
-          "type": "toggle"
-        }
-      },
-      {
-        "config": "hbase-env/phoenix_sql_enabled",
-        "widget": {
-          "type": "toggle"
-        }
-      },
-      {
-        "config": "hbase-site/phoenix.query.timeoutMs",
-        "widget": {
-          "type": "time-interval-spinner",
-          "units": [
-            {
-              "unit-name": "minutes,seconds"
-            }
-          ]
-        }
-      }
-    ]
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/widgets.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/widgets.json
deleted file mode 100644
index ae47833..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HBASE/widgets.json
+++ /dev/null
@@ -1,510 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_hbase_dashboard",
-      "display_name": "Standard HBase Dashboard",
-      "section_name": "HBASE_SUMMARY",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Reads and Writes",
-          "description": "Rate (per second) of read and write requests on all regions in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "regionserver.Server.Get_num_ops._rate",
-              "metric_path": "metrics/hbase/regionserver/Server/Get_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "regionserver.Server.ScanNext_num_ops._rate",
-              "metric_path": "metrics/hbase/regionserver/Server/ScanNext_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "regionserver.Server.Append_num_ops._rate",
-              "metric_path": "metrics/hbase/regionserver/Server/Append_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "regionserver.Server.Delete_num_ops._rate",
-              "metric_path": "metrics/hbase/regionserver/Server/Delete_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "regionserver.Server.Increment_num_ops._rate",
-              "metric_path": "metrics/hbase/regionserver/Server/Increment_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "regionserver.Server.Mutate_num_ops._rate",
-              "metric_path": "metrics/hbase/regionserver/Server/Mutate_num_ops._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Read Requests",
-              "value": "${regionserver.Server.Get_num_ops._rate + regionserver.Server.ScanNext_num_ops._rate}"
-            },
-            {
-              "name": "Write Requests",
-              "value": "${regionserver.Server.Append_num_ops._rate + regionserver.Server.Delete_num_ops._rate + regionserver.Server.Increment_num_ops._rate + regionserver.Server.Mutate_num_ops._rate}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Read Latency",
-          "description": "maximum of 95% read latency.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "regionserver.Server.Get_95th_percentile._max",
-              "metric_path": "metrics/hbase/regionserver/Server/Get_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "regionserver.Server.ScanNext_95th_percentile._max",
-              "metric_path": "metrics/hbase/regionserver/Server/ScanNext_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Cluster wide maximum of 95% Get Latency",
-              "value": "${regionserver.Server.Get_95th_percentile._max}"
-            },
-            {
-              "name": "Cluster wide maximum of 95% ScanNext Latency",
-              "value": "${regionserver.Server.ScanNext_95th_percentile._max}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Write Latency",
-          "description": "maximum of 95% write latency.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "regionserver.Server.Mutate_95th_percentile._max",
-              "metric_path": "metrics/hbase/regionserver/Server/Mutate_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "regionserver.Server.Increment_95th_percentile._max",
-              "metric_path": "metrics/hbase/regionserver/Server/Increment_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "regionserver.Server.Append_95th_percentile._max",
-              "metric_path": "metrics/hbase/regionserver/Server/Append_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "regionserver.Server.Delete_95th_percentile._max",
-              "metric_path": "metrics/hbase/regionserver/Server/Delete_95th_percentile._max",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Cluster wide maximum of 95% Mutate Latency",
-              "value": "${regionserver.Server.Mutate_95th_percentile._max}"
-            },
-            {
-              "name": "Cluster wide maximum of 95% Increment Latency",
-              "value": "${regionserver.Server.Increment_95th_percentile._max}"
-            },
-            {
-              "name": "Cluster wide maximum of 95% Append Latency",
-              "value": "${regionserver.Server.Append_95th_percentile._max}"
-            },
-            {
-              "name": "Cluster wide maximum of 95% Delete Latency",
-              "value": "${regionserver.Server.Delete_95th_percentile._max}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Open Connections",
-          "description": "Count of open connections across all RegionServer. This is indicative of RegionServer load in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "regionserver.RegionServer.numOpenConnections._sum",
-              "metric_path": "metrics/hbase/ipc/IPC/numOpenConnections._sum",
-              "category": "",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Open Connections",
-              "value": "${regionserver.RegionServer.numOpenConnections._sum}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Request Handlers",
-          "description": "Count of Active handlers vs count of calls waiting in the general queue.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "regionserver.RegionServer.numActiveHandler._sum",
-              "metric_path": "metrics/hbase/ipc/IPC/numActiveHandler._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "regionserver.RegionServer.numCallsInGeneralQueue._sum",
-              "metric_path": "metrics/hbase/ipc/IPC/numCallsInGeneralQueue._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Active Handlers",
-              "value": "${regionserver.RegionServer.numActiveHandler._sum}"
-            },
-            {
-              "name": "Calls in General Queue",
-              "value": "${regionserver.RegionServer.numCallsInGeneralQueue._sum}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Files Local",
-          "description": "Average percentage of local files to RegionServer in the cluster.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "regionserver.Server.percentFilesLocal",
-              "metric_path": "metrics/hbase/regionserver/Server/percentFilesLocal",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Files Local",
-              "value": "${regionserver.Server.percentFilesLocal}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "error_threshold":"25",
-            "warning_threshold": "75"
-          }
-        },
-        {
-          "widget_name": "Blocked Updates",
-          "description": "Number of milliseconds updates have been blocked so the memstore can be flushed.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "regionserver.Server.updatesBlockedTime._rate",
-              "metric_path": "metrics/hbase/regionserver/Server/updatesBlockedTime._rate",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Updates Blocked Time",
-              "value": "${regionserver.Server.updatesBlockedTime._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster CPU",
-          "description": "Percentage of CPU utilized across all RegionServer hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "cpu_system._sum",
-              "metric_path": "metrics/cpu/cpu_system._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "cpu_user._sum",
-              "metric_path": "metrics/cpu/cpu_user._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "cpu_nice._sum",
-              "metric_path": "metrics/cpu/cpu_nice._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "cpu_idle._sum",
-              "metric_path": "metrics/cpu/cpu_idle._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "cpu_wio._sum",
-              "metric_path": "metrics/cpu/cpu_wio._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "CPU utilization",
-              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "%"
-          }
-        },
-        {
-          "widget_name": "Cluster Network",
-          "description": "Average of Network IO utilized across all RegionServer hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "pkts_in._avg",
-              "metric_path": "metrics/network/pkts_in._avg",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "pkts_out._avg",
-              "metric_path": "metrics/network/pkts_out._avg",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Packets In",
-              "value": "${pkts_in._avg}"
-            },
-            {
-              "name": "Packets Out",
-              "value": "${pkts_out._avg}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Disk",
-          "description": "Sum of disk throughput for all RegionServer hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "read_bps._sum",
-              "metric_path": "metrics/disk/read_bps._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            },
-            {
-              "name": "write_bps._sum",
-              "metric_path": "metrics/disk/write_bps._sum",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Read throughput",
-              "value": "${read_bps._sum/1048576}"
-            },
-            {
-              "name": "Write throughput",
-              "value": "${write_bps._sum/1048576}"
-            }
-          ],
-          "properties": {
-            "display_unit": "Mbps",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        }
-      ]
-    },
-    {
-      "layout_name": "default_hbase_heatmap",
-      "display_name": "HBase Heatmaps",
-      "section_name": "HBASE_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "HBase Compaction Queue Size",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
-              "metric_path": "metrics/hbase/regionserver/compactionQueueSize",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "HBase Compaction Queue Size",
-              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength} "
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "10"
-          }
-        },
-        {
-          "widget_name": "HBase Memstore Sizes",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
-              "metric_path": "metrics/hbase/regionserver/memstoreSize",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "HBase Memstore Sizes",
-              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize}"
-            }
-          ],
-          "properties": {
-            "display_unit": "B",
-            "max_limit": "104857600"
-          }
-        },
-        {
-          "widget_name": "HBase Read Request Count",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
-              "metric_path": "metrics/hbase/regionserver/readRequestsCount",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "HBase Read Request Count",
-              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount}"
-            }
-          ],
-          "properties": {
-            "max_limit": "200"
-          }
-        },
-        {
-          "widget_name": "HBase Write Request Count",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
-              "metric_path": "metrics/hbase/regionserver/writeRequestsCount",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "HBase Write Request Count",
-              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount}"
-            }
-          ],
-          "properties": {
-            "max_limit": "200"
-          }
-        },
-        {
-          "widget_name": "HBase Regions",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
-              "metric_path": "metrics/hbase/regionserver/regions",
-              "service_name": "HBASE",
-              "component_name": "HBASE_REGIONSERVER"
-            }
-          ],
-          "values": [
-            {
-              "name": "HBase Regions",
-              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount}"
-            }
-          ],
-          "properties": {
-            "max_limit": "10"
-          }
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/alerts.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/alerts.json
deleted file mode 100644
index 34cea4c..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/alerts.json
+++ /dev/null
@@ -1,120 +0,0 @@
-{
-  "HDFS":{
-    "NAMENODE": [
-
-      {
-        "name": "upgrade_finalized_state",
-        "label": "HDFS Upgrade Finalized State",
-        "description": "This service-level alert is triggered if HDFS is not in the finalized state",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "PERF/1.0/services/HDFS/package/alerts/alert_upgrade_finalized.py",
-          "parameters": []
-        }
-      },
-
-      {
-        "name": "namenode_last_checkpoint",
-        "label": "NameNode Last Checkpoint",
-        "description": "This service-level alert will trigger if the last time that the NameNode performed a checkpoint was too long ago. It will also trigger if the number of uncommitted transactions is beyond a certain threshold.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "PERF/1.0/services/HDFS/package/alerts/alert_checkpoint_time.py",
-          "parameters": [
-            {
-              "name": "connection.timeout",
-              "display_name": "Connection Timeout",
-              "value": 5.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before this alert is considered to be CRITICAL",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "checkpoint.time.warning.threshold",
-              "display_name": "Checkpoint Warning",
-              "value": 200,
-              "type": "PERCENT",
-              "description": "The percentage of the last checkpoint time greater than the interval in order to trigger a warning alert.",
-              "units": "%",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "checkpoint.time.critical.threshold",
-              "display_name": "Checkpoint Critical",
-              "value": 200,
-              "type": "PERCENT",
-              "description": "The percentage of the last checkpoint time greater than the interval in order to trigger a critical alert.",
-              "units": "%",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "checkpoint.txns.multiplier.warning.threshold",
-              "display_name": "Uncommitted transactions Warning",
-              "value": 2.0,
-              "type": "NUMERIC",
-              "description": "The multiplier to use against dfs.namenode.checkpoint.period compared to the difference between last transaction id and most recent transaction id beyond which to trigger a warning alert.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "checkpoint.txns.multiplier.critical.threshold",
-              "display_name": "Uncommitted transactions Critical",
-              "value": 4.0,
-              "type": "NUMERIC",
-              "description": "The multiplier to use against dfs.namenode.checkpoint.period compared to the difference between last transaction id and most recent transaction id beyond which to trigger a critical alert.",
-              "threshold": "CRITICAL"
-            }
-          ]
-        }
-      }
-    ],
-    "SECONDARY_NAMENODE": [
-      {
-        "name": "secondary_namenode_process",
-        "label": "Secondary NameNode Process",
-        "description": "This host-level alert is triggered if the Secondary NameNode process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "PERF/1.0/services/HDFS/package/alerts/alert_snamenode_process.py"
-        }
-      }
-    ],
-    "NFS_GATEWAY": [
-      {
-        "name": "nfsgateway_process",
-        "label": "NFS Gateway Process",
-        "description": "This host-level alert is triggered if the NFS Gateway process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "PERF/1.0/services/HDFS/package/alerts/alert_nfs_gateway_process.py"
-        }
-      }
-    ],
-    "DATANODE": [
-      {
-        "name": "datanode_unmounted_data_dir",
-        "label": "DataNode Unmounted Data Dir",
-        "description": "This host-level alert is triggered if one of the data directories on a host was previously on a mount point and became unmounted. If the mount history file does not exist, then report an error if a host has one or more mounted data directories as well as one or more unmounted data directories on the root partition. This may indicate that a data directory is writing to the root partition, which is undesirable.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "PERF/1.0/services/HDFS/package/alerts/alert_datanode_unmounted_data_dir.py"
-        }
-      }
-    ]
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index 8375be1..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,225 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
-<!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-  <!-- These properties exist in common-services. -->
-  <property>
-    <name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
-    <value>120</value>
-    <description>ZooKeeper Failover Controller retries setting for your environment</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <!-- i/o properties -->
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-      The size of this buffer should probably be a multiple of hardware
-      page size (4096 on Intel x86), and it determines how much data is
-      buffered during read and write operations.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-      for compression/decompression.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <!-- file system properties -->
-  <property>
-    <name>fs.defaultFS</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-      literal string "local" or a host:port for HDFS.</description>
-    <final>true</final>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes after which the checkpoint gets deleted.
-      If zero, the trash feature is disabled.
-      This option may be configured both on the server and the client.
-      If trash is disabled server side then the client side configuration is checked.
-      If trash is enabled on the server side then the value configured on the server is used and the client configuration value is ignored.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-      connections will be inspected for idleness.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-      connection to the server.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ipc.server.tcpnodelay</name>
-    <value>true</value>
-    <description>Turn on/off Nagle's algorithm for the TCP socket
-      connection on
-      the server. Setting to true disables the algorithm and may
-      decrease latency
-      with a cost of more/smaller packets.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>mapreduce.jobtracker.webinterface.trusted</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-      actions, such as kill job, delete file, etc., that should
-      not be exposed to public. Enable this option if the interfaces
-      are only reachable by those who have the right authorization.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>simple</value>
-    <description>
-      Set the authentication for the cluster. Valid values are: simple or
-      kerberos.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>false</value>
-    <description>
-      Enable authorization for different protocols.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>DEFAULT</value>
-    <description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
-      So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-      "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-      The translations rules have 3 sections:
-      base     filter    substitution
-      The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-      [1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-      [2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-      [2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-      The filter is a regex in parens that must match the generated string for the rule to apply.
-
-      "(.*%admin)" will take any string that ends in "%admin"
-      "(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-      Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-      "s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-      "s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-      "s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-      So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-      RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
-      DEFAULT
-
-      To also translate the names with a second component, you'd make the rules:
-
-      RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
-      RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
-      DEFAULT
-
-      If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-      RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-      DEFAULT
-    </description>
-    <value-attributes>
-      <type>multiLine</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>net.topology.script.file.name</name>
-    <value>/etc/hadoop/conf/topology_script.py</value>
-    <description>
-      Location of topology script used by Hadoop to determine the rack location of nodes.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- Properties from HDP 2.2 and higher -->
-  <property>
-    <name>hadoop.http.authentication.simple.anonymous.allowed</name>
-    <value>true</value>
-    <description>
-      Indicates if anonymous requests are allowed when using &apos;simple&apos; authentication.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop.security.key.provider.path</name>
-    <value/>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_host</name>
-      </property>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_port</name>
-      </property>
-      <property>
-        <type>kms-env</type>
-        <name>kms_port</name>
-      </property>
-      <property>
-        <type>ranger-kms-site</type>
-        <name>ranger.service.https.attrib.ssl.enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2c362fd0/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-env.xml
deleted file mode 100644
index 3be63be..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-env.xml
+++ /dev/null
@@ -1,419 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <!-- These properties exist in common services. -->
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-    <display-name>Hadoop Log Dir Prefix</display-name>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <display-name>Hadoop PID Dir Prefix</display-name>
-    <description>Hadoop PID Dir Prefix</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop_root_logger</name>
-    <value>INFO,RFA</value>
-    <display-name>Hadoop Root Logger</display-name>
-    <description>Hadoop Root Logger</description>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-    <display-name>Hadoop maximum Java heap size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-    <display-name>NameNode Java heap size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>268435456</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hdfs-site</type>
-        <name>dfs.datanode.data.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
-    <display-name>NameNode new generation size</display-name>
-    <depends-on>
-      <property>
-        <type>hadoop-env</type>
-        <name>namenode_heapsize</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>16384</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-    <display-name>NameNode maximum new generation size</display-name>
-    <depends-on>
-      <property>
-        <type>hadoop-env</type>
-        <name>namenode_heapsize</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>16384</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>namenode_opt_permsize</name>
-    <value>128</value>
-    <description>NameNode permanent generation size</description>
-    <display-name>NameNode permanent generation size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>2096</maximum>
-      <unit>MB</unit>
-      <increment-step>128</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>namenode_opt_maxpermsize</name>
-    <value>256</value>
-    <description>NameNode maximum permanent generation size</description>
-    <display-name>NameNode maximum permanent generation size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>2096</maximum>
-      <unit>MB</unit>
-      <increment-step>128</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-    <display-name>DataNode maximum Java heap size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>268435456</maximum>
-      <unit>MB</unit>
-      <increment-step>128</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <display-name>Proxy User Group</display-name>
-    <value>users</value>
-    <property-type>GROUP</property-type>
-    <description>Proxy user group.</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hdfs_user</name>
-    <display-name>HDFS User</display-name>
-    <value>hdfs</value>
-    <property-type>USER</property-type>
-    <description>User to run HDFS as</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hdfs_tmp_dir</name>
-    <value>/tmp</value>
-    <description>HDFS tmp Dir</description>
-    <display-name>HDFS tmp Dir</display-name>
-    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
-    <value-attributes>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hdfs_user_nofile_limit</name>
-    <value>128000</value>
-    <description>Max open files limit setting for HDFS user.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hdfs_user_nproc_limit</name>
-    <value>65536</value>
-    <description>Max number of processes limit setting for HDFS user.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>namenode_backup_dir</name>
-    <description>Local directory for storing backup copy of NameNode images during upgrade</description>
-    <value>/tmp/upgrades</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hdfs_user_keytab</name>
-    <description>HDFS keytab path</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hdfs_principal_name</name>
-    <description>HDFS principal name</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These properties exist in HDP 2.2 and higher. -->
-  <property>
-    <name>keyserver_host</name>
-    <value> </value>
-    <display-name>Key Server Host</display-name>
-    <description>Hostnames where Key Management Server is installed</description>
-    <value-attributes>
-      <type>string</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>keyserver_port</name>
-    <value/>
-    <display-name>Key Server Port</display-name>
-    <description>Port number where Key Management Server is available</description>
-    <value-attributes>
-      <type>int</type>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These properties exist in HDP 2.3 and higher. -->
-  <!-- hadoop-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>hadoop-env template</display-name>
-    <description>This is the jinja template for hadoop-env.sh file</description>
-    <value>
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME={{java_home}}
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop home directory
-export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-# Hadoop Configuration Directory
-
-{# this is different for HDP1 #}
-# Path to jsvc required by secure HDP 2.0 datanode
-export JSVC_HOME={{jsvc_path}}
-
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# Command specific options appended to HADOOP_OPTS when specified
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-
-{% if java_version &lt; 8 %}
-SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
-export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
-
-{% else %}
-SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
-export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-{% endif %}
-
-HADOOP_NFS3_OPTS="-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
-
-
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
-# History server logs
-export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# History server pid
-export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
-
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
-
-# Add database libraries
-JAVA_JDBC_LIBS=""
-if [ -d "/usr/share/java" ]; then
-  for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
-  do
-    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-  done
-fi
-
-# Add libraries to the hadoop classpath - some may not need a colon as they already include it
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}
-
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-
-# Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}
-
-export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
-
-
-# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin.
-# Makes sense to fix only when running DN as root
-if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  {% if is_datanode_max_locked_memory_set %}
-  ulimit -l {{datanode_max_locked_memory}}
-  {% endif %}
-  ulimit -n {{hdfs_user_nofile_limit}}
-fi
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>nfsgateway_heapsize</name>
-    <display-name>NFSGateway maximum Java heap size</display-name>
-    <value>1024</value>
-    <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>


Mime
View raw message