ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From maha...@apache.org
Subject [42/51] [partial] ambari git commit: Revert "[RTC 136620]: Introduce BigInsights stacks on Ambari 2.4 branch"
Date Wed, 17 Aug 2016 05:41:31 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
deleted file mode 100644
index 00d0a2c..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
+++ /dev/null
@@ -1,384 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value>file:///var/lib/ambari-metrics-collector/hbase</value>
-    <description>
-      Ambari Metrics service uses HBase as default storage backend. Set the rootdir for
-      HBase to either local filesystem path if using Ambari Metrics in embedded mode or
-      to a HDFS dir, example: hdfs://namenode.example.org:8020/amshbase.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/var/lib/ambari-metrics-collector/hbase-tmp</value>
-    <description>
-      Temporary directory on the local filesystem.
-      Change this setting to point to a location more permanent
-      than '/tmp' (The '/tmp' directory is often cleared on
-      machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.local.dir</name>
-    <value>${hbase.tmp.dir}/local</value>
-    <description>Directory on the local filesystem to be used as a local storage
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>false</value>
-    <description>
-      The mode the cluster will be in. Possible values are false for
-      standalone mode and true for distributed mode. If false, startup will run
-      all HBase and ZooKeeper daemons together in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.wait.on.regionservers.mintostart</name>
-    <value>1</value>
-    <description>
-      Ensure that HBase Master waits for this many region servers to start.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>{{zookeeper_quorum_hosts}}</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-      By default this is set to localhost for local and pseudo-distributed modes
-      of operation. For a fully-distributed setup, this should be set to a full
-      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-      this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-    <final>true</final>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The bind address for the HBase Master web UI</description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>61310</value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>61330</value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.master.port</name>
-    <value>61300</value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.port</name>
-    <value>61320</value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>0</value>
-    <description>
-      The time (in milliseconds) between 'major' compactions of all
-      HStoreFiles in a region.
-      0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.query.spoolThresholdBytes</name>
-    <value>20971520</value>
-    <description>
-      Threshold size in bytes after which results from parallelly executed
-      query results are spooled to disk. Default is 20 mb.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.dataDir</name>
-    <value>${hbase.tmp.dir}/zookeeper</value>
-    <description>
-      Property from ZooKeeper's config zoo.cfg.
-      The directory where the snapshot is stored.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>10000</value>
-    <description>
-      Number of rows that will be fetched when calling next on a scanner
-      if it is not served from (local, client) memory.
-    </description>
-  </property>
-  <property>
-    <name>hbase.normalizer.enabled</name>
-    <value>true</value>
-    <description>If set to true, Master will try to keep region size
-    within each table approximately the same.</description>
-  </property>
-  <property>
-    <name>hbase.normalizer.period</name>
-    <value>600000</value>
-    <description>Period in ms at which the region normalizer runs in the Master.</description>
-  </property>
-  <property>
-    <name>hbase.master.normalizer.class</name>
-    <value>org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer</value>
-    <description>
-      Class used to execute the region normalization when the period occurs.
-      See the class comment for more on how it works
-      http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.3</value>
-    <description>
-      Percentage of maximum heap (-Xmx setting) to allocate to block cache
-      used by a StoreFile. Default of 0.4 means allocate 40%.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.5</value>
-    <description>
-      Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.4</value>
-    <description>
-      When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.groupby.maxCacheSize</name>
-    <value>307200000</value>
-    <description>
-      Size in bytes of pages cached during GROUP BY spilling. Default is 100Mb.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>4294967296</value>
-    <description>
-      Maximum HFile size. If the sum of the sizes of a region’s HFiles has grown
-      to exceed this value, the region is split in two. Default is 10Gb.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>4</value>
-    <description>
-      Block updates if memstore has hbase.hregion.memstore.block.multiplier
-      times hbase.hregion.memstore.flush.size bytes. Useful preventing runaway
-      memstore during spikes in update traffic.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.flusher.count</name>
-    <value>2</value>
-    <description>
-      The number of flush threads. With fewer threads, the MemStore flushes
-      will be queued. With more threads, the flushes will be executed in parallel,
-      increasing the load on HDFS, and potentially causing more compactions.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.query.timeoutMs</name>
-    <value>1200000</value>
-    <description>
-      Number of milliseconds after which a query will timeout on the client.
-      Default is 10 min.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.timeout.period</name>
-    <value>900000</value>
-    <description>
-      Client scanner lease period in milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.thread.compaction.large</name>
-    <value>2</value>
-    <description>
-      Configuration key for the large compaction threads.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.thread.compaction.small</name>
-    <value>3</value>
-    <description>
-      Configuration key for the small compaction threads.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>61181</value>
-  </property>
-  <property>
-    <name>hbase.zookeeper.peerport</name>
-    <value>61288</value>
-  </property>
-  <property>
-    <name>hbase.zookeeper.leaderport</name>
-    <value>61388</value>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>200</value>
-    <description>
-      If more than this number of StoreFiles exist in any one Store
-      (one StoreFile is written per flush of MemStore), updates are blocked for
-      this region until a compaction is completed, or until
-      hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-      Memstore will be flushed to disk if size of the memstore exceeds this
-      number of bytes. Value is checked by a thread that runs every
-      hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.query.spoolThresholdBytes</name>
-    <value>12582912</value>
-  </property>
-  <property>
-    <name>hbase.snapshot.enabled</name>
-    <value>false</value>
-    <description>Enable/Disable HBase snapshots.</description>
-  </property>
-  <property>
-    <name>hbase.replication</name>
-    <value>false</value>
-    <description>Enable/Disable HBase replication.</description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>120000</value>
-    <description>ZooKeeper session timeout in milliseconds.</description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout.localHBaseCluster</name>
-    <value>120000</value>
-    <description>
-      ZooKeeper session timeout in milliseconds for
-      pseudo distributed mode.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.sequence.saltBuckets</name>
-    <value>2</value>
-    <description>
-      Controls the number of pre-allocated regions for SYSTEM.SEQUENCE table.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.query.maxGlobalMemoryPercentage</name>
-    <value>15</value>
-    <description>
-      Percentage of total heap memory (i.e. Runtime.getRuntime().maxMemory())
-      that all threads may use.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.spool.directory</name>
-    <value>${hbase.tmp.dir}/phoenix-spool</value>
-    <description>
-      Set directory for Phoenix spill files. If possible set this to a
-      different mount point from the one for hbase.rootdir in embedded mode.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.mutate.batchSize</name>
-    <value>10000</value>
-    <description>
-      The number of rows that are batched together and automatically committed
-      during the execution of an UPSERT SELECT or DELETE statement.
-      This affects performance of group by aggregators if they are being used.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.query.rowKeyOrderSaltedTable</name>
-    <value>true</value>
-    <description>
-      When set, we disallow user specified split points on salted table to ensure
-      that each bucket will only contains entries with the same salt byte.
-      When this property is turned on, the salted table would behave just like
-      a normal table and would return items in rowkey order for scans
-    </description>
-  </property>
-  <property>
-    <name>phoenix.coprocessor.maxServerCacheTimeToLiveMs</name>
-    <value>60000</value>
-    <description>
-      Maximum living time (in milliseconds) of server caches. A cache entry
-      expires after this amount of time has passed since last access. Consider
-      adjusting this parameter when a server-side IOException(
-      “Could not find hash cache for joinId”) happens. Getting warnings like
-      “Earlier hash cache(s) might have expired on servers” might also be a
-      sign that this number should be increased.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.coprocessor.maxMetaDataCacheSize</name>
-    <value>20480000</value>
-    <description>
-      Max size in bytes of total server-side metadata cache after which
-      evictions will begin to occur based on least recent access time.
-      Default is 20Mb
-    </description>
-  </property>
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>Enable/Disable short circuit read for your client.
-      Hadoop servers should be configured to allow short circuit read
-      for the hbase user for this to take effect
-    </description>
-    <depends-on>
-      <property>
-        <type>ams-hbase-site</type>
-        <name>hbase.cluster.distributed</name>
-      </property>
-      <property>
-        <type>ams-hbase-site</type>
-        <name>hbase.rootdir</name>
-      </property>
-    </depends-on>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-log4j.xml
deleted file mode 100644
index 2b0a4cf..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-log4j.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~     http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Define some default values that can be overridden by system properties
-ams.log.dir=.
-ams.log.file=ambari-metrics-collector.log
-
-# Root logger option
-log4j.rootLogger=INFO,file
-
-# Direct log messages to a log file
-log4j.appender.file=org.apache.log4j.RollingFileAppender
-log4j.appender.file.File=${ams.log.dir}/${ams.log.file}
-log4j.appender.file.MaxFileSize=80MB
-log4j.appender.file.MaxBackupIndex=60
-log4j.appender.file.layout=org.apache.log4j.PatternLayout
-log4j.appender.file.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-    </value>
-    <value-attributes>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml
deleted file mode 100644
index cc9c27a..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml
+++ /dev/null
@@ -1,527 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>timeline.metrics.service.operation.mode</name>
-    <value>embedded</value>
-    <display-name>Metrics Service operation mode</display-name>
-    <description>
-      Service Operation modes:
-      1) embedded: Metrics stored on local FS, HBase in Standalone mode
-      2) distributed: HBase daemons writing to HDFS
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.service.webapp.address</name>
-    <value>0.0.0.0:6188</value>
-    <description>
-      The address of the metrics service web application.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.service.rpc.address</name>
-    <value>0.0.0.0:60200</value>
-    <description>
-      The address of the metrics service rpc listeners.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.aggregator.checkpoint.dir</name>
-    <value>/var/lib/ambari-metrics-collector/checkpoint</value>
-    <display-name>Aggregator checkpoint directory</display-name>
-    <description>
-      Directory to store aggregator checkpoints. Change to a permanent
-      location so that checkpoints are not lost.
-    </description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.minute.interval</name>
-    <value>300</value>
-    <display-name>Minute host aggregator interval</display-name>
-    <description>
-      Time in seconds to sleep for the minute resolution host based
-      aggregator. Default resolution is 5 minutes.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.hourly.interval</name>
-    <value>3600</value>
-    <display-name>Hourly host aggregator interval</display-name>
-    <description>
-      Time in seconds to sleep for the hourly resolution host based
-      aggregator. Default resolution is 1 hour.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.daily.aggregator.minute.interval</name>
-    <value>86400</value>
-    <description>
-      Time in seconds to sleep for the day resolution host based
-      aggregator. Default resolution is 24 hours.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.hourly.interval</name>
-    <value>3600</value>
-    <display-name>Hourly cluster aggregator Interval</display-name>
-    <description>
-      Time in seconds to sleep for the hourly resolution cluster wide
-      aggregator. Default is 1 hour.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.daily.interval</name>
-    <value>86400</value>
-    <description>
-      Time in seconds to sleep for the day resolution cluster wide
-      aggregator. Default is 24 hours.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.minute.interval</name>
-    <value>300</value>
-    <display-name>Minute cluster aggregator interval</display-name>
-    <description>
-      Time in seconds to sleep for the minute resolution cluster wide
-      aggregator. Default resolution is 5 minutes.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.second.interval</name>
-    <value>120</value>
-    <display-name>Second cluster aggregator interval</display-name>
-    <description>
-      Time in seconds to sleep for the second resolution cluster wide
-      aggregator. Default resolution is 2 minutes.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier</name>
-    <value>1</value>
-    <description>
-      Multiplier value * interval = Max allowed checkpoint lag. Effectively
-      if aggregator checkpoint is greater than max allowed checkpoint delay,
-      the checkpoint will be discarded by the aggregator.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier</name>
-    <value>2</value>
-    <display-name>Hourly host aggregator checkpoint cutOff multiplier</display-name>
-    <description>
-      Multiplier value * interval = Max allowed checkpoint lag. Effectively
-      if aggregator checkpoint is greater than max allowed checkpoint delay,
-      the checkpoint will be discarded by the aggregator.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier</name>
-    <value>2</value>
-    <display-name>Minute host aggregator checkpoint cutOff multiplier</display-name>
-    <description>
-      Multiplier value * interval = Max allowed checkpoint lag. Effectively
-      if aggregator checkpoint is greater than max allowed checkpoint delay,
-      the checkpoint will be discarded by the aggregator.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier</name>
-    <value>2</value>
-    <display-name>Hourly cluster aggregator checkpoint cutOff multiplier</display-name>
-    <description>
-      Multiplier value * interval = Max allowed checkpoint lag. Effectively
-      if aggregator checkpoint is greater than max allowed checkpoint delay,
-      the checkpoint will be discarded by the aggregator.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier</name>
-    <value>2</value>
-    <display-name>Second cluster aggregator checkpoint cutOff multiplier</display-name>
-    <description>
-      Multiplier value * interval = Max allowed checkpoint lag. Effectively
-      if aggregator checkpoint is greater than max allowed checkpoint delay,
-      the checkpoint will be discarded by the aggregator.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier</name>
-    <value>2</value>
-    <display-name>Minute cluster aggregator checkpoint cutOff multiplier</display-name>
-    <description>
-      Multiplier value * interval = Max allowed checkpoint lag. Effectively
-      if aggregator checkpoint is greater than max allowed checkpoint delay,
-      the checkpoint will be discarded by the aggregator.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier</name>
-    <value>1</value>
-    <description>
-      Multiplier value * interval = Max allowed checkpoint lag. Effectively
-      if aggregator checkpoint is greater than max allowed checkpoint delay,
-      the checkpoint will be discarded by the aggregator.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.daily.disabled</name>
-    <value>false</value>
-    <description>
-      Disable host based daily aggregations.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.hourly.disabled</name>
-    <value>false</value>
-    <display-name>Disable Hourly host aggregator</display-name>
-    <description>
-      Disable host based hourly aggregations.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.minute.disabled</name>
-    <value>false</value>
-    <display-name>Disable Minute host aggregator</display-name>
-    <description>
-      Disable host based minute aggregations.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.daily.disabled</name>
-    <value>false</value>
-    <description>
-      Disable cluster based daily aggregations.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.hourly.disabled</name>
-    <display-name>Disable Hourly cluster aggregator</display-name>
-    <value>false</value>
-    <description>
-      Disable cluster based hourly aggregations.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.minute.disabled</name>
-    <value>false</value>
-    <display-name>Disable minute cluster aggregator</display-name>
-    <description>
-      Disable cluster based minute aggregations.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.second.disabled</name>
-    <value>false</value>
-    <display-name>Disable second cluster aggregator</display-name>
-    <description>
-      Disable cluster based second aggregations.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.second.timeslice.interval</name>
-    <value>30</value>
-    <display-name>Second cluster aggregator timeslice interval</display-name>
-    <description>
-      Lowest resolution of desired data for cluster level second aggregates.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.daily.ttl</name>
-    <value>31536000</value>
-    <description>
-      Host based daily resolution data purge interval. Default is 1 year.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.hourly.ttl</name>
-    <value>2592000</value>
-    <description>
-      Host based hourly resolution data purge interval. Default is 30 days.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.minute.ttl</name>
-    <value>604800</value>
-    <description>
-      Host based minute resolution data purge interval. Default is 7 days.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.second.ttl</name>
-    <value>2592000</value>
-    <description>
-      Cluster wide second resolution data purge interval. Default is 30 days.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.minute.ttl</name>
-    <value>7776000</value>
-    <description>
-      Cluster wide minute resolution data purge interval. Default is 30 days.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.hourly.ttl</name>
-    <value>31536000</value>
-    <description>
-      Cluster wide hourly resolution data purge interval. Default is 1 year.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregator.daily.ttl</name>
-    <value>63072000</value>
-    <description>
-      Cluster wide daily resolution data purge interval. Default is 2 years.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregator.ttl</name>
-    <value>86400</value>
-    <description>
-      1 minute resolution data purge interval. Default is 1 day.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.hbase.data.block.encoding</name>
-    <value>FAST_DIFF</value>
-    <description>
-      Codecs are enabled on a table by setting the DATA_BLOCK_ENCODING property.
-      Default encoding is FAST_DIFF. This can be changed only before creating
-      tables.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.hbase.compression.scheme</name>
-    <value>SNAPPY</value>
-    <description>
-      Compression codes need to be installed and available before setting the
-      scheme. Default compression is SNAPPY. Disable by setting to None.
-      This can be changed only before creating tables.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.service.default.result.limit</name>
-    <value>15840</value>
-    <description>
-      Max result limit on number of rows returned. Calculated as follows:
-      22 aggregate metrics/min * 2 * 60 * 6 : Retrieve 10 SECOND data for 2 hours.
-    </description>
-    <display-name>Metrics service default result limit</display-name>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.service.checkpointDelay</name>
-    <value>60</value>
-    <display-name>Metrics service checkpoint delay</display-name>
-    <description>
-      Time in seconds to sleep on the first run or when the checkpoint is
-      too old.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <property>
-    <name>timeline.metrics.service.resultset.fetchSize</name>
-    <display-name>Metrics service resultset fetchSize</display-name>
-    <value>2000</value>
-    <description>
-      JDBC resultset prefetch size for aggregator queries.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-  <!-- Phoenix properties that would manifest in the hbase-site.xml on the client side -->
-  <property>
-    <name>phoenix.query.maxGlobalMemoryPercentage</name>
-    <value>25</value>
-    <description>
-      Percentage of total heap memory (i.e. Runtime.getRuntime().maxMemory())
-      that all threads may use.
-    </description>
-  </property>
-  <property>
-    <name>phoenix.spool.directory</name>
-    <value>/tmp</value>
-    <description>
-      Set directory for Phoenix spill files. If possible set this to a
-      different mount point from the one for hbase.rootdir in embedded mode.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.service.cluster.aggregator.appIds</name>
-    <value>datanode,nodemanager,hbase</value>
-    <description>
-      List of application ids to use for aggregating host level metrics for
-      an application. Example: bytes_read across Yarn Nodemanagers.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.service.use.groupBy.aggregators</name>
-    <value>true</value>
-    <description>
-      Use a groupBy aggregated query to perform host level aggregations vs
-      in-memory aggregations.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.host.aggregate.splitpoints</name>
-    <value> </value>
-    <description>
-      Pre-split regions using the split points corresponding to this property
-      for the precision table that stores seconds aggregate data.
-    </description>
-    <depends-on>
-      <property>
-        <type>ams-hbase-site</type>
-        <name>hbase.regionserver.global.memstore.upperLimit</name>
-      </property>
-      <property>
-        <type>ams-hbase-site</type>
-        <name>hbase.hregion.memstore.flush.size</name>
-      </property>
-      <property>
-        <type>ams-hbase-env</type>
-        <name>hbase_master_heapsize</name>
-      </property>
-      <property>
-        <type>ams-hbase-env</type>
-        <name>hbase_regionserver_heapsize</name>
-      </property>
-    </depends-on>
-  </property>
-  <property>
-    <name>timeline.metrics.cluster.aggregate.splitpoints</name>
-    <value> </value>
-    <description>
-      Pre-split regions using the split points corresponding to this property
-      for the aggregate table that stores seconds aggregate data across hosts.
-    </description>
-    <depends-on>
-      <property>
-        <type>ams-hbase-site</type>
-        <name>hbase.regionserver.global.memstore.upperLimit</name>
-      </property>
-      <property>
-        <type>ams-hbase-site</type>
-        <name>hbase.hregion.memstore.flush.size</name>
-      </property>
-      <property>
-        <type>ams-hbase-env</type>
-        <name>hbase_master_heapsize</name>
-      </property>
-      <property>
-        <type>ams-hbase-env</type>
-        <name>hbase_regionserver_heapsize</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>timeline.metrics.sink.report.interval</name>
-    <value>60</value>
-    <description>
-      Time in seconds to sleep before reporting metrics to the collector.
-      Default resolution is 1 minute.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.sink.collection.period</name>
-    <value>60</value>
-    <description>
-      The interval between two service metrics data exports.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.service.watcher.initial.delay</name>
-    <value>600</value>
-    <description>
-      The time to delay first watcher check execution
-      Default resolution is 10 minutes.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.service.watcher.delay</name>
-    <value>30</value>
-    <description>
-      The delay between the termination of one
-      watcher check execution and the commencement of the next
-      Default resolution is 30 seconds.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.service.watcher.timeout</name>
-    <value>30</value>
-    <description>
-      The maximum time to wait for a single watcher check execution.
-      Default resolution is 30 seconds.
-    </description>
-  </property>
-  <property>
-    <name>timeline.metrics.hbase.fifo.compaction.enabled</name>
-    <value>true</value>
-    <description>
-      Enable Compaction policy for lower precision and minute aggregate tables.
-    </description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/kerberos.json
deleted file mode 100755
index 03c3f93..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/kerberos.json
+++ /dev/null
@@ -1,122 +0,0 @@
-{
-  "services": [
-    {
-      "name": "AMBARI_METRICS",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/hdfs"
-        }
-      ],
-      "components": [
-        {
-          "name": "METRICS_COLLECTOR",
-          "identities": [
-            {
-              "name": "ams_hbase_master_hbase",
-              "principal": {
-                "value": "amshbase/_HOST@${realm}",
-                "type": "service",
-                "configuration": "ams-hbase-security-site/hbase.master.kerberos.principal",
-                "local_username": "${ams-env/ambari_metrics_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/ams-hbase.master.keytab",
-                "owner": {
-                  "name": "${ams-env/ambari_metrics_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "ams-hbase-security-site/hbase.master.keytab.file"
-              }
-            },
-            {
-              "name": "ams_hbase_regionserver_hbase",
-              "principal": {
-                "value": "amshbase/_HOST@${realm}",
-                "type": "service",
-                "configuration": "ams-hbase-security-site/hbase.regionserver.kerberos.principal",
-                "local_username": "${ams-env/ambari_metrics_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/ams-hbase.regionserver.keytab",
-                "owner": {
-                  "name": "${ams-env/ambari_metrics_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "ams-hbase-security-site/hbase.regionserver.keytab.file"
-              }
-            },
-            {
-              "name": "ams_collector",
-              "principal": {
-                "value": "amshbase/_HOST@${realm}",
-                "type": "service",
-                "configuration": "ams-hbase-security-site/hbase.myclient.principal",
-                "local_username": "${ams-env/ambari_metrics_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/ams.collector.keytab",
-                "owner": {
-                  "name": "${ams-env/ambari_metrics_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "ams-hbase-security-site/hbase.myclient.keytab"
-              }
-            },
-            {
-              "name": "ams_zookeeper",
-              "principal": {
-                "value": "zookeeper/_HOST@${realm}",
-                "type": "service",
-                "configuration": "ams-hbase-security-site/ams.zookeeper.principal",
-                "local_username": "${ams-env/ambari_metrics_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/zk.service.ams.keytab",
-                "owner": {
-                  "name": "${ams-env/ambari_metrics_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "ams-hbase-security-site/ams.zookeeper.keytab"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "ams-hbase-security-site": {
-                "hbase.security.authentication": "kerberos",
-                "hbase.security.authorization": "true",
-                "hadoop.security.authentication": "kerberos",
-                "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController",
-                "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.AccessController",
-                "zookeeper.znode.parent": "/ams-hbase-secure",
-                "hbase.zookeeper.property.kerberos.removeHostFromPrincipal": "true",
-                "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal": "true",
-                "hbase.zookeeper.property.authProvider.1": "org.apache.zookeeper.server.auth.SASLAuthenticationProvider",
-                "hbase.zookeeper.property.jaasLoginRenew": "3600000"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metainfo.xml
deleted file mode 100755
index 23d2224..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metainfo.xml
+++ /dev/null
@@ -1,146 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>AMBARI_METRICS</name>
-      <displayName>Ambari Metrics</displayName>
-      <version>0.1.0</version>
-      <comment>A system for metrics collection that provides storage and retrieval capability for metrics collected from the cluster
-      </comment>
-      <components>
-        <component>
-          <name>METRICS_COLLECTOR</name>
-          <displayName>Metrics Collector</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <versionAdvertised>false</versionAdvertised>
-          <timelineAppid>AMS-HBASE</timelineAppid>
-          <dependencies>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/metrics_collector.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-        </component>
-        <component>
-          <name>METRICS_MONITOR</name>
-          <displayName>Metrics Monitor</displayName>
-          <category>SLAVE</category>
-          <cardinality>ALL</cardinality>
-          <versionAdvertised>false</versionAdvertised>
-          <auto-deploy>
-            <enabled>true</enabled>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/metrics_monitor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>redhat7,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>ambari-metrics-collector</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-            <package>
-              <name>ambari-metrics-monitor</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-            <package>
-              <name>ambari-metrics-hadoop-sink</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-            <package>
-              <name>gcc</name>
-            </package>
-            <package>
-              <name>snappy</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>debian7,ubuntu12,ubuntu14</osFamily>
-          <packages>
-            <package>
-              <name>ambari-metrics-assembly</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-            <package>
-              <name>gcc</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>winsrv6</osFamily>
-          <packages>
-            <package>
-              <name>ambari-metrics-collector.msi</name>
-            </package>
-            <package>
-              <name>ambari-metrics-monitor.msi</name>
-            </package>
-            <package>
-              <name>ambari-metrics-hadoop-sink.msi</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>600</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>ams-site</config-type>
-        <config-type>ams-log4j</config-type>
-        <config-type>ams-env</config-type>
-        <config-type>ams-hbase-policy</config-type>
-        <config-type>ams-hbase-site</config-type>
-        <config-type>ams-hbase-security-site</config-type>
-        <config-type>ams-hbase-env</config-type>
-        <config-type>ams-hbase-log4j</config-type>
-      </configuration-dependencies>
-
-      <excluded-config-types>
-        <config-type>storm-site</config-type>
-      </excluded-config-types>
-
-    </service>
-  </services>
-</metainfo>


Mime
View raw message