ambari-commits mailing list archives

From jmar...@apache.org
Subject [46/58] [partial] ambari git commit: [RTC 136620]: Introduce BigInsights stacks on Ambari 2.4 branch
Date Wed, 17 Aug 2016 00:33:40 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..d92e473
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,166 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for the hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hadoop Configuration Directory
+# TODO: if this env var is already set, it can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+
+{% if java_version &lt; 8 %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+{% else %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:MetaspaceSize={{namenode_opt_permsize}} -XX:MaxMetaspaceSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxMetaspaceSize=512m $HADOOP_CLIENT_OPTS"
+{% endif %}
+
+export HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS
+
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_NFS3_OPTS="-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+#if [ -d "/usr/lib/tez" ]; then
+#  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+#fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/iop/current/hadoop-client/lib/native/Linux-amd64-64
+
+#Hadoop logging options. Modify and uncomment to change logging level
+#export HADOOP_ROOT_LOGGER={{hadoop_root_logger}}
+    </value>
+  </property>
+  <property>
+    <name>nfsgateway_heapsize</name>
+    <display-name>NFSGateway maximum Java heap size</display-name>
+    <value>1024</value>
+    <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+  </property>
+</configuration>
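
The content property above is stored XML-escaped (hence the &lt; in the {% if %} guard) and is rendered as a Jinja template when the agent writes out hadoop-env.sh. A minimal sketch of that rendering step, using stock jinja2 and hypothetical parameter values rather than Ambari's real template machinery:

    # Sketch only: renders an excerpt of the template above with made-up values.
    import html
    from jinja2 import Template

    template_src = html.unescape("""
    export JAVA_HOME={{java_home}}
    export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
    {% if java_version &lt; 8 %}
    export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
    {% else %}
    export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxMetaspaceSize=512m $HADOOP_CLIENT_OPTS"
    {% endif %}
    """)

    print(Template(template_src).render(
        java_home="/usr/jdk64/jdk1.8.0_60",  # hypothetical path
        hadoop_heapsize="1024",              # hypothetical size in MB
        java_version=8,                      # selects the Metaspace branch
    ))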

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..1a40673
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>nfs.file.dump.dir</name>
+    <value>/tmp/.hdfs-nfs</value>
+    <display-name>NFSGateway dump directory</display-name>
+    <description>
+      This directory is used to temporarily save out-of-order writes before
+      writing to HDFS. For each file, the out-of-order writes are dumped after
+      they are accumulated to exceed a certain threshold (e.g., 1MB) in memory.
+      One needs to make sure the directory has enough space.
+    </description>
+    <value-attributes>
+        <type>directory</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>nfs.exports.allowed.hosts</name>
+    <value>* rw</value>
+    <display-name>Allowed hosts</display-name>
+  </property>
+
+</configuration>
\ No newline at end of file
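
All of these stack configuration files share the same flat property/name/value shape, so they are straightforward to read outside Ambari as well. A short illustrative sketch (not Ambari code) that loads that shape into a dict:

    import xml.etree.ElementTree as ET

    # Inline sample mirroring the hdfs-site.xml introduced above.
    sample = """<configuration supports_final="true">
      <property>
        <name>nfs.file.dump.dir</name>
        <value>/tmp/.hdfs-nfs</value>
      </property>
      <property>
        <name>nfs.exports.allowed.hosts</name>
        <value>* rw</value>
      </property>
    </configuration>"""

    # Collect every <property> into a {name: value} mapping.
    props = {p.findtext("name"): p.findtext("value")
             for p in ET.fromstring(sample).iter("property")}
    assert props["nfs.exports.allowed.hosts"] == "* rw"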

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..0e995b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/metainfo.xml
@@ -0,0 +1,127 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <version>2.7.1</version>
+
+      <components>
+        <component>
+          <name>NFS_GATEWAY</name>
+          <displayName>NFSGateway</displayName>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/nfsgateway.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+      </components>
+      
+      <osSpecifics> 
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>rpcbind</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_4_1_*</name>
+            </package>
+            <package>
+              <name>hadoop-lzo</name>
+              <!--name>hadoop_4_1_*-lzo</name-->
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>redhat7,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>lzo</name>
+            </package>
+            <package>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <name>hadoop_4_1_*-libhdfs</name>
+            </package>
+            <package>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>libsnappy1</name>
+            </package>
+            <package>
+              <name>libsnappy-dev</name>
+            </package>
+            <package>
+              <name>liblzo2-2</name>
+            </package>
+            <package>
+              <name>hadoop-hdfs</name>
+            </package>
+            <package>
+              <name>libhdfs0</name>
+            </package>
+            <package>
+              <name>libhdfs0-dev</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+      <themes>
+        <theme>
+           <fileName>theme.json</fileName>
+           <default>true</default>
+        </theme>
+      </themes>
+      
+    </service>
+  </services>
+</metainfo>
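
The metainfo wires the NFS_GATEWAY component to scripts/nfsgateway.py with scriptType PYTHON. The actual script lives elsewhere in this (partial) commit; the generic skeleton that such Ambari command scripts follow looks roughly like this, assuming the stock resource_management library:

    # Rough skeleton only; the real nfsgateway.py carries the actual logic.
    from resource_management.libraries.script.script import Script

    class NFSGateway(Script):
        def install(self, env):
            # Installs the packages declared under <osSpecifics> above.
            self.install_packages(env)

        def start(self, env):
            pass  # real scripts configure, then launch the NFS gateway daemon

        def stop(self, env):
            pass  # real scripts stop the daemon via its pid file

        def status(self, env):
            pass  # real scripts raise ComponentIsNotRunning when stopped

    if __name__ == "__main__":
        NFSGateway().execute()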

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/themes/theme.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/themes/theme.json
new file mode 100644
index 0000000..6f2b797
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/themes/theme.json
@@ -0,0 +1,179 @@
+{
+  "name": "default",
+  "description": "Default theme for HDFS service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-namenode",
+                  "display-name": "NameNode",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-namenode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-datanode",
+                  "display-name": "DataNode",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-datanode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hdfs-site/dfs.namenode.name.dir",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hadoop-env/namenode_heapsize",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.handler.count",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.data.dir",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hadoop-env/dtnode_heapsize",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+          "subsection-name": "subsection-datanode-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hdfs-site/dfs.namenode.name.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hadoop-env/namenode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.data.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hadoop-env/dtnode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+
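
Every subsection-name referenced under placement/configs must match a subsection declared in the layout; a mismatch would presumably leave those configs with nowhere to render. A quick consistency check written against the structure above (an illustrative sketch, not part of Ambari):

    import json

    theme = json.load(open("theme.json"))  # the file introduced above
    conf = theme["configuration"]

    # Subsections declared in the layout tree.
    declared = {sub["name"]
                for layout in conf["layouts"]
                for tab in layout["tabs"]
                for section in tab["layout"]["sections"]
                for sub in section["subsections"]}
    # Subsections referenced by config placements.
    referenced = {c["subsection-name"] for c in conf["placement"]["configs"]}

    # Empty result means every placement points at a declared subsection.
    print("unplaced:", referenced - declared or "none")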

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/widgets.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/widgets.json
new file mode 100644
index 0000000..85082d0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/widgets.json
@@ -0,0 +1,644 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_hdfs_dashboard",
+      "display_name": "Standard HDFS Dashboard",
+      "section_name": "HDFS_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "NameNode GC count",
+          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcCount",
+              "metric_path": "metrics/jvm/gcCount",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
+              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC total count",
+              "value": "${jvm.JvmMetrics.GcCount}"
+            },
+            {
+              "name": "GC count of type major collection",
+              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode GC time",
+          "description": "Total time taken by major type garbage collections in milliseconds.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
+              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC time in major collection",
+              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NN Connection Load",
+          "description": "Number of open RPC connections being managed by NameNode.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.NumOpenConnections",
+              "metric_path": "metrics/rpc/NumOpenConnections",
+              "category": "",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Open Connections",
+              "value": "${rpc.rpc.NumOpenConnections}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Heap",
+          "description": "Heap memory committed and Heap memory used with respect to time.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "JVM heap committed",
+              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
+            },
+            {
+              "name": "JVM heap used",
+              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Host Load",
+          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system",
+              "metric_path": "metrics/cpu/cpu_system",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_user",
+              "metric_path": "metrics/cpu/cpu_user",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_nice",
+              "metric_path": "metrics/cpu/cpu_nice",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_idle",
+              "metric_path": "metrics/cpu/cpu_idle",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
+            },
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total - mem_free - mem_cached)/mem_total) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        },
+        {
+          "widget_name": "NameNode RPC",
+          "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/RpcQueueTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/RpcProcessingTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "RPC Queue Wait time",
+              "value": "${rpc.rpc.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "RPC Processing time",
+              "value": "${rpc.rpc.RpcProcessingTimeAvgTime}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "ms"
+          }
+        },
+        {
+          "widget_name": "NameNode Operations",
+          "description": "Total number of file operation over time.",
+          "widget_type": "GRAPH",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.namenode.TotalFileOps",
+              "metric_path": "metrics/dfs/namenode/TotalFileOps",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "NameNode File Operations",
+              "value": "${dfs.namenode.TotalFileOps}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Failed disk volumes",
+          "description": "Number of Failed disk volumes across all DataNodes. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "dfs.datanode.VolumeFailures._sum",
+              "metric_path": "metrics/dfs/datanode/VolumeFailures._sum",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Failed disk volumes",
+              "value": "${dfs.datanode.VolumeFailures._sum}"
+            }
+          ],
+          "properties": {
+            "display_unit": ""
+          }
+        },
+        {
+          "widget_name": "Corrupted Blocks",
+          "description": "Number represents data blocks that have become corrupted or missing. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Corrupted Blocks",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "Under Replicated Blocks",
+          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Under Replicated Blocks",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        }, 
+        {
+          "widget_name": "DataNode JVM Heap Memory Used",
+          "description": "DataNode JVM Heap Memory Used",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Used",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "description": "Percentage of available space used in the DFS.",
+          "widget_type": "GAUGE",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed",
+              "metric_path": "metrics/dfs/datanode/DfsUsed",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0.75",
+            "error_threshold": "0.9"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_hdfs_heatmap",
+      "section_name": "HDFS_HEATMAPS",
+      "display_name": "HDFS Heatmaps",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "HDFS Bytes Read",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead",
+              "metric_path": "metrics/dfs/datanode/bytes_read",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Read",
+              "value": "${dfs.datanode.BytesRead}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "HDFS Bytes Written",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesWritten",
+              "metric_path": "metrics/dfs/datanode/bytes_written",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Written",
+              "value": "${dfs.datanode.BytesWritten}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "DataNode Garbage Collection Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Garbage Collection Time",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Used",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Committed",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Committed",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Disk I/O Utilization",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead",
+              "metric_path": "metrics/dfs/datanode/bytes_read",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.BytesWritten",
+              "metric_path": "metrics/dfs/datanode/bytes_written",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalReadTime",
+              "metric_path": "metrics/dfs/datanode/total_read_time",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalWriteTime",
+              "metric_path": "metrics/dfs/datanode/total_write_time",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Disk I/O Utilization",
+              "value": "${(((dfs.datanode.BytesRead/dfs.datanode.TotalReadTime)+(dfs.datanode.BytesWritten/dfs.datanode.TotalWriteTime))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Network I/O Utilization",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.RemoteBytesRead",
+              "metric_path": "metrics/dfs/datanode/remote_bytes_read",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.ReadsFromRemoteClient",
+              "metric_path": "metrics/dfs/datanode/reads_from_remote_client",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.RemoteBytesWritten",
+              "metric_path": "metrics/dfs/datanode/remote_bytes_written",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.WritesFromRemoteClient",
+              "metric_path": "metrics/dfs/datanode/writes_from_remote_client",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Network I/O Utilization",
+              "value": "${((dfs.datanode.RemoteBytesRead/dfs.datanode.ReadsFromRemoteClient)+(dfs.datanode.RemoteBytesWritten/dfs.datanode.WritesFromRemoteClient))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed",
+              "metric_path": "metrics/dfs/datanode/DfsUsed",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
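
The ${...} widget values are plain arithmetic over the listed metrics. As a worked example with hypothetical sample readings, the CPU-utilization expression from the NameNode Host Load widget:

    # Hypothetical sample metric values, in the units Ambari Metrics reports.
    cpu_system, cpu_user, cpu_nice, cpu_idle, cpu_wio = 5.0, 20.0, 0.0, 70.0, 5.0

    busy = cpu_system + cpu_user + cpu_nice
    total = busy + cpu_idle + cpu_wio
    print("CPU utilization: %.1f%%" % (busy / total * 100))  # -> 25.0%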

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-env.xml
new file mode 100644
index 0000000..2014c78
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-env.xml
@@ -0,0 +1,189 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  
+  <!-- hive-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for the hive-env.sh file</description>
+    <value>
+ if [ "$SERVICE" = "cli" ]; then
+   {% if java_version &lt; 8 %}
+    if [ -z "$DEBUG" ]; then
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+    else
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+    fi
+  {% else %}
+    if [ -z "$DEBUG" ]; then
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit"
+    else
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+    fi
+  {% endif %}
+ fi
+
+# The heap size of the jvm started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hive_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Set JAVA HOME
+export JAVA_HOME={{java64_home}}
+
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 MB.  A larger heap size would also be
+# appropriate for the hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_config_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+elif [ -d "{{hcat_lib}}" ]; then
+  export HIVE_AUX_JARS_PATH={{hcat_lib}}
+fi
+
+# Set HIVE_AUX_JARS_PATH
+export HIVE_AUX_JARS_PATH={{hbase_lib}}/hbase-client.jar,\
+{{hbase_lib}}/hbase-common.jar,\
+{{hbase_lib}}/hbase-hadoop2-compat.jar,\
+{{hbase_lib}}/hbase-prefix-tree.jar,\
+{{hbase_lib}}/hbase-protocol.jar,\
+{{hbase_lib}}/hbase-server.jar,\
+{{hbase_lib}}/htrace-core-3.1.0-incubating.jar,\
+${HIVE_AUX_JARS_PATH}
+
+export METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+ 
+  <property>
+    <name>hive_security_authorization</name>
+    <display-name>Choose Authorization</display-name>
+    <value>None</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>None</value>
+          <label>None</label>
+        </entry>
+        <entry>
+          <value>SQLStdAuth</value>
+          <label>SQLStdAuth</label>
+        </entry>
+      </entries>
+    </value-attributes>
+  </property>
+  
+  <property>
+    <name>hive_exec_orc_storage_strategy</name>
+    <display-name>ORC Storage Strategy</display-name>
+    <value>SPEED</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>SPEED</value>
+          <label>Speed</label>
+        </entry>
+        <entry>
+          <value>COMPRESSION</value>
+          <label>Compression</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive_txn_acid</name>
+    <display-name>ACID Transactions</display-name>
+    <value>off</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>on</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>off</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  
+  <property>
+    <name>hive.heapsize</name>
+    <value>1024</value>
+    <description>Hive Java heap size</description>
+    <display-name>HiveServer2 Heap Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>2048</maximum>
+      <unit>MB</unit>
+      <increment-step>512</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.client.heapsize</name>
+    <value>512</value>
+    <description>Hive Client Java heap size</description>
+    <display-name>Client Heap Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>2048</maximum>
+      <unit>MB</unit>
+      <increment-step>512</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.metastore.heapsize</name>
+    <value>1024</value>
+    <description>Hive Metastore Java heap size</description>
+    <display-name>Metastore Heap Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>2048</maximum>
+      <unit>MB</unit>
+      <increment-step>512</increment-step>
+    </value-attributes>
+  </property>
+  
+</configuration>
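
The heap-size properties above all carry value-attributes bounds (int type, min/max, increment-step). A simplified sketch of the kind of check those attributes imply; the step-from-minimum semantics here is an assumption, and this is not Ambari's actual validator:

    def valid_int_property(value, minimum, maximum, step=None):
        """Accept ints within [minimum, maximum], stepped from the minimum."""
        v = int(value)
        if not minimum <= v <= maximum:
            return False
        return step is None or (v - minimum) % step == 0

    # hive.heapsize: min 512, max 2048, step 512 -> 512/1024/1536/2048 pass
    assert valid_int_property("1024", 512, 2048, 512)
    assert not valid_int_property("1000", 512, 2048, 512)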

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 0000000..e1a2114
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,338 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration supports_final="true">
+  
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
+  </property>
+  
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>The Hive client authorization manager class name.
+    The user-defined authorization class should implement the interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>
+      Setting this property to true will have HiveServer2 execute
+      Hive operations as the user making the calls to it.
+    </description>
+    <display-name>Run as end user instead of Hive user</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+ 
+  <property>
+    <name>hive.exec.reducers.bytes.per.reducer</name>
+    <value>256000000</value>
+    <description>Size per reducer. The default is 256MB; i.e., if the input size is 1GB, it will use 4 reducers.</description>
+    <display-name>Data per Reducer</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>64</minimum>
+      <maximum>4294967296</maximum>
+      <unit>B</unit>
+      <step-increment></step-increment>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>
+      Authorization manager class name to be used in the metastore for authorization.
+      The user-defined authorization class should implement the interface
+      org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
+    </description>
+    <display-name>Hive Authorization Manager</display-name>
+    <value-attributes>
+      <type>string</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>2147483648</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of the sizes of n-1 of the tables/partitions in an n-way join is smaller than this size, the join is directly
+      converted to a map join (there is no conditional task). The default here is 2147483648 bytes (2 GB).
+    </description>
+    <display-name>For Map Join, per Map memory threshold</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1073741824</minimum>
+      <maximum>17179869184</maximum>
+      <unit>B</unit>
+      <increment-step></increment-step>
+    </value-attributes>
+  </property>
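+  <!--
+    Worked example for the value above: 2147483648 bytes is 2 GB, so in a
+    3-way join the two smaller tables/partitions are map-joined directly
+    (no conditional task) whenever their combined size is under 2 GB.
+  -->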
+
+  <property>
+    <name>hive.server2.support.dynamic.service.discovery</name>
+    <value>false</value>
+    <description>Whether HiveServer2 supports dynamic service discovery for its clients.
+      To support this, each HiveServer2 instance currently registers itself with ZooKeeper
+      when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble
+      (hive.zookeeper.quorum) in their connection string.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
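+  <!--
+    Illustrative client usage (hypothetical host names): when this is true
+    and HiveServer2 instances register in ZooKeeper, JDBC clients can connect
+    through the ensemble instead of a fixed host, e.g.
+      jdbc:hive2://zk1:2181,zk2:2181,zk3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2
+  -->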
+
+  <property>
+    <name>hive.vectorized.groupby.checkinterval</name>
+    <value>100000</value>
+    <description>Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.</description>
+  </property>
+ 
+  <property>
+    <name>hive.cbo.enable</name>
+    <value>true</value>
+    <description>Flag to control enabling cost-based optimization using the Calcite framework.</description>
+    <display-name>Enable Cost Based Optimizer</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.exec.orc.default.stripe.size</name>
+    <value>67108864</value>
+    <description>Define the default ORC stripe size</description>
+    <display-name>Default ORC Stripe Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>8388608</minimum>
+      <maximum>268435456</maximum>
+      <unit>B</unit>
+      <increment-step>8388608</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.exec.orc.default.compress</name>
+    <value>ZLIB</value>
+    <description>Define the default compression codec for ORC files</description>
+    <display-name>ORC Compression Algorithm</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>ZLIB</value>
+          <label>zlib Compression Library</label>
+        </entry>
+        <entry>
+          <value>SNAPPY</value>
+          <label>Snappy Compression Library</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
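+  <!--
+    Illustrative per-table override (hypothetical table name): the two ORC
+    defaults above can be overridden at table level in HiveQL, e.g.
+      CREATE TABLE t (id INT) STORED AS ORC
+        TBLPROPERTIES ("orc.compress"="SNAPPY", "orc.stripe.size"="67108864");
+  -->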
+  <property>
+    <name>hive.stats.fetch.column.stats</name>
+    <value>false</value>
+    <description>
+      Annotation of operator tree with statistics information requires column statistics.
+      Column statistics are fetched from metastore. Fetching column statistics for each needed column
+      can be expensive when the number of columns is high. This flag can be used to disable fetching
+      of column statistics from metastore.
+    </description>
+    <display-name>Fetch column stats at compiler</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>cost_based_optimizer</name>
+      </property>
+    </depends-on>
+  </property>
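+  <!--
+    Illustrative prerequisite (hypothetical table name): column statistics
+    are only available to the optimizer once they have been computed, e.g.
+      ANALYZE TABLE t COMPUTE STATISTICS FOR COLUMNS;
+  -->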
+  <property>
+    <name>hive.server2.authentication</name>
+    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM, and CUSTOM.</description>
+    <value>NONE</value>
+    <display-name>HiveServer2 Authentication</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>NONE</value>
+          <label>None</label>
+        </entry>
+        <entry>
+          <value>LDAP</value>
+          <label>LDAP</label>
+        </entry>
+        <entry>
+          <value>KERBEROS</value>
+          <label>Kerberos</label>
+        </entry>
+        <entry>
+          <value>PAM</value>
+          <label>PAM</label>
+        </entry>
+        <entry>
+          <value>CUSTOM</value>
+          <label>Custom</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
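+  <!--
+    Illustrative Beeline connection for the KERBEROS mode (hypothetical
+    host and realm):
+      !connect jdbc:hive2://hs2-host:10000/default;principal=hive/_HOST@EXAMPLE.COM
+  -->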
+  <property>
+    <name>hive.server2.use.SSL</name>
+    <value>false</value>
+    <description>Set this to true to use SSL encryption for HiveServer2 client connections.</description>
+    <display-name>Use SSL</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.execution.engine</name>
+    <value>mr</value>
+    <description>
+      Chooses the execution engine. The only option in this stack is mr (MapReduce, the default).
+    </description>
+    <display-name>hive.execution.engine</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>mr</value>
+          <label>MapReduce</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.compactor.initiator.on</name>
+    <value>false</value>
+    <description>Whether to run the compactor's initiator thread in this metastore instance. If there is more than one Thrift metastore instance, this should be set to true on only one of them.</description>
+    <display-name>Run Compactor</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.compactor.worker.threads</name>
+    <value>0</value>
+    <description>Number of compactor worker threads to run on this metastore instance. The value can differ across metastore instances.</description>
+    <display-name>Number of threads used by Compactor</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>20</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
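+  <!--
+    Usage note for the two compactor settings above: automatic compaction of
+    ACID tables requires the initiator to be enabled on exactly one metastore
+    and worker threads > 0; a compaction can also be requested manually for a
+    hypothetical table t:
+      ALTER TABLE t COMPACT 'major';
+  -->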
+  <property>
+    <name>hive.default.fileformat</name>
+    <value>TextFile</value>
+    <description>Default file format for CREATE TABLE statement.</description>
+    <display-name>Default File Format</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>ORC</value>
+          <description>The Optimized Row Columnar (ORC) file format provides a highly efficient way to store Hive data. It was designed to overcome limitations of the other Hive file formats. Using ORC files improves performance when Hive is reading, writing, and processing data.</description>
+        </entry>
+        <entry>
+          <value>TextFile</value>
+          <description>Text file format saves Hive data as normal text.</description>
+        </entry>
+      </entries>
+    </value-attributes>
+  </property>
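+  <!--
+    Illustrative HiveQL (hypothetical table names): with the TextFile default
+    above, CREATE TABLE t (id INT); creates a text-backed table, while
+    CREATE TABLE t_orc (id INT) STORED AS ORC; opts into ORC explicitly.
+  -->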
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..db7d590
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/metainfo.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <version>1.2.1</version>
+
+      <components>
+        <component>
+          <name>HIVE_METASTORE</name>
+          <cardinality>1+</cardinality>          
+        </component>
+
+        <component>
+          <name>HIVE_SERVER</name>
+          <cardinality>1+</cardinality>
+        </component>
+        
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <cardinality>1+</cardinality>          
+        </component>        
+      </components>
+      
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hive_4_1_*</name>
+            </package>
+            <package>
+              <name>hive_4_1_*-hcatalog</name>
+            </package>
+            <package>
+              <name>hive_4_1_*-webhcat</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat6,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat7</osFamily>
+          <packages>
+            <package>
+              <name>mariadb</name>
+            </package>
+            <package>
+              <name>mariadb-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/themes/theme.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/themes/theme.json
new file mode 100644
index 0000000..5143782
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/themes/theme.json
@@ -0,0 +1,327 @@
+{
+  "name": "default",
+  "description": "Default theme for HIVE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-rows": 2,
+              "tab-columns": 2,
+              "sections": [
+                 {
+                  "name": "acid-transactions",
+                  "display-name": "ACID Transactions",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "acid-transactions-row1-col1-1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "security",
+                  "display-name": "Security",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "security-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "optimization",
+                  "display-name": "Optimization",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "2",
+                  "subsections": [
+                    {
+                      "name": "optimization-row1-col1",
+                      "display-name": "Execute Engine",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "optimization-row1-col2",
+                      "display-name": "CBO",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "optimization-row2-col1",
+                      "display-name": "Storage",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "optimization-row2-col2",
+                      "display-name": "Memory",
+                      "row-index": "1",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hive-site/hive.exec.orc.default.stripe.size",
+          "subsection-name": "optimization-row2-col1"
+        },
+        {
+          "config": "hive-site/hive.exec.orc.default.compress",
+          "subsection-name": "optimization-row2-col1"
+        },
+        {
+          "config": "hive-env/hive_exec_orc_storage_strategy",
+          "subsection-name": "optimization-row2-col1"
+        },
+        {
+          "config": "hive-env/hive.heapsize",
+          "subsection-name": "optimization-row2-col2"
+        },
+        {
+          "config": "hive-env/hive.metastore.heapsize",
+          "subsection-name": "optimization-row2-col2"
+        },
+        {
+          "config": "hive-env/hive.client.heapsize",
+          "subsection-name": "optimization-row2-col2"
+        },
+        {
+          "config": "hive-site/hive.auto.convert.join.noconditionaltask.size",
+          "subsection-name": "optimization-row2-col2"
+        },
+        {
+          "config": "hive-site/hive.exec.reducers.bytes.per.reducer",
+          "subsection-name": "optimization-row2-col2"
+        },
+        {
+          "config": "hive-env/hive_txn_acid",
+          "subsection-name": "acid-transactions-row1-col1-1"
+        },
+        {
+          "config": "hive-site/hive.compactor.initiator.on",
+          "subsection-name": "acid-transactions-row1-col1-1"
+        },
+        {
+          "config": "hive-site/hive.compactor.worker.threads",
+          "subsection-name": "acid-transactions-row1-col1-1"
+        },
+        {
+          "config": "hive-site/hive.execution.engine",
+          "subsection-name": "optimization-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.cbo.enable",
+          "subsection-name": "optimization-row1-col2"
+        },
+        {
+          "config": "hive-site/hive.stats.fetch.column.stats",
+          "subsection-name": "optimization-row1-col2"
+        },
+        {
+          "config": "hive-env/hive_security_authorization",
+          "subsection-name": "security-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.server2.enable.doAs",
+          "subsection-name": "security-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.server2.authentication",
+          "subsection-name": "security-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.server2.use.SSL",
+          "subsection-name": "security-row1-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hive-site/hive.exec.orc.default.stripe.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.exec.orc.default.compress",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-env/hive_exec_orc_storage_strategy",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-env/hive.heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-env/hive.metastore.heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-env/hive.client.heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.auto.convert.join.noconditionaltask.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.exec.reducers.bytes.per.reducer",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-env/hive_txn_acid",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.compactor.initiator.on",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.compactor.worker.threads",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.execution.engine",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-site/hive.cbo.enable",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.stats.fetch.column.stats",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.server2.enable.doAs",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-env/hive_security_authorization",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-site/hive.server2.authentication",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-site/hive.server2.use.SSL",
+        "widget": {
+          "type": "toggle"
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KAFKA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KAFKA/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KAFKA/metainfo.xml
new file mode 100644
index 0000000..8716f2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KAFKA/metainfo.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KAFKA</name>
+      <version>0.8.2.1</version>
+      <extends>common-services/KAFKA/0.8.1</extends>
+    </service>
+  </services>
+</metainfo>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KERBEROS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KERBEROS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KERBEROS/metainfo.xml
new file mode 100644
index 0000000..678cace
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KERBEROS/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KERBEROS</name>
+      <version>1.10.3</version>
+      <extends>common-services/KERBEROS/1.10.3-10</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KNOX/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KNOX/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KNOX/metainfo.xml
new file mode 100644
index 0000000..4c110cc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KNOX/metainfo.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KNOX</name>
+      <displayName>Knox</displayName>
+      <comment>Provides a single point of authentication and access for Apache Hadoop services in a cluster</comment>
+      <version>0.6.0</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>knox_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>knox_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/configuration/oozie-site.xml
new file mode 100644
index 0000000..ff82a35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/configuration/oozie-site.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<configuration supports_final="true">
+    <property>
+        <name>oozie.services</name>
+        <value>
+            org.apache.oozie.service.SchedulerService,
+            org.apache.oozie.service.InstrumentationService,
+            org.apache.oozie.service.MemoryLocksService,
+            org.apache.oozie.service.UUIDService,
+            org.apache.oozie.service.ELService,
+            org.apache.oozie.service.AuthorizationService,
+            org.apache.oozie.service.UserGroupInformationService,
+            org.apache.oozie.service.HadoopAccessorService,
+            org.apache.oozie.service.JobsConcurrencyService,
+            org.apache.oozie.service.URIHandlerService,
+            org.apache.oozie.service.DagXLogInfoService,
+            org.apache.oozie.service.SchemaService,
+            org.apache.oozie.service.LiteWorkflowAppService,
+            org.apache.oozie.service.JPAService,
+            org.apache.oozie.service.StoreService,
+            org.apache.oozie.service.SLAStoreService,
+            org.apache.oozie.service.DBLiteWorkflowStoreService,
+            org.apache.oozie.service.CallbackService,
+            org.apache.oozie.service.ActionService,
+            org.apache.oozie.service.ShareLibService,
+            org.apache.oozie.service.CallableQueueService,
+            org.apache.oozie.service.ActionCheckerService,
+            org.apache.oozie.service.RecoveryService,
+            org.apache.oozie.service.PurgeService,
+            org.apache.oozie.service.CoordinatorEngineService,
+            org.apache.oozie.service.BundleEngineService,
+            org.apache.oozie.service.DagEngineService,
+            org.apache.oozie.service.CoordMaterializeTriggerService,
+            org.apache.oozie.service.StatusTransitService,
+            org.apache.oozie.service.PauseTransitService,
+            org.apache.oozie.service.GroupsService,
+            org.apache.oozie.service.ProxyUserService,
+            org.apache.oozie.service.XLogStreamingService,
+            org.apache.oozie.service.JvmPauseMonitorService,
+            org.apache.oozie.service.SparkConfigurationService
+        </value>
+        <description>
+            All services to be created and managed by Oozie Services singleton.
+            Class names must be separated by commas.
+        </description>
+    </property>
+</configuration>
\ No newline at end of file

