incubator-ambari-commits mailing list archives

From: smoha...@apache.org
Subject: [2/9] AMBARI-2794. Update stack version for 1.2.5. (smohanty)
Date: Fri, 02 Aug 2013 00:38:12 GMT
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 0000000..be9c023
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,367 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value></value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration, or all data will be lost
+    on machine restart.
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value></value>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value></value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value></value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value></value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value></value>
+    <description>Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value></value>
+    <description>Count of RPC Listener instances spun up on RegionServers.
+    Same property is used by the Master for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value></value>
+    <description>The time (in milliseconds) between 'major' compactions of all
+    HStoreFiles in a region.  Default: 1 day.
+    Set to 0 to disable automated major compactions.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value></value>
+    <description>When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+      Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value></value>
+    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.flush.size bytes.  Useful for preventing
+    runaway memstore growth during spikes in update traffic.  Without an
+    upper bound, the memstore fills such that when it flushes, the
+    resultant flush files take a long time to compact or split, or,
+    worse, we OOME.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value></value>
+    <description>
+    Memstore will be flushed to disk if size of the memstore
+    exceeds this number of bytes.  Value is checked by a thread that runs
+    every hbase.server.thread.wakefrequency.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value></value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value></value>
+    <description>
+    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+    grown to exceed this value, the hosting HRegion is split in two.
+    Default: 1G.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value></value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory
+    and some calls of next may take longer and longer times when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout; i.e. hbase.regionserver.lease.period
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>30000</value>
+    <description>ZooKeeper session timeout.
+      HBase passes this to the zk quorum as suggested maximum time for a
+      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+      "The client sends a requested timeout, the server responds with the
+      timeout that it can give the client. " In milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value></value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This is to set an upper boundary for a single entry saved in a
+    storage file. Since such entries cannot be split, this helps avoid a region
+    becoming unsplittable because a single entry is too large. It seems wise
+    to set this to a fraction of the maximum region size. Setting it to zero
+    or less disables the check.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value></value>
+    <description>
+    If more than this number of HStoreFiles in any one HStore
+    (one HStoreFile is written per flush of memstore) then a compaction
+    is run to rewrite all HStoreFiles as one.  Larger numbers
+    put off compaction but when it runs, it takes longer to complete.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value></value>
+    <description>
+    If more than this number of StoreFiles in any one Store
+    (one StoreFile is written per flush of MemStore) then updates are
+    blocked for this HRegion until a compaction is completed, or
+    until hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value></value>
+    <description>
+        Percentage of maximum heap (-Xmx setting) to allocate to block cache
+        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+        Set to 0 to disable but it's not recommended.
+    </description>
+  </property>
+
+  <!-- The following properties configure authentication information for
+       HBase processes when using Kerberos security.  There are no default
+       values, included here for documentation purposes -->
+  <property>
+    <name>hbase.master.keytab.file</name>
+    <value></value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HMaster server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.kerberos.principal</name>
+    <value></value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HMaster process.  The principal name should
+    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
+    portion, it will be replaced with the actual hostname of the running
+    instance.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.keytab.file</name>
+    <value></value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HRegionServer server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.kerberos.principal</name>
+    <value></value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HRegionServer process.  The principal name
+    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
+    hostname portion, it will be replaced with the actual hostname of the
+    running instance.  An entry for this principal must exist in the file
+    specified in hbase.regionserver.keytab.file
+    </description>
+  </property>
+
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated), who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+  </property>
+
+  <property>
+    <name>hbase.rpc.engine</name>
+    <value>org.apache.hadoop.hbase.ipc.WritableRpcEngine</value>
+  </property>
+
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value></value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+    default on all tables. For any override coprocessor method, these classes
+    will be called in order. After implementing your own Coprocessor, just put
+    it in HBase's classpath and add the fully qualified class name here.
+    A coprocessor can also be loaded on demand by setting HTableDescriptor.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value></value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <property>
+    <name>dfs.support.append</name>
+    <value></value>
+    <description>Does HDFS allow appends to files?
+    This is an HDFS setting, placed here so the HDFS client enables append support.
+    You must ensure that this setting is also true on the server side when running HBase
+    (you will have to restart your cluster after setting it).
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value></value>
+    <description>Enable/Disable short circuit read for your client.
+    Hadoop servers should be configured to allow short circuit read
+    for the hbase user for this to take effect
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.client.read.shortcircuit.skip.checksum</name>
+    <value></value>
+    <description>Enable/disable skipping the checksum check.</description>
+  </property>
+  
+  <property>
+    <name>hbase.zookeeper.useMulti</name>
+    <value>true</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
+    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase-unsecure</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file paths are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+  </property>
+
+
+</configuration>
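
Most values in this stack definition are intentionally left empty and are filled in by Ambari at deploy time. As a purely illustrative sketch (the host names, port, and quorum list below are hypothetical, not part of this commit; they simply echo the examples given in the property descriptions above), a populated fragment might look like:

  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://namenode.example.org:9000/hbase</value>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>host1.mydomain.com,host2.mydomain.com,host3.mydomain.com</value>
  </property>

Because hbase.cluster.distributed is true here, HBase and ZooKeeper daemons run in separate JVMs, so the quorum list has to name the actual ZooKeeper hosts rather than localhost.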

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..59de58e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/metainfo.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <user>mapred</user>
+    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
+    <version>0.94.6.1.3.1.0</version>
+
+    <components>
+        <component>
+            <name>HBASE_MASTER</name>
+            <category>MASTER</category>
+        </component>
+
+        <component>
+            <name>HBASE_REGIONSERVER</name>
+            <category>SLAVE</category>
+        </component>
+
+        <component>
+            <name>HBASE_CLIENT</name>
+            <category>CLIENT</category>
+        </component>
+    </components>
+
+</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/configuration/global.xml
new file mode 100644
index 0000000..dd89409
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/configuration/global.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hcat_log_dir</name>
+    <value>/var/log/webhcat</value>
+    <description>WebHCat Log Dir.</description>
+  </property>
+  <property>
+    <name>hcat_pid_dir</name>
+    <value>/etc/run/webhcat</value>
+    <description>WebHCat Pid Dir.</description>
+  </property>
+  <property>
+    <name>hcat_user</name>
+    <value>hcat</value>
+    <description>HCat User.</description>
+  </property>
+  <property>
+    <name>webhcat_user</name>
+    <value>hcat</value>
+    <description>WebHCat User.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/metainfo.xml
new file mode 100644
index 0000000..9a3f995
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/metainfo.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <user>root</user>
+    <comment>This is comment for HCATALOG service</comment>
+    <version>0.11.0.1.3.1.0</version>
+
+    <components>
+        <component>
+            <name>HCAT</name>
+            <category>CLIENT</category>
+        </component>
+    </components>
+
+</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml
new file mode 100644
index 0000000..94ffbbb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,253 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+ 
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+  </property>
+
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+  </property>
+
+  <property>
+    <name>io.compression.codecs</name>
+    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+  </property>
+
+
+  <property>
+    <name>io.compression.codec.lzo.class</name>
+    <value>com.hadoop.compression.lzo.LzoCodec</value>
+    <description>The implementation for lzo codec.</description>
+  </property>
+
+
+<!-- file system properties -->
+
+  <property>
+    <name>fs.default.name</name>
+    <!-- cluster variant -->
+    <value></value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes between trash checkpoints.
+  If zero, the trash feature is disabled.
+  </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.dir</name>
+    <value></value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary images to merge.
+        If this is a comma-delimited list of directories then the image is
+        replicated in all of the directories for redundancy.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.edits.dir</name>
+    <value>${fs.checkpoint.dir}</value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary edits to merge.
+        If this is a comma-delimited list of directories then the edits are
+        replicated in all of the directories for redundancy.
+        The default value is the same as fs.checkpoint.dir.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.period</name>
+    <value>21600</value>
+    <description>The number of seconds between two periodic checkpoints.
+  </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.size</name>
+    <value>536870912</value>
+    <description>The size of the current edit log (in bytes) that triggers
+       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+  </description>
+  </property>
+
+  <!-- ipc properties: copied from kryptonite configuration -->
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+  </property>
+
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>webinterface.private.actions</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of JT and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to the public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+  </description>
+  </property>
+
+ <property>
+   <name>hadoop.security.authentication</name>
+   <value></value>
+   <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+ </property>
+<property>
+  <name>hadoop.security.authorization</name>
+  <value></value>
+  <description>
+     Enable authorization for different protocols.
+  </description>
+</property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value></value>
+<description>The mapping from kerberos principal names to local OS user names.
+  The default rule is just "DEFAULT", which maps all principals in your default domain to their first component:
+  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both become "omalley" if your default domain is APACHE.ORG.
+The translation rules have 3 sections:
+      base     filter    substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+    </description>
+  </property>
+
+<!--
+<property>
+  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
+  <value></value>
+  <description>
+     Proxy group for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
+  <value></value>
+  <description>
+     Proxy host for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
+  <value></value>
+  <description>
+     Proxy group for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
+  <value></value>
+  <description>
+     Proxy host for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
+  <value></value>
+  <description>
+    Proxy group for templeton.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
+  <value></value>
+  <description>
+    Proxy host for templeton.
+  </description>
+</property>
+-->
+</configuration>
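
For orientation, the file-system properties above are populated by the stack with a fully qualified NameNode URI and a checkpoint directory. A minimal hypothetical fragment (the host name and port are illustrative only; the checkpoint path mirrors the fs_checkpoint_dir default used elsewhere in this stack) might be:

  <property>
    <name>fs.default.name</name>
    <value>hdfs://namenode.example.org:8020</value>
    <final>true</final>
  </property>
  <property>
    <name>fs.checkpoint.dir</name>
    <value>/hadoop/hdfs/namesecondary</value>
  </property>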

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/global.xml
new file mode 100644
index 0000000..f10b9f9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/global.xml
@@ -0,0 +1,187 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>dfs_name_dir</name>
+    <value>/hadoop/hdfs/namenode</value>
+    <description>NameNode Directories.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Secondary NameNode checkpoint dir.</description>
+  </property>
+  <property>
+    <name>datanode_hosts</name>
+    <value></value>
+    <description>List of Datanode Hosts.</description>
+  </property>
+  <property>
+    <name>dfs_data_dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Data directories for Data Nodes.</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>dfs_webhdfs_enabled</name>
+    <value>true</value>
+    <description>WebHDFS enabled</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>640</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>datanode_du_reserved</name>
+    <value>1</value>
+    <description>Reserved space for HDFS</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>dfs_datanode_failed_volume_tolerated</name>
+    <value>0</value>
+    <description>DataNode volumes failure toleration</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_period</name>
+    <value>21600</value>
+    <description>HDFS Maximum Checkpoint Delay</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_size</name>
+    <value>0.5</value>
+    <description>FS Checkpoint Size.</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>dfs_exclude</name>
+    <value></value>
+    <description>HDFS Exclude hosts.</description>
+  </property>
+  <property>
+    <name>dfs_include</name>
+    <value></value>
+    <description>HDFS Include hosts.</description>
+  </property>
+  <property>
+    <name>dfs_replication</name>
+    <value>3</value>
+    <description>Default Block Replication.</description>
+  </property>
+  <property>
+    <name>dfs_block_local_path_access_user</name>
+    <value>hbase</value>
+    <description>User allowed to perform block local path access (short-circuit reads).</description>
+  </property>
+  <property>
+    <name>dfs_datanode_address</name>
+    <value>50010</value>
+    <description>Port for datanode address.</description>
+  </property>
+  <property>
+    <name>dfs_datanode_http_address</name>
+    <value>50075</value>
+    <description>Port for datanode HTTP address.</description>
+  </property>
+  <property>
+    <name>dfs_datanode_data_dir_perm</name>
+    <value>750</value>
+    <description>Datanode dir perms.</description>
+  </property>
+
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>kadmin_pw</name>
+    <value></value>
+    <description>Kerberos realm admin password</description>
+  </property>
+  <property>
+    <name>keytab_path</name>
+    <value>/etc/security/keytabs</value>
+    <description>Kerberos keytab path.</description>
+  </property>
+
+    <property>
+    <name>namenode_formatted_mark_dir</name>
+    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
+    <description>Formatted Mark Directory.</description>
+  </property>
+    <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User and Groups.</description>
+  </property>
+  
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hadoop-policy.xml
new file mode 100644
index 0000000..900da99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hadoop-policy.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.tracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
+    communicate with the jobtracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.submission.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for JobSubmissionProtocol, used by job clients to
+    communicate with the jobtracker for job submission, querying job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.task.umbilical.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value></value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value></value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+<property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value></value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+
+</configuration>
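
All of the service-level ACLs above default to "*" (all users). The descriptions note that the format is a comma-separated user list, a blank, then a comma-separated group list, so a hypothetical tightened policy (user and group names are illustrative, taken from the example in the descriptions) would look like:

  <property>
    <name>security.client.protocol.acl</name>
    <value>alice,bob users,wheel</value>
  </property>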

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..6246def
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,446 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value></value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value></value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value></value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+ <property>
+    <name>dfs.datanode.socket.write.timeout</name>
+    <value>0</value>
+    <description>DFS Client write socket timeout</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value></value>
+    <description>Number of failed disks a DataNode would tolerate.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value></value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value></value>
+    <description>Determines where on the local filesystem a DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value></value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value></value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value></value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in term of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value></value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value></value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value></value>
+<description>The address and the base port where the dfs namenode web ui
+will listen on. If the port is 0 then the server will start on a free port.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value></value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.datanode.du.pct</name>
+<value>0.85f</value>
+<description>When calculating remaining space, only use this percentage of the real available space
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>4096</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value></value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value></value>
+    <description>
+        Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value></value>
+     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value></value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value></value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value></value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value></value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value></value>
+ <description>
+        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value></value>
+ <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value></value>
+  <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value></value>
+ <description>
+        The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value></value>
+  <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value></value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+  <name>dfs.access.time.precision</name>
+  <value>0</value>
+  <description>The access time for HDFS files is precise up to this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+  </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+  <name>ipc.server.read.threadpool.size</name>
+  <value>5</value>
+  <description></description>
+</property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>Number of failed disks a DataNode will tolerate.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.avoid.read.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid reading from stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.avoid.write.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid writing to stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.write.stale.datanode.ratio</name>
+    <value>1.0f</value>
+    <description>When the ratio of stale datanodes to total datanodes is greater
+      than this value, stop avoiding writes to stale nodes so as to prevent causing hotspots.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.stale.datanode.interval</name>
+    <value>30000</value>
+    <description>A DataNode is marked stale after no heartbeat has been received for this interval, in milliseconds.</description>
+  </property>
+
+</configuration>
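
The last four properties above work together: a DataNode whose heartbeat is older than dfs.namenode.stale.datanode.interval is treated as stale, reads and writes try to avoid it, and dfs.namenode.write.stale.datanode.ratio caps how far write avoidance goes. A minimal Python sketch of that decision logic, assuming the values configured above (the function and variable names are illustrative, not HDFS code):

    # Illustrative sketch of the stale-datanode settings above; not actual HDFS code.
    STALE_INTERVAL_MS = 30000      # dfs.namenode.stale.datanode.interval
    WRITE_STALE_RATIO = 1.0        # dfs.namenode.write.stale.datanode.ratio

    def is_stale(last_heartbeat_ms, now_ms):
        # A DataNode counts as stale once no heartbeat has arrived within the interval.
        return (now_ms - last_heartbeat_ms) > STALE_INTERVAL_MS

    def avoid_stale_for_writes(num_stale, num_total):
        # Writes skip stale nodes only while the stale fraction stays below the ratio;
        # beyond that, avoidance is dropped so the healthy nodes are not hot-spotted.
        return num_total > 0 and (num_stale / num_total) < WRITE_STALE_RATIO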

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..8b61e0a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/metainfo.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <user>root</user>
+    <comment>Apache Hadoop Distributed File System</comment>
+    <version>1.2.0.1.3.1.0</version>
+
+    <components>
+        <component>
+            <name>NAMENODE</name>
+            <category>MASTER</category>
+        </component>
+
+        <component>
+            <name>DATANODE</name>
+            <category>SLAVE</category>
+        </component>
+
+        <component>
+            <name>SECONDARY_NAMENODE</name>
+            <category>MASTER</category>
+        </component>
+
+        <component>
+            <name>HDFS_CLIENT</name>
+            <category>CLIENT</category>
+        </component>
+    </components>
+
+
+</metainfo>
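
The metainfo.xml above is the service descriptor that tells the stack definition which components HDFS provides and how each is categorized. A quick way to inspect such a descriptor, sketched in Python (the file path and the grouping are only for illustration):

    # Illustrative: group the components declared in a metainfo.xml by category.
    import xml.etree.ElementTree as ET
    from collections import defaultdict

    tree = ET.parse("metainfo.xml")   # path is an assumption for this example
    by_category = defaultdict(list)
    for component in tree.findall("./components/component"):
        by_category[component.findtext("category")].append(component.findtext("name"))

    # For the HDFS descriptor above this prints:
    #   MASTER -> NAMENODE, SECONDARY_NAMENODE
    #   SLAVE -> DATANODE
    #   CLIENT -> HDFS_CLIENT
    for category, names in by_category.items():
        print(category, "->", ", ".join(names))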

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/global.xml
new file mode 100644
index 0000000..d9adc80
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/global.xml
@@ -0,0 +1,125 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hivemetastore_host</name>
+    <value></value>
+    <description>Hive Metastore host.</description>
+  </property>
+  <property>
+    <name>hive_database</name>
+    <value></value>
+    <description>Hive database name.</description>
+  </property>
+  <property>
+    <name>hive_existing_mysql_database</name>
+    <value></value>
+    <description>Hive database name.</description>
+  </property>
+  <property>
+    <name>hive_existing_mysql_host</name>
+    <value></value>
+    <description>Existing MySQL database host.</description>
+  </property>
+  <property>
+    <name>hive_existing_oracle_database</name>
+    <value></value>
+    <description>Hive database name.</description>
+  </property>
+  <property>
+    <name>hive_existing_oracle_host</name>
+    <value></value>
+    <description>Existing Oracle database host.</description>
+  </property>
+  <property>
+    <name>hive_ambari_database</name>
+    <value>MySQL</value>
+    <description>Database type.</description>
+  </property>  
+  <property>
+    <name>hive_ambari_host</name>
+    <value></value>
+    <description>Database hostname.</description>
+  </property>
+  <property>
+    <name>hive_database_name</name>
+    <value></value>
+    <description>Database name.</description>
+  </property>    
+  <property>
+    <name>hive_metastore_user_name</name>
+    <value>hive</value>
+    <description>Database username to use to connect to the database.</description>
+  </property>    
+  <property>
+    <name>hive_metastore_user_passwd</name>
+    <value></value>
+    <description>Database password to use to connect to the database.</description>
+  </property>    
+  <property>
+    <name>hive_metastore_port</name>
+    <value>9083</value>
+    <description>Hive Metastore port.</description>
+  </property>    
+  <property>
+    <name>hive_lib</name>
+    <value>/usr/lib/hive/lib/</value>
+    <description>Hive Library.</description>
+  </property>    
+  <property>
+    <name>hive_dbroot</name>
+    <value>/usr/lib/hive/lib/</value>
+    <description>Hive DB Directory.</description>
+  </property>      
+  <property>
+    <name>hive_conf_dir</name>
+    <value>/etc/hive/conf</value>
+    <description>Hive Conf Dir.</description>
+  </property>
+  <property>
+    <name>hive_log_dir</name>
+    <value>/var/log/hive</value>
+    <description>Directory for Hive Log files.</description>
+  </property>
+  <property>
+    <name>hive_pid_dir</name>
+    <value>/var/run/hive</value>
+    <description>Hive PID Dir.</description>
+  </property>
+  <property>
+    <name>mysql_connector_url</name>
+    <value>${download_url}/mysql-connector-java-5.1.18.zip</value>
+    <description>URL for downloading the MySQL JDBC connector.</description>
+  </property>
+  <property>
+    <name>hive_aux_jars_path</name>
+    <value>/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar</value>
+    <description>Hive auxiliary jar path.</description>
+  </property>
+  <property>
+    <name>hive_user</name>
+    <value>hive</value>
+    <description>Hive User.</description>
+  </property>
+  
+</configuration>
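
Some values in this global.xml, such as mysql_connector_url, embed a ${download_url} placeholder that is resolved elsewhere before the value is used. A minimal Python sketch of that kind of substitution, purely for illustration (the variable map and the resolved URL are assumptions, not Ambari code):

    # Illustrative: resolve a ${...} placeholder in a configuration value.
    from string import Template

    variables = {"download_url": "http://repo.example.com/artifacts"}   # assumed value
    raw_value = "${download_url}/mysql-connector-java-5.1.18.zip"

    resolved = Template(raw_value).substitute(variables)
    print(resolved)   # http://repo.example.com/artifacts/mysql-connector-java-5.1.18.zip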

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 0000000..40fa0a7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,243 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration>
+  <property>
+    <name>hive.metastore.local</name>
+    <value>false</value>
+    <description>Controls whether to connect to a remote metastore server or
+    open a new metastore server in the Hive Client JVM.</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value></value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value></value>
+    <description>username to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value></value>
+    <description>password to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value></value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+     Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value></value>
+    <description>The path to the Kerberos Keytab file containing the metastore
+     thrift server's service principal.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.principal</name>
+    <value></value>
+    <description>The service principal for the metastore thrift server. The special
+    string _HOST will be replaced automatically with the correct host name.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value></value>
+    <description>URI for client to contact metastore server</description>
+  </property>
+
+  <property>
+    <name>hive.semantic.analyzer.factory.impl</name>
+    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
+    <description>Controls which SemanticAnalyzerFactory implementation class is used by the CLI.</description>
+  </property>
+
+  <property>
+    <name>hadoop.clientside.fs.operations</name>
+    <value>true</value>
+    <description>FS operations are owned by client</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>60</value>
+    <description>MetaStore Client socket timeout in seconds</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>true</value>
+    <description>enable or disable the hive client authorization</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
+    <description>the hive client authorization manager class name.
+    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.hdfs.impl.disable.cache</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.file.impl.disable.cache</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.enforce.bucketing</name>
+    <value>true</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.sorting</name>
+    <value>true</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>Whether speculative execution for reducers should be turned on.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common
+      join into a map join based on the input file size.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>true</value>
+    <description>Whether the join is automatically converted to a sort-merge join if the joined tables pass
+      the criteria for sort-merge join.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common join into a map join based on the input file
+      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than the
+      specified size, the join is directly converted to a map join (there is no conditional task).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>1000000000</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
+      converted to a map join (there is no conditional task). The Hive default is 10MB.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>1</value>
+    <description>Reduce deduplication merges two reduce-sink (RS) operators by moving the key/partition/reducer-num of the child RS to the parent RS.
+      If the reducer-num of the child RS is fixed (order by or forced bucketing) and small, this can produce a very slow, single-reducer MR job.
+      The optimization will be disabled if the number of reducers is less than the specified value.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.mapjoin.mapreduce</name>
+    <value>true</value>
+    <description>If hive.auto.convert.join is off, this parameter does not take
+      effect. If it is on, and if there are map-join jobs followed by a map-reduce
+      job (e.g. a group by), each map-only job is merged with the following
+      map-reduce job.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>10000</value>
+    <description>
+      How many values for each key of the small table are cached in memory
+      during a bucket map join.
+    </description>
+  </property>
+
+</configuration>
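
Of the join settings above, hive.auto.convert.join.noconditionaltask.size is the one that does the arithmetic: for an n-way join, if the n-1 smaller inputs together fit under the threshold, the join is rewritten as a map join with no conditional task. A small Python sketch of that check under the 1000000000-byte value configured above (the function is illustrative, not Hive code):

    # Illustrative: the size test behind hive.auto.convert.join.noconditionaltask.
    NOCONDITIONALTASK_SIZE = 1000000000   # bytes, as configured above

    def converts_to_mapjoin(table_sizes_bytes):
        # The largest input stays streamed; the remaining n-1 must fit under the threshold.
        smaller_inputs = sorted(table_sizes_bytes)[:-1]
        return sum(smaller_inputs) <= NOCONDITIONALTASK_SIZE

    # Example: a 3-way join of 20 GB, 400 MB and 300 MB inputs converts to a map join.
    print(converts_to_mapjoin([20 * 10**9, 400 * 10**6, 300 * 10**6]))   # True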

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..b185e13
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/metainfo.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <user>root</user>
+    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+    <version>0.11.0.1.3.1.0</version>
+
+    <components>        
+        <component>
+            <name>HIVE_METASTORE</name>
+            <category>MASTER</category>
+        </component>
+        <component>
+            <name>HIVE_SERVER</name>
+            <category>MASTER</category>
+        </component>
+        <component>
+            <name>MYSQL_SERVER</name>
+            <category>MASTER</category>
+        </component>
+        <component>
+            <name>HIVE_CLIENT</name>
+            <category>CLIENT</category>
+        </component>
+    </components>
+
+
+</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/global.xml
new file mode 100644
index 0000000..c49480f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/global.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hue_pid_dir</name>
+    <value>/var/run/hue</value>
+    <description>Hue Pid Dir.</description>
+  </property>
+  <property>
+    <name>hue_log_dir</name>
+    <value>/var/log/hue</value>
+    <description>Hue Log Dir.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b7316c60/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/hue-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/hue-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/hue-site.xml
new file mode 100644
index 0000000..6eb52a2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/hue-site.xml
@@ -0,0 +1,290 @@
+<?xml version="1.0"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+  <!-- General Hue server configuration properties -->
+  <property>
+    <name>send_debug_messages</name>
+    <value>1</value>
+    <description>Whether to send debug messages.</description>
+  </property>
+
+  <property>
+    <name>database_logging</name>
+    <value>0</value>
+    <description>To show database transactions, set database_logging to 1.
+      By default, database_logging is 0.</description>
+  </property>
+
+  <property>
+    <name>secret_key</name>
+    <value></value>
+    <description>This is used for secure hashing in the session store.</description>
+  </property>
+
+  <property>
+    <name>http_host</name>
+    <value>0.0.0.0</value>
+    <description>Webserver listens on this address and port</description>
+  </property>
+
+  <property>
+    <name>http_port</name>
+    <value>8000</value>
+    <description>Webserver listens on this address and port</description>
+  </property>
+
+  <property>
+    <name>time_zone</name>
+    <value>America/Los_Angeles</value>
+    <description>Time zone name</description>
+  </property>
+
+  <property>
+    <name>django_debug_mode</name>
+    <value>1</value>
+    <description>Enable or disable Django debug mode.</description>
+  </property>
+
+  <property>
+    <name>use_cherrypy_server</name>
+    <value>false</value>
+    <description>Set to true to use CherryPy as the webserver, set to false
+      to use Spawning as the webserver. Defaults to Spawning if
+      key is not specified.</description>
+  </property>
+
+  <property>
+    <name>http_500_debug_mode</name>
+    <value>1</value>
+    <description>Enable or disable the backtrace shown for server (HTTP 500) errors.</description>
+  </property>
+
+  <property>
+    <name>server_user</name>
+    <value></value>
+    <description>Webserver runs as this user</description>
+  </property>
+
+  <property>
+    <name>server_group</name>
+    <value></value>
+    <description>Webserver runs as this group</description>
+  </property>
+
+  <property>
+    <name>backend_auth_policy</name>
+    <value>desktop.auth.backend.AllowAllBackend</value>
+    <description>Authentication backend.</description>
+  </property>
+
+  <!-- Hue Database configuration properties -->
+  <property>
+    <name>db_engine</name>
+    <value>mysql</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <property>
+    <name>db_host</name>
+    <value>localhost</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <property>
+    <name>db_port</name>
+    <value>3306</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <property>
+    <name>db_user</name>
+    <value>sandbox</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <property>
+    <name>db_password</name>
+    <value>1111</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <property>
+    <name>db_name</name>
+    <value>sandbox</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <!-- Hue Email configuration properties -->
+  <property>
+    <name>smtp_host</name>
+    <value>localhost</value>
+    <description>The SMTP server information for email notification delivery.</description>
+  </property>
+
+  <property>
+    <name>smtp_port</name>
+    <value>25</value>
+    <description>The SMTP server information for email notification delivery.</description>
+  </property>
+
+  <property>
+    <name>smtp_user</name>
+    <value></value>
+    <description>The SMTP server information for email notification delivery.</description>
+  </property>
+
+  <property>
+    <name>smtp_password</name>
+    <value>25</value>
+    <description>The SMTP server information for email notification delivery.</description>
+  </property>
+
+  <property>
+    <name>tls</name>
+    <value>no</value>
+    <description>Whether to use a TLS (secure) connection when talking to the SMTP server.</description>
+  </property>
+
+  <property>
+    <name>default_from_email</name>
+    <value>sandbox@hortonworks.com</value>
+    <description>The SMTP server information for email notification delivery.</description>
+  </property>
+
+  <!-- Hue Hadoop configuration properties -->
+  <property>
+    <name>fs_defaultfs</name>
+    <value></value>
+    <description>Enter the filesystem URI,
+      e.g. hdfs://sandbox:8020.</description>
+  </property>
+
+  <property>
+    <name>webhdfs_url</name>
+    <value></value>
+    <description>Use WebHdfs/HttpFs as the communication mechanism. To fall back to
+      using the Thrift plugin (used in Hue 1.x), this must be uncommented
+      and explicitly set to the empty value.
+      Value e.g.: http://localhost:50070/webhdfs/v1/</description>
+  </property>
+
+  <property>
+    <name>jobtracker_host</name>
+    <value></value>
+    <description>Enter the host on which you are running the Hadoop JobTracker.</description>
+  </property>
+
+  <property>
+    <name>jobtracker_port</name>
+    <value>50030</value>
+    <description>The port the JobTracker IPC listens on.</description>
+  </property>
+
+  <property>
+    <name>hadoop_mapred_home</name>
+    <value>/usr/lib/hadoop/lib</value>
+    <description>Directory containing the Hadoop MapReduce libraries.</description>
+  </property>
+
+  <property>
+    <name>resourcemanager_host</name>
+    <value></value>
+    <description>Enter the host on which you are running the ResourceManager.</description>
+  </property>
+
+  <property>
+    <name>resourcemanager_port</name>
+    <value></value>
+    <description>The port the ResourceManager IPC listens on.</description>
+  </property>
+
+  <!-- Hue Beeswax configuration properties -->
+  <property>
+    <name>hive_home_dir</name>
+    <value></value>
+    <description>Hive home directory.</description>
+  </property>
+
+  <property>
+    <name>hive_conf_dir</name>
+    <value></value>
+    <description>Hive configuration directory, where hive-site.xml is
+      located.</description>
+  </property>
+
+  <property>
+    <name>templeton_url</name>
+    <value></value>
+    <description>WebHcat http URL</description>
+  </property>
+
+  <!-- Hue shell types configuration -->
+  <property>
+    <name>pig_nice_name</name>
+    <value></value>
+    <description>Define and configure a new shell type pig</description>
+  </property>
+
+  <property>
+    <name>pig_shell_command</name>
+    <value>/usr/bin/pig -l /dev/null</value>
+    <description>Define and configure a new shell type pig.</description>
+  </property>
+
+  <property>
+    <name>pig_java_home</name>
+    <value></value>
+    <description>Define and configure a new shell type pig.</description>
+  </property>
+
+  <property>
+    <name>hbase_nice_name</name>
+    <value>HBase Shell</value>
+    <description>Define and configure a new shell type hbase</description>
+  </property>
+
+  <property>
+    <name>hbase_shell_command</name>
+    <value>/usr/bin/hbase shell</value>
+    <description>Define and configure a new shell type hbase.</description>
+  </property>
+
+  <property>
+    <name>bash_nice_name</name>
+    <value></value>
+    <description>Define and configure a new shell type bash for testing
+      only</description>
+  </property>
+
+  <property>
+    <name>bash_shell_command</name>
+    <value>/bin/bash</value>
+    <description>Define and configure a new shell type bash for testing
+      only.</description>
+  </property>
+
+  <!-- Hue Settings for the User Admin application -->
+  <property>
+    <name>whitelist</name>
+    <value>(localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)</value>
+    <description>proxy settings</description>
+  </property>
+
+</configuration>
\ No newline at end of file
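
The whitelist property at the end of hue-site.xml is a regular expression over host:port pairs that Hue's proxy is allowed to contact. A short Python check of which targets the configured value admits (the sample targets are illustrative):

    # Illustrative: test the Hue proxy whitelist regex configured above.
    import re

    whitelist = re.compile(r"(localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)")

    for target in ["localhost:50070", "127.0.0.1:50030", "namenode.example.org:50070"]:
        print(target, bool(whitelist.fullmatch(target)))
    # The first two match; the last is rejected because only local addresses are whitelisted.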

