ambari-commits mailing list archives

From dmitriu...@apache.org
Subject [48/70] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation. Change defaults (dlysnichenko)
Date Thu, 09 Jun 2016 15:02:13 GMT
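
Every hunk in this patch makes the same mechanical change to the stack configuration xmls: the fully spelled-out <on-ambari-upgrade> element is collapsed to just its add attribute. As a quick orientation before the per-file diffs, here is the before/after shape of one affected property (tez.am.log.level, taken from the first hunk below); per the commit subject, the dropped update/delete attributes are assumed to fall back to the changed defaults:

    <!-- before: all three upgrade behaviours listed explicitly -->
    <property>
      <name>tez.am.log.level</name>
      <value>INFO</value>
      <description>Root Logging level passed to the Tez app master</description>
      <on-ambari-upgrade add="true" update="false" delete="false"/>
    </property>

    <!-- after: only add is kept; update/delete presumably rely on the new defaults -->
    <property>
      <name>tez.am.log.level</name>
      <value>INFO</value>
      <description>Root Logging level passed to the Tez app master</description>
      <on-ambari-upgrade add="true"/>
    </property>
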
http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
index aa53f30..d719322 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
@@ -24,25 +24,25 @@
       Specifying a single .tar.gz or .tgz assumes that a compressed version of the tez libs is being used. This is uncompressed into a tezlibs directory when running containers, and tezlibs/;tezlibs/lib/ are added to the classpath (after . and .*).
       If multiple files are specified - files are localized as regular files, contents of directories are localized as regular files (non-recursive).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.cluster.additional.classpath.prefix</name>
     <value>/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.log.level</name>
     <value>INFO</value>
     <description>Root Logging level passed to the Tez app master</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.generate.debug.artifacts</name>
     <value>false</value>
     <description>Generate debug artifacts such as a text representation of the submitted DAG plan</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.resource.memory.mb</name>
@@ -50,7 +50,7 @@
     <description>The amount of memory to be used by the AppMaster.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.launch.cmd-opts</name>
@@ -58,13 +58,13 @@
     <description>Java options for the Tez AppMaster process. The Xmx value is derived based on tez.am.resource.memory.mb and is 80% of the value by default.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.launch.cluster-default.cmd-opts</name>
     <value>-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description>Cluster default Java options for the Tez AppMaster process. These will be prepended to the properties specified via tez.am.launch.cmd-opts</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.launch.env</name>
@@ -74,7 +74,7 @@
         you want to have access to native libraries.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.resource.memory.mb</name>
@@ -82,7 +82,7 @@
     <description>The amount of memory to be used by launched tasks.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.launch.cmd-opts</name>
@@ -90,13 +90,13 @@
     <description>Java options for tasks. The Xmx value is derived based on tez.task.resource.memory.mb and is 80% of this value by default.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.launch.cluster-default.cmd-opts</name>
     <value>-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description>Cluster default Java options for tasks. These will be prepended to the properties specified via tez.task.launch.cmd-opts</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.launch.env</name>
@@ -106,7 +106,7 @@
       you want to have access to native libraries.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.shuffle-vertex-manager.min-src-fraction</name>
@@ -114,7 +114,7 @@
     <description>In case of a ScatterGather connection, the fraction of source tasks which should
      complete before tasks for the current vertex are scheduled
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.shuffle-vertex-manager.max-src-fraction</name>
@@ -123,13 +123,13 @@
       completed, all tasks on the current vertex can be scheduled. Number of tasks ready for
       scheduling on the current vertex scales linearly between min-fraction and max-fraction
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.am-rm.heartbeat.interval-ms.max</name>
     <value>250</value>
     <description>The maximum heartbeat interval between the AM and RM in milliseconds</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.grouping.split-waves</name>
@@ -138,7 +138,7 @@
       a Vertex. 1.7 with 100% queue available implies generating a number of tasks roughly equal
       to 170% of the available containers on the queue
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.grouping.min-size</name>
@@ -146,7 +146,7 @@
     <description>Lower bound on the size (in bytes) of a grouped split, to avoid generating
       too many splits
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.grouping.max-size</name>
@@ -154,39 +154,39 @@
     <description>Upper bound on the size (in bytes) of a grouped split, to avoid generating
      excessively large splits
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.reuse.enabled</name>
     <value>true</value>
     <description>Configuration to specify whether container should be reused</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.reuse.rack-fallback.enabled</name>
     <value>true</value>
     <description>Whether to reuse containers for rack local tasks. Active only if reuse is enabled
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.reuse.non-local-fallback.enabled</name>
     <value>false</value>
     <description>Whether to reuse containers for non-local tasks. Active only if reuse is enabled
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.idle.release-timeout-min.millis</name>
     <value>10000</value>
     <description>The minimum amount of time to hold on to a container that is idle. Only active when reuse is enabled.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.idle.release-timeout-max.millis</name>
     <value>20000</value>
     <description>The maximum amount of time to hold on to a container if no task can be assigned to it immediately. Only active when reuse is enabled.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.reuse.locality.delay-allocation-millis</name>
@@ -194,13 +194,13 @@
     <description>The amount of time to wait before assigning a container to the next level of
       locality. NODE -&gt; RACK -&gt; NON_LOCAL
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.max.app.attempts</name>
     <value>2</value>
    <description>Specifies the total number of times the app master will run in case recovery is triggered</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.maxtaskfailures.per.node</name>
@@ -208,13 +208,13 @@
     <description>The maximum number of allowed task attempt failures on a node before
       it gets marked as blacklisted
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.am.heartbeat.counter.interval-ms.max</name>
     <value>4000</value>
     <description>Time interval at which task counters are sent to the AM</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.get-task.sleep.interval-ms.max</name>
@@ -222,13 +222,13 @@
     <description>The maximum amount of time, in seconds, to wait before a task asks an AM for
       another task
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.max-events-per-heartbeat</name>
     <value>500</value>
    <description>Maximum number of events to fetch from the AM by the tasks in a single heartbeat.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.session.client.timeout.secs</name>
@@ -236,7 +236,7 @@
     <description>Time (in seconds) to wait for AM to come up when trying to submit a DAG from
       the client
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.session.am.dag.submit.timeout.secs</name>
@@ -244,19 +244,19 @@
     <description>Time (in seconds) for which the Tez AM should wait for a DAG to be submitted
       before shutting down
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.counters.max</name>
     <value>2000</value>
     <description>The number of allowed counters for the executing DAG</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.counters.max.groups</name>
     <value>1000</value>
     <description>The number of allowed counter groups for the executing DAG</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- Configuration for runtime components -->
   <!-- These properties can be set on a per edge basis by configuring the payload for each
@@ -265,7 +265,7 @@
     <name>tez.runtime.compress</name>
     <value>true</value>
     <description>Whether intermediate data should be compressed or not</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.runtime.compress.codec</name>
@@ -273,19 +273,19 @@
    <description>The codec to be used if compressing intermediate data. Only
       applicable if tez.runtime.compress is enabled
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.runtime.io.sort.mb</name>
     <value>512</value>
     <description>The size of the sort buffer when output needs to be sorted</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.runtime.unordered.output.buffer.size-mb</name>
     <value>100</value>
     <description>The size of the buffer when output does not require to be sorted</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.history.logging.service.class</name>
@@ -294,6 +294,6 @@
       Set to org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService to log to ATS
       Set to org.apache.tez.dag.history.logging.impl.SimpleHistoryLoggingService to log to the filesystem specified by ${fs.defaultFS}
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration-mapred/mapred-site.xml
index 9e8ba0a..cc04c63 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration-mapred/mapred-site.xml
@@ -23,7 +23,7 @@
     <deleted>true</deleted>
     <value/>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.application.classpath</name>
@@ -33,64 +33,64 @@
     <description>
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH entries.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.application.framework.path</name>
     <deleted>true</deleted>
     <value/>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.admin-command-opts</name>
     <deleted>true</deleted>
     <value/>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <deleted>true</deleted>
     <value/>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <deleted>true</deleted>
     <value/>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
     <value>1</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
     <value>1000</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
     <value>30000</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.job.emit-timeline-data</name>
     <value>false</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.bind-host</name>
     <value>0.0.0.0</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml
index 1cfae9d..6a957c0 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml
@@ -34,30 +34,30 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
     <value>*</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels.default.capacity</name>
     <value>-1</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity</name>
     <value>-1</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default-node-label-expression</name>
     <value> </value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml
index ec7946b..858264f 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml
@@ -39,6 +39,6 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml
index c8b339d..62c5693 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml
@@ -22,7 +22,7 @@
     <name>yarn.application.classpath</name>
     <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value>
     <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.registry.rm.enabled</name>
@@ -30,7 +30,7 @@
     <description>
       Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.registry.zk.quorum</name>
@@ -38,13 +38,13 @@
     <description>
       List of hostname:port pairs defining the zookeeper quorum binding for the registry
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.recovery.enabled</name>
     <value>true</value>
     <description>Enable the node manager to recover after starting</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.recovery.dir</name>
@@ -53,19 +53,19 @@
       The local filesystem directory in which the node manager will store
       state when recovery is enabled.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
     <value>10000</value>
     <description>Time interval between each attempt to connect to NM</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.client.nodemanager-connect.max-wait-ms</name>
     <value>60000</value>
     <description>Max time to wait to establish a connection to NM</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.recovery.enabled</name>
@@ -74,7 +74,7 @@
       Enable RM to recover state after starting.
       If true, then yarn.resourcemanager.store.class must be specified.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
@@ -86,7 +86,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.store.class</name>
@@ -97,7 +97,7 @@
       the store is implicitly fenced; meaning a single ResourceManager
       is able to use the store at any point in time.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-address</name>
@@ -105,37 +105,37 @@
     <description>
       List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-state-store.parent-path</name>
     <value>/rmstore</value>
     <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-acl</name>
     <value>world:anyone:rwcda</value>
     <description>ACL's to be used for ZooKeeper znodes.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
     <value>10000</value>
     <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.connect.retry-interval.ms</name>
     <value>30000</value>
     <description>How often to try connecting to the ResourceManager.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.connect.max-wait.ms</name>
     <value>900000</value>
     <description>Maximum time to wait to establish connection to ResourceManager</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-retry-interval-ms</name>
@@ -145,67 +145,67 @@
       automatically from yarn.resourcemanager.zk-timeout-ms and
       yarn.resourcemanager.zk-num-retries."
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-num-retries</name>
     <value>1000</value>
     <description>Number of times RM tries to connect to ZooKeeper.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-timeout-ms</name>
     <value>10000</value>
     <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.state-store.max-completed-applications</name>
     <value>${yarn.resourcemanager.max-completed-applications}</value>
    <description>The maximum number of completed applications RM state store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. By default, it equals ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
     <value>2000, 500</value>
     <description>hdfs client retry policy specification. hdfs client retry is always enabled. Specified in pairs of sleep-time and number-of-retries and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.fs.state-store.uri</name>
     <value> </value>
    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.ha.enabled</name>
     <value>false</value>
     <description>enable RM HA or not</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
     <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
     <description>Pre-requisite to use CGroups</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
     <value>hadoop-yarn</value>
     <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
     <value>false</value>
     <description>If true, YARN will automount the CGroup, however the directory needs to already exist; else, the cgroup should be mounted by the admin</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
     <value>false</value>
     <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.cpu-vcores</name>
@@ -223,7 +223,7 @@
         <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
@@ -236,37 +236,37 @@
       <maximum>100</maximum>
       <increment-step>1</increment-step>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.node-labels.manager-class</name>
     <value>org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager</value>
    <description>If users want to enable this feature, specify it as "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager"</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.node-labels.fs-store.retry-policy-spec</name>
     <value>2000, 500</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
     <value>1000</value>
     <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
     <value>90</value>
     <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
     <value>-1</value>
    <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling-interval-seconds that can be set is 3600.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
@@ -275,37 +275,37 @@
      This configuration is for debug and test purposes.
      By setting this configuration to true, we can break the lower bound of
      yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
     <value>30</value>
    <description>This is a temporary solution. The configuration will be deleted once we find a more scalable method to only write a single log file per LRS.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
     <value>true</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
     <value>10</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.client.max-retries</name>
     <value>30</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.client.retry-interval-ms</name>
     <value>1000</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.ttl-enable</name>
@@ -313,13 +313,13 @@
     <description>
       Enable age off of timeline store data.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.path</name>
     <value>/hadoop/yarn/timeline</value>
     <description>Store file name for leveldb timeline store.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
@@ -327,7 +327,7 @@
     <description>
       Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
@@ -335,7 +335,7 @@
     <description>
       Size of cache for recently read entity start times for leveldb timeline store in number of entities.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
@@ -343,7 +343,7 @@
     <description>
       Size of cache for recently written entity start times for leveldb timeline store in number of entities.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.http-authentication.type</name>
@@ -352,13 +352,13 @@
       Defines authentication used for the Timeline Server HTTP endpoint.
       Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
     <value>true</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
@@ -369,31 +369,31 @@
      tokens (fallback to kerberos if the tokens are missing).
       Only applicable when the http authentication type is kerberos.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.bind-host</name>
     <value>0.0.0.0</value>
     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.bind-host</name>
     <value>0.0.0.0</value>
     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.bind-host</name>
     <value>0.0.0.0</value>
     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.node-labels.fs-store.root-dir</name>
     <value>/system/yarn/node-labels</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.minimum-allocation-vcores</name>
@@ -412,7 +412,7 @@
         <name>yarn.nodemanager.resource.cpu-vcores</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.maximum-allocation-vcores</name>
@@ -431,7 +431,7 @@
         <name>yarn.nodemanager.resource.cpu-vcores</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.node-labels.enabled</name>
@@ -454,7 +454,7 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.scheduler.monitor.enable</name>
@@ -474,6 +474,6 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/FALCON/configuration/falcon-startup.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/FALCON/configuration/falcon-startup.properties.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/FALCON/configuration/falcon-startup.properties.xml
index 00a6855..ae4ecfd 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/FALCON/configuration/falcon-startup.properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/FALCON/configuration/falcon-startup.properties.xml
@@ -24,6 +24,6 @@
     <name>*.shared.libs</name>
     <value>activemq-core,ant,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3,commons-el</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HBASE/configuration/hbase-site.xml
index bc39039..afdb672 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HBASE/configuration/hbase-site.xml
@@ -28,36 +28,36 @@
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.master.info.port</name>
     <value>60010</value>
     <description>The port for the HBase Master web UI.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.port</name>
     <value>16020</value>
     <description>The port the HBase RegionServer binds to.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.info.port</name>
     <value>16030</value>
     <description>The port for the HBase RegionServer web UI.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.global.memstore.upperLimit</name>
     <value>0.4</value>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.global.memstore.lowerLimit</name>
     <value>0.38</value>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml
index 9aafef6..41590b6 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml
@@ -28,12 +28,12 @@
       they are accumulated to exceed certain threshold (e.g., 1MB) in memory.
       One needs to make sure the directory has enough space.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>nfs.exports.allowed.hosts</name>
     <value>* rw</value>
     <display-name>Allowed hosts</display-name>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HIVE/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HIVE/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HIVE/configuration/webhcat-site.xml
index 7d49034..c86cc93 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HIVE/configuration/webhcat-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HIVE/configuration/webhcat-site.xml
@@ -24,6 +24,6 @@ limitations under the License.
     <name>templeton.libjars</name>
     <value>file:///c:/hdp/hive/lib/zookeeper.jar,file:///c:/hdp/hive/lib/hive-common.jar</value>
    <description>Jars to add to the classpath.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-env.xml
index 517ad50..399aecc 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-env.xml
@@ -23,7 +23,7 @@
   <property>
     <name>oozie_user</name>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie_database</name>
@@ -33,7 +33,7 @@
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie_data_dir</name>
@@ -44,7 +44,7 @@
       <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie_log_dir</name>
@@ -55,7 +55,7 @@
       <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie_pid_dir</name>
@@ -66,7 +66,7 @@
       <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- oozie-env.cmd -->
   <property>
@@ -142,6 +142,6 @@ set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\bin
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-site.xml
index 42eff2e..7bcc994 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-site.xml
@@ -37,7 +37,7 @@
         <name>oozie.db.schema.name</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.JPAService.create.db.schema</name>
@@ -48,7 +48,7 @@
      If set to true, it creates the DB schema if it does not exist; if the DB schema already exists, this is a NOP.
      If set to false, it does not create the DB schema; if the DB schema does not exist, start up fails.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.JPAService.jdbc.driver</name>
@@ -66,7 +66,7 @@
         <name>oozie_database</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
@@ -79,7 +79,7 @@
       the Oozie configuration directory; though the path can be absolute (i.e. to point
      to Hadoop client conf/ directories in the local filesystem).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.coord.check.maximum.frequency</name>
@@ -89,7 +89,7 @@
       this check or submit coordinators with frequencies faster than 5 minutes: doing so can cause unintended behavior and
       additional system stress.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.services</name>
@@ -133,16 +133,16 @@
       All services to be created and managed by Oozie Services singleton.
       Class names must be separated by commas.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.SchemaService.wf.ext.schemas</name>
     <value>shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.AuthorizationService.security.enabled</name>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/STORM/configuration/storm-site.xml
index a79a4c6..14ef896 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/STORM/configuration/storm-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/STORM/configuration/storm-site.xml
@@ -26,37 +26,37 @@
       <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>topology.min.replication.count.default</name>
     <value>1</value>
     <description>Default minimum number of nimbus hosts where the code must be replicated before leader nimbus can mark the topology as active and create assignments. </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>topology.min.replication.count</name>
     <value>{{actual_topology_min_replication_count}}</value>
     <description>Calculated minimum number of nimbus hosts where the code must be replicated before leader nimbus can mark the topology as active and create assignments. </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>topology.max.replication.wait.time.sec.default</name>
     <value>60</value>
     <description>Default maximum wait time for the nimbus host replication to achieve the nimbus.min.replication.count. Once this time is elapsed nimbus will go ahead and perform topology activation tasks even if required nimbus.min.replication.count is not achieved</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>topology.max.replication.wait.time.sec</name>
     <value>{{actual_topology_max_replication_wait_time_sec}}</value>
     <description>Calculated maximum wait time for the nimbus host replication to achieve the nimbus.min.replication.count. Once this time is elapsed nimbus will go ahead and perform topology activation tasks even if required nimbus.min.replication.count is not achieved</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>nimbus.host</name>
     <value>localhost</value>
     <description>Deprecated config in favor of nimbus.seeds used during non HA mode.</description>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
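
The storm-site hunk above also illustrates how a deprecated key (nimbus.host) is retired: the property keeps its name and description but carries a <deleted>true</deleted> marker next to the simplified upgrade tag. A hedged sketch of that pattern, with a hypothetical property name:

  <property>
    <name>example.deprecated.property</name>  <!-- hypothetical; nimbus.host plays this role in the hunk above -->
    <description>Deprecated in favor of a newer key and removed from the rendered configuration.</description>
    <deleted>true</deleted>
    <on-ambari-upgrade add="true"/>
  </property>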

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/TEZ/configuration/tez-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/TEZ/configuration/tez-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/TEZ/configuration/tez-site.xml
index 8e48aee..78ad07e 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/TEZ/configuration/tez-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/TEZ/configuration/tez-site.xml
@@ -26,12 +26,12 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.cluster.additional.classpath.prefix</name>
     <value>C:\hdp\hadoop\share\hadoop\common\lib\hadoop-lzo-0.4.19.{{hdp_stack_version}}.jar</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.lib.uris</name>
@@ -40,7 +40,7 @@
       Specifying a single .tar.gz or .tgz assumes that a compressed version of the tez libs is being used. This is uncompressed into a tezlibs directory when running containers, and tezlibs/;tezlibs/lib/ are added to the classpath (after . and .*).
       If multiple files are specified - files are localized as regular files, contents of directories are localized as regular files (non-recursive).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.runtime.optimize.local.fetch</name>
@@ -49,7 +49,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.generate.counters.per.io</name>
@@ -58,7 +58,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.runtime.sorter.class</name>
@@ -78,7 +78,7 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.runtime.pipelined.sorter.sort.threads</name>
@@ -93,7 +93,7 @@
         <name>tez.runtime.sorter.class</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.runtime.io.sort.mb</name>
@@ -109,6 +109,6 @@
         <name>tez.runtime.sorter.class</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
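
The tez-site hunks combine two pieces of UI metadata: <value-attributes> (a boolean type, or an <entries> list with a <selection-cardinality>) and a <depends-on> block that points at another key such as tez.runtime.sorter.class, so the value is re-evaluated when that key changes. A sketch of a boolean property following that shape; the name is hypothetical and the combination is illustrative rather than copied verbatim from the file:

  <property>
    <name>example.boolean.property</name>  <!-- hypothetical name -->
    <value>true</value>
    <description>Rendered as a checkbox and recomputed when the sorter class changes.</description>
    <value-attributes>
      <type>boolean</type>
    </value-attributes>
    <depends-on>
      <property>
        <name>tez.runtime.sorter.class</name>
      </property>
    </depends-on>
    <on-ambari-upgrade add="true"/>
  </property>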

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/YARN/configuration/capacity-scheduler.xml
index 3984743..bbee4a5 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/YARN/configuration/capacity-scheduler.xml
@@ -19,16 +19,16 @@
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels.default.capacity</name>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity</name>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default-node-label-expression</name>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
index a7c2a04..ab67a5b 100644
--- a/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
@@ -22,7 +22,7 @@
     <name>hadoop.http.authentication.simple.anonymous.allowed</name>
     <value>true</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.key.provider.path</name>
@@ -48,22 +48,22 @@
         <name>ranger.service.https.attrib.ssl.enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- HDFS properties required for HAWQ -->
   <property>
     <name>ipc.client.connection.maxidletime</name>
     <value>3600000</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ipc.client.connect.timeout</name>
     <value>300000</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ipc.server.listen.queue.size</name>
     <value>3300</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
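
The three ipc.* keys added here for HAWQ are plain millisecond and queue-length values; spelled out, 3600000 ms is a 60-minute idle window and 300000 ms is a 5-minute connect timeout. A restatement of the hunk above with unit comments (the values are copied from the diff; the comments are editorial):

  <!-- HDFS client/server tuning required for HAWQ, per the core-site hunk above -->
  <property>
    <name>ipc.client.connection.maxidletime</name>
    <value>3600000</value>  <!-- 3,600,000 ms = 60 minutes -->
    <on-ambari-upgrade add="true"/>
  </property>
  <property>
    <name>ipc.client.connect.timeout</name>
    <value>300000</value>  <!-- 300,000 ms = 5 minutes -->
    <on-ambari-upgrade add="true"/>
  </property>
  <property>
    <name>ipc.server.listen.queue.size</name>
    <value>3300</value>  <!-- length of the server socket listen queue -->
    <on-ambari-upgrade add="true"/>
  </property>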

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml
index f2c8fa1..df2c591 100644
--- a/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml
@@ -22,25 +22,25 @@
     <name>dfs.namenode.startup.delay.block.deletion.sec</name>
     <value>3600</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.journalnode.edits.dir</name>
     <value>/hadoop/hdfs/journalnode</value>
     <description>The path where the JournalNode daemon will store its local state. </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.client.retry.policy.enabled</name>
     <value>false</value>
     <description>Enables HDFS client retry in the event of a NameNode failure.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.content-summary.limit</name>
     <value>5000</value>
     <description>Dfs content summary limit.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.encryption.key.provider.uri</name>
@@ -66,42 +66,42 @@
         <name>ranger.service.https.attrib.ssl.enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- HDFS properties required for HAWQ -->
   <property>
     <name>dfs.allow.truncate</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.support.append</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.client.read.shortcircuit</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.block.local-path-access.user</name>
     <value>gpadmin</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>750</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.handler.count</name>
     <value>60</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.accesstime.precision</name>
     <value>-1</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml b/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
index c706178..d56bfab 100644
--- a/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
+++ b/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
@@ -21,163 +21,163 @@ limitations under the License.
     <name>ambari.hive.db.schema.name</name>
     <value>hive</value>
     <description>Database name used as the Hive Metastore</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value>jdbc</value>
     <description>JDBC connect string for a JDBC metastore</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>javax.jdo.option.ConnectionDriverName</name>
     <value>com.mysql.jdbc.Driver</value>
     <description>Driver class name for a JDBC metastore</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>javax.jdo.option.ConnectionUserName</name>
     <value>hive</value>
     <description>username to use against metastore database</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>javax.jdo.option.ConnectionPassword</name>
     <value> </value>
     <description>password to use against metastore database</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.warehouse.dir</name>
     <value>/apps/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.sasl.enabled</name>
     <value/>
     <description>If true, the metastore thrift interface will be secured with SASL.
      Clients must authenticate with Kerberos.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
     <value/>
     <description>The path to the Kerberos Keytab file containing the metastore
      thrift server's service principal.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.kerberos.principal</name>
     <value/>
     <description>The service principal for the metastore thrift server. The special
     string _HOST will be replaced automatically with the correct host name.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.cache.pinobjtypes</name>
     <value>Table,Database,Type,FieldSchema,Order</value>
     <description>List of comma separated metastore object types that should be pinned in the cache</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.uris</name>
     <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.client.socket.timeout</name>
     <value>60</value>
     <description>MetaStore Client socket timeout in seconds</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.execute.setugi</name>
     <value>true</value>
    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.security.authorization.enabled</name>
     <value>false</value>
     <description>enable or disable the hive client authorization</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.security.authorization.manager</name>
     <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
     <description>the hive client authorization manager class name.
     The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.security.metastore.authorization.manager</name>
     <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
     <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.security.authenticator.manager</name>
     <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
     <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.server2.enable.doAs</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.hdfs.impl.disable.cache</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.file.impl.disable.cache</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.enforce.bucketing</name>
     <value>true</value>
     <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.enforce.sorting</name>
     <value>true</value>
     <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.map.aggr</name>
     <value>true</value>
     <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.bucketmapjoin</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>Whether speculative execution for reducers should be turned on.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.auto.convert.join</name>
     <value>true</value>
     <description>Whether Hive enable the optimization about converting common
       join into mapjoin based on the input file size.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.auto.convert.sortmerge.join</name>
@@ -185,12 +185,12 @@ limitations under the License.
     <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
       the criteria for sort-merge join.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.auto.convert.join.noconditionaltask</name>
@@ -199,7 +199,7 @@ limitations under the License.
      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than the
       specified size, the join is directly converted to a mapjoin (there is no conditional task).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.auto.convert.join.noconditionaltask.size</name>
@@ -208,7 +208,7 @@ limitations under the License.
       is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly
       converted to a mapjoin(there is no conditional task). The default is 10MB.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.reducededuplication.min.reducer</name>
@@ -217,7 +217,7 @@ limitations under the License.
       That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.
       The optimization will be disabled if number of reducers is less than specified value.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.mapjoin.mapreduce</name>
@@ -227,7 +227,7 @@ limitations under the License.
       job (for e.g a group by), each map-only job is merged with the following
       map-reduce job.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.mapjoin.bucket.cache.size</name>
@@ -236,17 +236,17 @@ limitations under the License.
       Size per reducer.The default is 1G, i.e if the input size is 10G, it
       will use 10 reducers.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.vectorized.execution.enabled</name>
     <value>false</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.reducededuplication</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.index.filter</name>
@@ -254,6 +254,6 @@ limitations under the License.
     <description>
     Whether to enable automatic use of indexes
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
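
The description of hive.auto.convert.join.noconditionaltask.size above states the rule: in an n-way join, if the combined size of the n-1 smaller tables stays under the threshold, the join is converted directly to a mapjoin with no conditional task, and the default threshold is 10MB. As a worked example, joining a large fact table with two dimension tables of 4 MB and 5 MB gives 4 + 5 = 9 MB < 10 MB, so the whole join becomes a single mapjoin; adding a third 3 MB dimension table pushes the sum to 12 MB and the conversion no longer applies. A hedged override sketch follows; the byte value is a hypothetical ~30 MB threshold, not the value in the diffed file (which is elided from the hunk):

  <property>
    <name>hive.auto.convert.join.noconditionaltask.size</name>
    <value>30000000</value>  <!-- hypothetical override: roughly a 30 MB ceiling for the n-1 small-table sum -->
    <on-ambari-upgrade add="true"/>
  </property>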

