http://git-wip-us.apache.org/repos/asf/ambari/blob/b59dc1dd/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-site.xml
index f395c29..2d56e38 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/configuration/yarn-site.xml
@@ -27,7 +27,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.generic-application-history.store-class</name>
@@ -35,7 +35,7 @@
     <description>
       Store class name for history store, defaulting to file system store
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.webapp.address</name>
@@ -69,7 +69,7 @@
     <value-attributes>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
@@ -78,7 +78,7 @@
     <value-attributes>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These properties exist in HDP 2.2 and higher. -->
@@ -86,7 +86,7 @@
    <name>yarn.application.classpath</name>
    <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value>
     <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hadoop.registry.rm.enabled</name>
@@ -94,7 +94,7 @@
     <description>
      Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hadoop.registry.zk.quorum</name>
@@ -108,7 +108,7 @@
     <name>yarn.nodemanager.recovery.enabled</name>
     <value>true</value>
    <description>Enable the node manager to recover after starting</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.recovery.dir</name>
@@ -117,19 +117,19 @@
       The local filesystem directory in which the node manager will store
       state when recovery is enabled.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
     <value>10000</value>
    <description>Time interval between each attempt to connect to NM</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.client.nodemanager-connect.max-wait-ms</name>
     <value>60000</value>
     <description>Max time to wait to establish a connection to NM</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.recovery.enabled</name>
@@ -138,7 +138,7 @@
       Enable RM to recover state after starting.
       If true, then yarn.resourcemanager.store.class must be specified.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
@@ -150,7 +150,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.store.class</name>
@@ -161,7 +161,7 @@
       the store is implicitly fenced; meaning a single ResourceManager
       is able to use the store at any point in time.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-address</name>
@@ -175,31 +175,31 @@
     <name>yarn.resourcemanager.zk-state-store.parent-path</name>
     <value>/rmstore</value>
    <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-acl</name>
     <value>world:anyone:rwcda</value>
     <description>ACL's to be used for ZooKeeper znodes.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
     <value>10000</value>
    <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.connect.retry-interval.ms</name>
     <value>30000</value>
    <description>How often to try connecting to the ResourceManager.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.connect.max-wait.ms</name>
     <value>900000</value>
    <description>Maximum time to wait to establish connection to ResourceManager</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-retry-interval-ms</name>
@@ -209,43 +209,43 @@
       automatically from yarn.resourcemanager.zk-timeout-ms and
       yarn.resourcemanager.zk-num-retries."
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-num-retries</name>
     <value>1000</value>
    <description>Number of times RM tries to connect to ZooKeeper.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-timeout-ms</name>
     <value>10000</value>
    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.state-store.max-completed-applications</name>
     <value>${yarn.resourcemanager.max-completed-applications}</value>
    <description>The maximum number of completed applications RM state store keeps, less than or equals to ${yarn.resourcemanager.max-completed-applications}. By default, it equals to ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance.Typically,  a smaller value indicates better performance on RM recovery.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
     <value>2000, 500</value>
    <description>hdfs client retry policy specification. hdfs client retry is always enabled. Specified in pairs of sleep-time and number-of-retries and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.fs.state-store.uri</name>
     <value> </value>
    <description>RI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.ha.enabled</name>
     <value>false</value>
     <description>enable RM HA or not</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
@@ -257,7 +257,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
@@ -269,7 +269,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
@@ -281,7 +281,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
@@ -293,13 +293,13 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
     <value>false</value>
    <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.cpu-vcores</name>
@@ -324,7 +324,7 @@
         <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
@@ -337,7 +337,7 @@
       <maximum>100</maximum>
       <increment-step>1</increment-step>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.node-labels.fs-store.retry-policy-spec</name>
@@ -347,19 +347,19 @@
       specified by N pairs of sleep-time in milliseconds and number-of-retries
       &quot;s1,n1,s2,n2,...&quot;.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
     <value>1000</value>
    <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
     <value>90</value>
    <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
@@ -368,37 +368,37 @@
       This configuration is for debug and test purpose.
       By setting this configuration as true.
      We can break the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
     <value>30</value>
    <description>This is temporary solution. The configuration will be deleted once, we find a more scalable method to only write a single log file per LRS.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
     <value>true</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
     <value>10</value>
    <description>Number of worker threads that send the yarn system metrics data.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.client.max-retries</name>
     <value>30</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.client.retry-interval-ms</name>
     <value>1000</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.ttl-enable</name>
@@ -409,13 +409,13 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
    <name>yarn.timeline-service.state-store-class</name>
    <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
     <description>Store class name for timeline state store.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-state-store.path</name>
@@ -424,7 +424,7 @@
     <value-attributes>
       <type>directory</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.path</name>
@@ -433,7 +433,7 @@
     <value-attributes>
       <type>directory</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
@@ -441,7 +441,7 @@
     <description>
      Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
@@ -449,7 +449,7 @@
     <description>
      Size of cache for recently read entity start times for leveldb timeline store in number of entities.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
@@ -457,7 +457,7 @@
     <description>
      Size of cache for recently written entity start times for leveldb timeline store in number of entities.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.http-authentication.type</name>
@@ -466,13 +466,13 @@
       Defines authentication used for the Timeline Server HTTP endpoint.
      Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
     <value>true</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
@@ -483,25 +483,25 @@
       tokens(fallback to kerberos if the tokens are missing).
       Only applicable when the http authentication type is kerberos.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.bind-host</name>
     <value>0.0.0.0</value>
    <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.bind-host</name>
     <value>0.0.0.0</value>
    <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.bind-host</name>
     <value>0.0.0.0</value>
    <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.node-labels.fs-store.root-dir</name>
@@ -509,7 +509,7 @@
     <description>
       URI for NodeLabelManager.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.scheduler.minimum-allocation-vcores</name>
@@ -528,7 +528,7 @@
         <name>yarn.nodemanager.resource.cpu-vcores</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.scheduler.maximum-allocation-vcores</name>
@@ -547,7 +547,7 @@
         <name>yarn.nodemanager.resource.cpu-vcores</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.node-labels.enabled</name>
@@ -570,7 +570,7 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.container-executor.class</name>
@@ -586,7 +586,7 @@
         <name>hadoop.security.authentication</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.group</name>
@@ -602,7 +602,7 @@
         <name>user_group</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.scheduler.monitor.enable</name>
@@ -627,7 +627,7 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- This first section of properties exists in HDP 2.3 and higher. -->
@@ -638,7 +638,7 @@
       true, then yarn.timeline-service.state-store-class must be specified.
     </description>
     <value>true</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.acl.enable</name>
@@ -650,8 +650,8 @@
         <name>ranger-yarn-plugin-enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>arn.nodemanager.linux-container-executo
+    <on-ambari-upgrade add="false"/>
+  </property>
   <property>
     <name>yarn.authorization-provider</name>
     <description> Yarn authorization provider class. </description>
@@ -661,7 +661,7 @@
         <name>ranger-yarn-plugin-enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.admin.acl</name>
@@ -670,32 +670,32 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- ATS v1.5 properties that exist in HDP 2.3 and higher. -->
   <property>
     <name>yarn.timeline-service.version</name>
     <value>1.5</value>
    <description>Timeline service version we&#x2019;re currently using.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
    <name>yarn.timeline-service.store-class</name>
    <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value>
     <description>Main storage class for YARN timeline server.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
     <value>/ats/active/</value>
    <description>DFS path to store active application&#x2019;s timeline data</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
     <value>/ats/done/</value>
    <description>DFS path to store done application&#x2019;s timeline data</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
@@ -704,7 +704,7 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- advanced ats v1.5 properties-->
   <property>
@@ -712,7 +712,7 @@
     <description>Summary storage for ATS v1.5</description>
    <!-- Use rolling leveldb, advanced -->
    <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
@@ -723,7 +723,7 @@
     </description>
     <!-- Default is 60 seconds, advanced -->
     <value>60</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
  <property>
    <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
@@ -734,7 +734,7 @@
     </description>
     <!-- 3600 is default, advanced -->
     <value>3600</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
@@ -744,7 +744,7 @@
     </description>
     <!-- 7 days is default, advanced -->
     <value>604800</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
 

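Every hunk in this patch makes the same mechanical change: each affected property in the PERF stack's yarn-site.xml switches from <on-ambari-upgrade add="true"/> to <on-ambari-upgrade add="false"/>. As a quick way to verify a local copy of the file after applying the patch, the short Python sketch below lists any property stanza still carrying add="true". It is not part of this commit, and the file path is an assumption for illustration.

    import xml.etree.ElementTree as ET

    # Assumed local copy of the stack config; adjust the path as needed.
    CONFIG = "yarn-site.xml"

    tree = ET.parse(CONFIG)
    for prop in tree.getroot().iter("property"):
        name = prop.findtext("name", default="<unnamed>")
        upgrade = prop.find("on-ambari-upgrade")
        # Report stanzas that a change like this patch would still need to flip.
        if upgrade is not None and upgrade.get("add") == "true":
            print(name, 'still has on-ambari-upgrade add="true"')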