http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
index f267e51..63e4c95 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
@@ -1,8 +1,26 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
 <!-- Put site-specific property overrides in this file. -->
 <configuration supports_final="true">
-  <!-- file system properties -->
+
   <property>
     <name>dfs.namenode.name.dir</name>
     <value>file:///c:/hdpdata/hdfs/nn</value>
@@ -13,24 +31,6 @@
     <final>true</final>
   </property>
   <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>#of failed disks dn would tolerate</description>
-    <final>true</final>
-  </property>
-  <property>
     <name>dfs.datanode.data.dir</name>
     <value>file:///c:/hdpdata/hdfs/dn</value>
     <description>Determines where on the local filesystem an DFS data node
@@ -42,231 +42,41 @@
     <final>true</final>
   </property>
   <property>
-    <name>dfs.checksum.type</name>
-    <value>CRC32</value>
-    <description>The checksum method to be used by default. To maintain
-    compatibility, it is being set to CRC32. Once all migration steps
-    are complete, we can change it to CRC32C and take advantage of the
-    additional performance benefit.</description>
-  </property>
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in term of
-        the number of bytes per second.
-  </description>
-  </property>
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-  </property>
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-  <property>
-    <name>dfs.datanode.https.address</name>
-    <value>0.0.0.0:50076</value>
-  </property>
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-    <description>The default block size for new files, in bytes.
-      You can use the following suffix (case insensitive): k(kilo),
-      m(mega), g(giga), t(tera), p(peta), e(exa) to specify the
-      size (such as 128k, 512m, 1g, etc.), Or provide complete size
-      in bytes (such as 134217728 for 128 MB).</description>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address</name>
-    <value>localhost:50070</value>
-    <description>The address and the base port where the dfs namenode
-      web ui will listen on. If the port is 0 then the server will
-      start on a free port.</description>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.https.port</name>
-    <value>50070</value>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space
-    </description>
+    <name>dfs.hosts.exclude</name>
+    <value>c:\hdp\hadoop\etc\hadoop\dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+      not permitted to connect to the namenode.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.</description>
   </property>
   <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
+    <name>dfs.hosts</name>
+    <value>c:\hdp\hadoop\etc\hadoop\dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+      permitted to connect to the namenode. The full pathname of the file
+      must be specified.  If the value is empty, all hosts are
+      permitted.</description>
   </property>
   <property>
     <name>dfs.namenode.checkpoint.dir</name>
-    <value>file:///c:/hdpdata/hdfs/snn</value>
+    <value>file:///c:/hadoop/hdfs/snn</value>
     <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
+      name node should store the temporary images to merge.
+      If this is a comma-delimited list of directories then the image is
+      replicated in all of the directories for redundancy.
     </description>
   </property>
+
   <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>file:///c:/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directoires then teh edits is
-        replicated in all of the directoires for redundancy.
-        Default value is same as dfs.namenode.checkpoint.dir
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>86400</value>
-    <description>The number of seconds between two periodic checkpoints.
-    </description>
-  </property>
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-    <description>Specifies the maximum number of threads to use for
-      transferring data in and out of the DN.</description>
-  </property>
-  <!-- Permissions configuration -->
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-        If "true", enable permission checking in HDFS.
-        If "false", permission checking is turned off,
-        but all other behavior is unchanged.
-        Switching from one parameter value to the other does not change the mode,
-        owner or group of files or directories.
-    </description>
-  </property>
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>false</value>
-    <description>
-        If "true", access tokens are used as capabilities for accessing 
datanodes.
-        If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.secondary.http-address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50091</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address</name>
-    <value>localhost:50701</value>
-    <description>The https address where namenode binds</description>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>755</value>
-    <description>The permissions that should be there on dfs.data.dir
-        directories. The datanode will not come up if the permissions are
-        different on existing dfs.data.dir directories. If the directories
-        don't exist, they will be created with this permission.</description>
-  </property>
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-    <description>The access time for HDFS file is precise upto this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value>hdfs</value>
-    <description>ACL for who all can view the default servlets in the HDFS</description>
-  </property>
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description />
-  </property>
-  <property>
-    <name>dfs.encrypt.data.transfer</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>dfs.encrypt.data.transfer.algorithm</name>
-    <value>3des</value>
-  </property>
-  <property>
-    <name>dfs.https.enable</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>dfs.replication</name>
-    <value>1</value>
+    <name>dfs.client.read.shortcircuit</name>
+    <deleted>true</deleted>
   </property>
-
   <property>
-    <name>dfs.hosts.exclude</name>
-    <value>c:\hdp\hadoop\etc\hadoop\dfs.exclude</value>
+    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
+    <deleted>true</deleted>
   </property>
-
   <property>
-    <name>dfs.hosts</name>
-    <value>c:\hdp\hadoop\etc\hadoop\dfs.include</value>
+    <name>dfs.domain.socket.path</name>
+    <deleted>true</deleted>
   </property>
 </configuration>
\ No newline at end of file
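
A note on the <deleted>true</deleted> entries above: in an Ambari stack configuration file, a plain <property> block overrides the value inherited from the parent definition, while marking a property as deleted removes it from the inherited configuration altogether. A minimal sketch of the two patterns, assuming inheritance from the common-services HDFS definition referenced in the metainfo.xml change below, and reusing property names that appear in this diff:

  <configuration supports_final="true">
    <!-- Override an inherited value with a Windows-specific path -->
    <property>
      <name>dfs.namenode.checkpoint.dir</name>
      <value>file:///c:/hadoop/hdfs/snn</value>
    </property>
    <!-- Remove an inherited property entirely -->
    <property>
      <name>dfs.client.read.shortcircuit</name>
      <deleted>true</deleted>
    </property>
  </configuration>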

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
index 127e205..fe1bb17 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
@@ -20,143 +20,21 @@
   <services>
     <service>
       <name>HDFS</name>
-      <displayName>HDFS</displayName>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>2.1.0.2.0</version>
+      <extends>common-services/HDFS/2.1.0.2.0</extends>
+      <version>2.4.0.2.1.1.0</version>
 
       <components>
         <component>
-          <name>NAMENODE</name>
-          <displayName>NameNode</displayName>
-          <category>MASTER</category>
-          <cardinality>1-2</cardinality>
-          <commandScript>
-            <script>scripts/namenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>REBALANCEHDFS</name>
-              <background>true</background>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>DATANODE</name>
-          <displayName>DataNode</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/datanode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>SECONDARY_NAMENODE</name>
-          <displayName>SNameNode</displayName>
-          <!-- TODO:  cardinality is conditional on HA usage -->
-          <cardinality>1</cardinality>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/snamenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
           <name>HDFS_CLIENT</name>
-          <displayName>HDFS Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hdfs_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
           <configFiles>
             <configFile>
-              <type>xml</type>
-              <fileName>hdfs-site.xml</fileName>
-              <dictionaryName>hdfs-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
               <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hadoop-env.cmds</fileName>
+              <fileName>hadoop-env.cmd</fileName>
               <dictionaryName>hadoop-env</dictionaryName>
             </configFile>
           </configFiles>
         </component>
-
-        <component>
-          <name>JOURNALNODE</name>
-          <displayName>JournalNode</displayName>
-          <category>SLAVE</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/journalnode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <displayName>ZKFailoverController</displayName>
-          <category>SLAVE</category>
-          <!-- TODO: cardinality is conditional on HA topology -->
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/zkfc_slave.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-env</config-type>
-        <config-type>hadoop-policy</config-type>
-        <config-type>hdfs-log4j</config-type>
-      </configuration-dependencies>
     </service>
   </services>
 </metainfo>
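
A note on the metainfo.xml rewrite above: the new <extends>common-services/HDFS/2.1.0.2.0</extends> element makes this stack service inherit its components, command scripts, required services, and configuration dependencies from the common-services HDFS definition, so the HDPWIN file only keeps what differs on Windows. Trimmed to the elements visible in this hunk, the surviving file is roughly:

  <metainfo>
    <services>
      <service>
        <name>HDFS</name>
        <extends>common-services/HDFS/2.1.0.2.0</extends>
        <version>2.4.0.2.1.1.0</version>
        <components>
          <component>
            <name>HDFS_CLIENT</name>
            <configFiles>
              <configFile>
                <type>env</type>
                <fileName>hadoop-env.cmd</fileName>
                <dictionaryName>hadoop-env</dictionaryName>
              </configFile>
            </configFiles>
          </component>
        </components>
      </service>
    </services>
  </metainfo>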

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hcat-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hcat-env.xml
new file mode 100644
index 0000000..ee18f17
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hcat-env.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hcat-env.cmd -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hcat-env.cmd file</description>
+    <value>
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml
index 57144be..6bdb8f3 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml
@@ -27,13 +27,6 @@
     <description>Default HIVE DB type.</description>
   </property>
   <property>
-    <name>hive_hostname</name>
-    <value></value>
-    <description>
-      Specify the host on which the HIVE database is hosted.
-    </description>
-  </property>
-  <property>
     <name>hive_database</name>
     <value>Existing MSSQL Server database with sql auth</value>
     <description>
@@ -42,57 +35,43 @@
   </property>
   <property>
     <name>hive_ambari_database</name>
-    <value>MySQL</value>
+    <value>MSSQL</value>
     <description>Database type.</description>
   </property>
   <property>
-    <name>hive_database_name</name>
-    <value>hive</value>
-    <description>Database name.</description>
-  </property>
-  <property>
-    <name>hive_dbroot</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive DB Directory.</description>
-  </property>
-  <property>
     <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
+    <value>c:\hadoop\logs\hive</value>
     <description>Directory for Hive Log files.</description>
   </property>
   <property>
     <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
+    <value>c:\hadoop\run\hive</value>
     <description>Hive PID Dir.</description>
   </property>
   <property>
     <name>hive_user</name>
-    <value>hive</value>
-    <description>Hive User.</description>
+    <deleted>true</deleted>
   </property>
 
   <!--HCAT-->
-
-  <!--<property>
+  <property>
     <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
+    <value>c:\hadoop\logs\webhcat</value>
     <description>WebHCat Log Dir.</description>
   </property>
   <property>
     <name>hcat_pid_dir</name>
-    <value>/var/run/webhcat</value>
+    <value>c:\hadooop\run\webhcat</value>
     <description>WebHCat Pid Dir.</description>
   </property>
   <property>
     <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
+    <deleted>true</deleted>
   </property>
   <property>
     <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-  </property>-->
+    <deleted>true</deleted>
+  </property>
 
   <!-- hive-env.cmd -->
   <property>
@@ -101,5 +80,4 @@
     <value>
     </value>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml
index 3f90c76..e479f79 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml
@@ -3,54 +3,7 @@
 
 <configuration supports_final="true">
 
-  <!-- Hive Configuration can either be stored in this file or in the hadoop configuration files  -->
-  <!-- that are implied by Hadoop setup variables.                                                -->
-  <!-- Aside from Hadoop setup variables - this file is provided as a convenience so that Hive    -->
-  <!-- users do not have to edit hadoop configuration files (that may be managed as a centralized -->
-  <!-- resource).                                                                                 -->
-
-  <!-- Hive Execution Parameters -->
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-  </property>
-
-  <property>
-    <name>hive.metastore.connect.retries</name>
-    <value>5</value>
-    <description>Number of retries while opening a connection to metastore</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.ds.retry.attempts</name>
-    <value>0</value>
-    <description>The number of times to retry a metastore call if there were a connection error</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.ds.retry.interval</name>
-    <value>1000</value>
-    <description>The number of miliseconds between metastore retry attempts</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.hmshandler.retry.attempts</name>
-    <value>5</value>
-    <description>The number of times to retry a HMSHandler call if there were a connection error</description>
-  </property>
-
-  <property>
-    <name>hive.hmshandler.retry.interval</name>
-    <value>1000</value>
-    <description>The number of miliseconds between HMSHandler retry attempts</description>
-  </property>
-
+  <!-- Windows specific properties -->
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value></value>
@@ -64,228 +17,249 @@
   </property>
 
   <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used as the Hive Metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property require-input="true">
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <type>PASSWORD</type>
-    <description>password to use against metastore database</description>
+    <name>hive.querylog.location</name>
+    <value>c:\hadoop\logs\hive</value>
   </property>
 
   <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
+    <name>hive.log.dir</name>
+    <value>c:\hadoop\logs\hive</value>
   </property>
 
+  <!-- New/Updated properties for 2.1 -->
   <property>
-    <name>hive.hwi.listen.host</name>
-    <value>0.0.0.0</value>
-    <description>This is the host address the Hive Web Interface will listen on</description>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
+    <description>The path to the Kerberos Keytab file containing the metastore
+      thrift server's service principal.</description>
   </property>
 
   <property>
-    <name>hive.hwi.listen.port</name>
-    <value>9999</value>
-    <description>This is the port the Hive Web Interface will listen on</description>
+    <name>hive.metastore.kerberos.principal</name>
+    <value>hive/_h...@example.com</value>
+    <description>The service principal for the metastore thrift server. The special
+      string _HOST will be replaced automatically with the correct host name.</description>
   </property>
 
   <property>
-    <name>hive.hwi.war.file</name>
-    <value>lib\hive-hwi-@hive.version@.war</value>
-    <description>This is the WAR file with the jsp content for Hive Web Interface</description>
+    <name>hive.enforce.sortmergebucketmapjoin</name>
+    <value>true</value>
+    <description>If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not</description>
   </property>
 
   <property>
-    <name>hive.server2.transport.mode</name>
-    <value>binary</value>
-    <description>Server transport mode. "binary" or "http".</description>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>4</value>
+    <description>Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
+      That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.
+      The optimization will be disabled if number of reducers is less than specified value.
+    </description>
   </property>
 
   <property>
-    <name>hive.server2.thrift.http.port</name>
-    <value>10001</value>
-    <description>Port number when in HTTP mode.</description>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>true</value>
+    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
+    </description>
   </property>
 
   <property>
-    <name>hive.server2.thrift.http.path</name>
-    <value>/</value>
-    <description>Path component of URL endpoint when in HTTP mode.</description>
+    <name>hive.execution.engine</name>
+    <value>mr</value>
+    <description>Whether to use MR or Tez</description>
   </property>
 
   <property>
-    <name>hive.server2.thrift.http.min.worker.threads</name>
-    <value>5</value>
-    <description>Minimum number of worker threads when in HTTP mode.</description>
+    <name>hive.exec.post.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of post-execution hooks to be invoked for each statement.</description>
   </property>
 
   <property>
-    <name>hive.server2.thrift.http.max.worker.threads</name>
-    <value>100</value>
-    <description>Maximum number of worker threads when in HTTP mode.</description>
+    <name>hive.exec.pre.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of pre-execution hooks to be invoked for each statement.</description>
   </property>
 
   <property>
-    <name>hive.server2.thrift.port</name>
-    <value>10001</value>
-    <description>HiveServer2 thrift port</description>
+    <name>hive.exec.failure.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of on-failure hooks to be invoked for each statement.</description>
   </property>
 
   <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>false</value>
+    <name>hive.vectorized.groupby.maxentries</name>
+    <value>100000</value>
+    <description>Max number of entries in the vector group by aggregation hashtables.
+      Exceeding this will trigger a flush irrelevant of memory pressure condition.
+    </description>
   </property>
 
   <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-    <description>enable or disable the hive client authorization</description>
+    <name>hive.vectorized.groupby.checkinterval</name>
+    <value>1024</value>
+    <description>Number of entries added to the group by aggregation hash before a reocmputation of average entry size is performed.</description>
   </property>
 
   <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <name>hive.vectorized.groupby.flush.percent</name>
+    <value>0.1</value>
+    <description>Percent of entries in the group by aggregation hash flushed when the memory treshold is exceeded.</description>
   </property>
 
   <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
+    <name>hive.stats.autogather</name>
     <value>true</value>
+    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
   </property>
 
   <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
+    <name>hive.tez.container.size</name>
+    <value>682</value>
+    <description>By default, Tez uses the java options from map tasks. Use this property to override that value.</description>
   </property>
 
   <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
+    <name>hive.tez.input.format</name>
+    <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+    <description>The default input format for Tez. Tez groups splits in the Application Master.</description>
   </property>
 
   <property>
-    <name>hive.optimize.index.filter</name>
-    <value>true</value>
+    <name>hive.tez.java.opts</name>
+    <value>-server -Xmx545m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps</value>
+    <description>Java command line options for Tez. The -Xmx parameter value is generally 80% of hive.tez.container.size.</description>
   </property>
 
   <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
+    <name>hive.compute.query.using.stats</name>
+    <value>true</value>
+    <description>
+      When set to true Hive will answer a few queries like count(1) purely using stats
+      stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.
+      For more advanced stats collection need to run analyze table queries.
+    </description>
   </property>
 
   <property>
     <name>hive.orc.splits.include.file.footer</name>
     <value>false</value>
+    <description>
+      If turned on splits generated by orc will include metadata about the stripes in the file. This
+      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
+    </description>
   </property>
 
   <property>
-    <name>hive.exec.local.cache</name>
+    <name>hive.limit.optimize.enable</name>
     <value>true</value>
+    <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
   </property>
 
   <property>
-    <name>hive.vectorized.execution.enabled</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.vectorized.groupby.flush.percent</name>
-    <value>1.0</value>
+    <name>hive.limit.pushdown.memory.usage</name>
+    <value>0.04</value>
+    <description>The max memory to be used for hash in RS operator for top K selection.</description>
   </property>
 
   <property>
-    <name>hive.vectorized.groupby.checkinterval</name>
-    <value>1024</value>
+    <name>hive.server2.tez.default.queues</name>
+    <value>default</value>
+    <description>A comma-separated list of queues configured for the cluster.</description>
   </property>
 
   <property>
-    <name>hive.vectorized.groupby.maxentries</name>
-    <value>1024</value>
+    <name>hive.server2.tez.sessions.per.default.queue</name>
+    <value>1</value>
+    <description>The number of sessions for each queue named in the hive.server2.tez.default.queues.</description>
   </property>
 
   <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <name>hive.server2.tez.initialize.default.sessions</name>
     <value>false</value>
+    <description>Enables a user to use HiveServer2 without enabling Tez for HiveServer2. Users may potentially may want to run queries with Tez without a pool of sessions.</description>
   </property>
 
   <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
+    <name>hive.txn.manager</name>
+    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+    <description>Select the class to do transaction management. The default DummyTxnManager does no transactions and retains the legacy behavior.</description>
   </property>
 
   <property>
-    <name>hive.enforce.sortmergebucketmapjoin</name>
-    <value>true</value>
+    <name>hive.txn.timeout</name>
+    <value>300</value>
+    <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
   </property>
 
   <property>
-    <name>hive.convert.join.bucket.mapjoin.tez</name>
-    <value>false</value>
+    <name>hive.txn.max.open.batch</name>
+    <value>1000</value>
+    <description>Maximum number of transactions that can be fetched in one call to open_txns(). Increasing this will decrease the number of delta files created when streaming data into Hive. But it will also increase the number of open transactions at any given time, possibly impacting read performance.</description>
   </property>
 
   <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
+    <name>hive.compactor.initiator.on</name>
+    <value>false</value>
+    <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the thrift metastore this should only be set to true for one of them.</description>
   </property>
 
   <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
+    <name>hive.compactor.worker.threads</name>
+    <value>0</value>
+    <description>Number of compactor worker threads to run on this metastore instance. Can be different values on different metastore instances.</description>
   </property>
 
   <property>
-    <name>hive.server2.tez.sessions.per.default.queue</name>
-    <value>1</value>
+    <name>hive.compactor.worker.timeout</name>
+    <value>86400L</value>
+    <description>Time, in seconds, before a given compaction in working state is declared a failure and returned to the initiated state.</description>
   </property>
 
   <property>
-    <name>hive.server2.tez.initialize.default.sessions</name>
-    <value>false</value>
+    <name>hive.compactor.check.interval</name>
+    <value>300L</value>
+    <description>Time in seconds between checks to see if any partitions need compacted. This should be kept high because each check for compaction requires many calls against the NameNode.</description>
   </property>
 
   <property>
-    <name>hive.server2.tez.default.queues</name>
-    <value>default</value>
+    <name>hive.compactor.delta.num.threshold</name>
+    <value>10</value>
+    <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
   </property>
 
   <property>
-    <name>hive.stats.dbclass</name>
-    <value>fs</value>
+    <name>hive.compactor.delta.pct.threshold</name>
+    <value>0.1f</value>
+    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
   </property>
 
   <property>
-    <name>hive.compute.query.using.stats</name>
-    <value>true</value>
+    <name>hive.compactor.abortedtxn.threshold</name>
+    <value>1000</value>
+    <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
   </property>
 
-
   <property>
-    <name>hive.querylog.location</name>
-    <value>c:\hadoop\logs\hive</value>
+    <name>datanucleus.cache.level2.type</name>
+    <value>none</value>
+    <description>Determines caching mechanism DataNucleus L2 cache will use. It is strongly recommended to use default value of 'none' as other values may cause consistency errors in Hive.</description>
   </property>
 
   <property>
-    <name>hive.log.dir</name>
-    <value>c:\hadoop\logs\hive</value>
+    <name>hive.server2.authentication.spnego.principal</name>
+    <value>HTTP/_h...@example.com</value>
+    <description>
+      This keytab would be used by HiveServer2 when Kerberos security is enabled and HTTP transport mode is used.
+    </description>
   </property>
 
   <property>
-    <name>hive.stats.autogather</name>
-    <value>true</value>
+    <name>hive.server2.authentication.spnego.keytab</name>
+    <value>/etc/security/keytabs/spnego.service.keytab</value>
+    <description>
+      The SPNEGO service principal would be used by HiveServer2 when Kerberos security is enabled and HTTP transport mode is used.
+    </description>
   </property>
 
-  <property>
-    <name>hive.execution.engine</name>
-    <value>mr</value>
-  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-env.xml
new file mode 100644
index 0000000..cd0cb75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-env.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- webhcat-env.cmd -->
+  <property>
+    <name>content</name>
+    <description>webhcat-env.cmd content</description>
+    <value>
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml
index bae9712..7b95ed8 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml
@@ -21,57 +21,32 @@ limitations under the License.
 <!-- Edit templeton-site.xml to change settings for your local -->
 <!-- install. -->
 
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
+<configuration supports_final="true">
 
   <property>
     <name>templeton.jar</name>
-    <value>c:\hdp\hive\hcatalog\share\webhcat\svr\lib\hive-webhca.jar</value>
+    <value>c:\hdp\hive\hcatalog\share\webhcat\svr\lib\hive-webhcat.jar</value>
     <description>The path to the Templeton jar file.</description>
   </property>
 
   <property>
-    <name>templeton.override.enabled</name>
-    <value>false</value>
-    <description>
-      Enable the override path in templeton.override.jars
-    </description>
-  </property>
-
-  <property>
     <name>templeton.hcat</name>
-    <value>${env.HCAT_HOME}/bin/hcat.py</value>
+    <value>${env.HCAT_HOME}\bin\hcat.py</value>
     <description>The path to the hcatalog executable.</description>
   </property>
 
   <property>
     <name>templeton.hadoop</name>
-    <value>${env.HADOOP_HOME}/bin/hadoop.cmd</value>
+    <value>${env.HADOOP_HOME}\bin\hadoop.cmd</value>
     <description>The path to the Hadoop executable.</description>
   </property>
 
   <property>
-    <name>templeton.exec.envs</name>
-    <value>HADOOP_HOME,JAVA_HOME,HIVE_HOME,TEMP,HADOOP_BIN_PATH,PATH,SystemRoot,TEZ_CLASSPATH</value>
-    <description>The environment variables passed through to exec.</description>
-  </property>
-
-  <property>
    <name>templeton.streaming.jar</name>
    <value>file:///c:/hdp/hadoop/share/hadoop/tools/lib/hadoop-streaming.jar</value>
     <description>The hdfs path to the Hadoop streaming jar file.</description>
   </property>
 
-  <property>
-    <name>templeton.hive.properties</name>
-    <value>hive.metastore.local=false,hive.metastore.uris=thrift://WIN-QS1HDPKHRAM:9083</value>
-    <description>Properties to set when running hive.</description>
-  </property>
 
   <property>
     <name>templeton.libjars</name>
@@ -87,23 +62,7 @@ limitations under the License.
 
   <property>
     <name>templeton.hive.path</name>
-    <value>${env.HIVE_HOME}/bin/hive.cmd</value>
+    <value>${env.HIVE_HOME}\bin\hive.cmd</value>
     <description>The path to the Hive executable.</description>
   </property>
-
-
-  <property>
-    <name>templeton.hadoop.queue.name</name>
-    <value>joblauncher</value>
-  </property>
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
-  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml
index af45930..c601a94 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml
@@ -20,147 +20,13 @@
   <services>
     <service>
       <name>HIVE</name>
-      <displayName>Hive</displayName>
-      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-      <version>0.12.0.2.0</version>
+      <extends>common-services/HIVE/0.12.0.2.0</extends>
+      <version>0.13.0.2.1.1.0</version>
       <components>
-
-        <component>
-          <name>HIVE_METASTORE</name>
-          <displayName>Hive Metastore</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>HIVE/HIVE_SERVER</co-locate>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/hive_metastore.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_SERVER</name>
-          <displayName>HiveServer2</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HIVE/HIVE_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/hive_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-        <component>
-          <name>WEBHCAT_SERVER</name>
-          <displayName>WebHCat Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <clientsToUpdateConfigs>
-            <client>HCAT</client>
-          </clientsToUpdateConfigs>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/webhcat_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-        </component>
-        <component>
-          <name>MYSQL_SERVER</name>
-          <displayName>MySQL Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/mysql_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
         <component>
           <name>HIVE_CLIENT</name>
-          <displayName>Hive Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/hive_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
           <configFiles>
             <configFile>
-              <type>xml</type>
-              <fileName>hive-site.xml</fileName>
-              <dictionaryName>hive-site</dictionaryName>
-            </configFile>
-            <configFile>
               <type>env</type>
               <fileName>hive-env.cmd</fileName>
               <dictionaryName>hive-env</dictionaryName>
@@ -169,13 +35,6 @@
         </component>
         <component>
           <name>HCAT</name>
-          <displayName>HCat Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
           <configFiles>
             <configFile>
               <type>env</type>
@@ -184,27 +43,7 @@
             </configFile>
           </configFiles>
         </component>
-
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>1200</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>YARN</service>
-        <service>TEZ</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>hive-env</config-type>
-        <config-type>webhcat-site</config-type>
-      </configuration-dependencies>
     </service>
-
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
index 423db73..4f690b5 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
@@ -23,8 +23,7 @@
 <configuration>
   <property>
     <name>oozie_user</name>
-    <value>oozie</value>
-    <description>Oozie User.</description>
+    <deleted>true</deleted>
   </property>
   <property>
     <name>oozie_hostname</name>
@@ -39,32 +38,22 @@
     <description>Oozie Server Database.</description>
   </property>
   <property>
-    <name>oozie_derby_database</name>
-    <value>Derby</value>
-    <description>Oozie Derby Database</description>
-  </property>
-  <property>
     <name>oozie_data_dir</name>
-    <value>/hadoop/oozie/data</value>
+    <value>c:\hadoop\oozie\data</value>
     <description>Data directory in which the Oozie DB exists</description>
   </property>
   <property>
     <name>oozie_log_dir</name>
-    <value>/var/log/oozie</value>
+    <value>c:\hadoop\logs\oozie</value>
     <description>Directory for oozie logs</description>
   </property>
   <property>
     <name>oozie_pid_dir</name>
-    <value>/var/run/oozie</value>
+    <value>c:\hadoop\run\oozie</value>
-    <description>Directory in which the pid files for oozie reside.</description>
   </property>
-  <property>
-    <name>oozie_admin_port</name>
-    <value>11001</value>
-    <description>The admin port Oozie server runs.</description>
-  </property>
 
-  <!-- oozie-env.sh -->
+  <!-- oozie-env.cmd -->
   <property>
     <name>content</name>
     <description>oozie-env.cmd content</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml
deleted file mode 100644
index cb77566..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml
+++ /dev/null
@@ -1,96 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <value>
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - 
%m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p 
%c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - 
%m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=INFO, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=INFO, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie
-    </value>
-  </property>
-
-</configuration>
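
The stack-local oozie-log4j definition above goes away because, with this change, the HDPWIN 2.1 OOZIE service extends the common-services definition (see the metainfo.xml hunk further below), so the log4j template is presumably inherited from there. If a Windows-specific logging template were ever needed again, the stack would only have to re-declare the single 'content' property. A minimal, hypothetical sketch of such an override, not part of this commit, and noting that a re-declared property value replaces the inherited one wholesale rather than merging with it:

    <configuration supports_final="false">
      <property>
        <name>content</name>
        <value>
    # hypothetical Windows-specific template -- replaces the inherited template entirely
    log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
    log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
    log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
    log4j.appender.oozie.Append=true
    log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
    log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
    log4j.logger.org.apache.oozie=INFO, oozie
        </value>
      </property>
    </configuration>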

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
index 2051d01..739d59b 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
@@ -16,487 +16,87 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<configuration  supports_final="true">
-
-    <!--
-        Refer to the oozie-default.xml file for the complete list of
-        Oozie configuration properties and their default values.
-    -->
-
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>
-            
shell-action-0.1.xsd,shell-action-0.2.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,
-            
hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,
-            
sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,
-            oozie-sla-0.1.xsd,oozie-sla-0.2.xsd
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.system.id</name>
-        <value>oozie-${user.name}</value>
-        <description>
-            The Oozie system ID.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.systemmode</name>
-        <value>NORMAL</value>
-        <description>
-            System mode for  Oozie at startup.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.AuthorizationService.security.enabled</name>
-        <value>true</value>
-        <description>
-            Specifies whether security (user name/admin role) is enabled or 
not.
-            If disabled any user can manage Oozie system and manage any job.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.PurgeService.older.than</name>
-        <value>30</value>
-        <description>
-            Jobs older than this value, in days, will be purged by the 
PurgeService.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.PurgeService.purge.interval</name>
-        <value>3600</value>
-        <description>
-            Interval at which the purge service will run, in seconds.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.CallableQueueService.queue.size</name>
-        <value>10000</value>
-        <description>Max callable queue size</description>
-    </property>
-
-    <property>
-        <name>oozie.service.CallableQueueService.threads</name>
-        <value>10</value>
-        <description>Number of threads used for executing 
callables</description>
-    </property>
-
-    <property>
-        <name>oozie.service.CallableQueueService.callable.concurrency</name>
-        <value>3</value>
-        <description>
-            Maximum concurrency for a given callable type.
-            Each command is a callable type (submit, start, run, signal, job, 
jobs, suspend,resume, etc).
-            Each action type is a callable type (Map-Reduce, Pig, SSH, FS, 
sub-workflow, etc).
-            All commands that use action executors (action-start, action-end, 
action-kill and action-check) use
-            the action type as the callable type.
-        </description>
-    </property>
-
-    <property>
-               <name>oozie.service.coord.normal.default.timeout
-               </name>
-               <value>120</value>
-               <description>Default timeout for a coordinator action input 
check (in minutes) for normal job.
-            -1 means infinite timeout</description>
-       </property>
-
-    <property>
-        <name>oozie.db.schema.name</name>
-        <value>oozie</value>
-        <description>
-            Oozie DataBase Name
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>true</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist. If 
the DB schema exists is a NOP.
-            If set to false, it does not create the DB schema. If the DB 
schema does not exist it fails start up.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>com.microsoft.sqlserver.jdbc.SQLServerDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value></value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>oozie</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value>oozie</value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if password is emtpy leave a 1 space string, the 
service trims the value,
-                       if empty Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.kerberos.enabled</name>
-        <value>false</value>
-        <description>
-            Indicates if Oozie is configured to use Kerberos.
-        </description>
-    </property>
-
-    <property>
-        <name>local.realm</name>
-        <value>LOCALHOST</value>
-        <description>
-            Kerberos Realm used by Oozie and Hadoop. Using 'local.realm' to be 
aligned with Hadoop configuration
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.keytab.file</name>
-        <value>${user.home}/oozie.keytab</value>
-        <description>
-            Location of the Oozie user keytab file.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.kerberos.principal</name>
-        <value>${user.name}/localhost@${local.realm}</value>
-        <description>
-            Kerberos principal for Oozie service.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-        <value> </value>
-        <description>
-            Whitelisted job tracker for Oozie service.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-        <value> </value>
-        <description>
-            Whitelisted job tracker for Oozie service.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-        <value>*=c:\hdp\hadoop\etc\hadoop</value>
-        <description>
-            Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the 
HOST:PORT of
-            the Hadoop service (JobTracker, HDFS). The wildcard '*' 
configuration is
-            used when there is no exact match for an authority. The 
HADOOP_CONF_DIR contains
-            the relevant Hadoop *-site.xml files. If the path is relative is 
looked within
-            the Oozie configuration directory; though the path can be absolute 
(i.e. to point
-            to Hadoop client conf/ directories in the local filesystem.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.WorkflowAppService.system.libpath</name>
-        <value>/user/${user.name}/share/lib</value>
-        <description>
-            System library path to use for workflow applications.
-            This path is added to workflow application if their job properties 
sets
-            the property 'oozie.use.system.libpath' to true.
-        </description>
-    </property>
-
-    <property>
-        <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-        <value>false</value>
-        <description>
-            If set to true, submissions of MapReduce and Pig jobs will include
-            automatically the system library path, thus not requiring users to
-            specify where the Pig JAR files are. Instead, the ones from the 
system
-            library path are used.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.type</name>
-        <value>simple</value>
-        <description>
-            Defines authentication used for Oozie HTTP endpoint.
-            Supported values are: simple | basic | kerberos | 
#AUTHENTICATION_HANDLER_CLASSNAME#
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.token.validity</name>
-        <value>36000</value>
-        <description>
-            Indicates how long (in seconds) an authentication token is valid 
before it has
-            to be renewed.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.signature.secret</name>
-        <value>oozie</value>
-        <description>
-            The signature secret for signing the authentication tokens.
-            If not set a random secret is generated at startup time.
-            In order to authentiation to work correctly across multiple hosts
-            the secret must be the same across al the hosts.
-        </description>
-    </property>
-
-    <!--<property>
-      <name>oozie.authentication.cookie.domain</name>
-      <value></value>
-      <description>
-        The domain to use for the HTTP cookie that stores the authentication 
token.
-        In order to authentiation to work correctly across multiple hosts
-        the domain must be correctly set.
-      </description>
-    </property>-->
-
-    <property>
-        <name>oozie.authentication.simple.anonymous.allowed</name>
-        <value>true</value>
-        <description>
-            Indicates if anonymous requests are allowed.
-            This setting is meaningful only when using 'simple' authentication.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.kerberos.principal</name>
-        <value>HTTP/localhost@${local.realm}</value>
-        <description>
-            Indicates the Kerberos principal to be used for HTTP endpoint.
-            The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO 
specification.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.kerberos.keytab</name>
-        <value>${oozie.service.HadoopAccessorService.keytab.file}</value>
-        <description>
-            Location of the keytab file with the credentials for the principal.
-            Referring to the same keytab file Oozie uses for its Kerberos 
credentials for Hadoop.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.kerberos.name.rules</name>
-        <value>DEFAULT</value>
-        <description>
-            The kerberos names rules is to resolve kerberos principal names, 
refer to Hadoop's
-            KerberosName for more details.
-        </description>
-    </property>
-
-    <!-- Proxyuser Configuration -->
-
-    <!--
-
-    <property>
-        <name>oozie.service.ProxyUserService.proxyuser.#USER#.hosts</name>
-        <value>*</value>
-        <description>
-            List of hosts the '#USER#' user is allowed to perform 'doAs'
-            operations.
-
-            The '#USER#' must be replaced with the username o the user who is
-            allowed to perform 'doAs' operations.
-
-            The value can be the '*' wildcard or a list of hostnames.
-
-            For multiple users copy this property and replace the user name
-            in the property name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ProxyUserService.proxyuser.#USER#.groups</name>
-        <value>*</value>
-        <description>
-            List of groups the '#USER#' user is allowed to impersonate users
-            from to perform 'doAs' operations.
-
-            The '#USER#' must be replaced with the username o the user who is
-            allowed to perform 'doAs' operations.
-
-            The value can be the '*' wildcard or a list of groups.
-
-            For multiple users copy this property and replace the user name
-            in the property name.
-        </description>
-    </property>
-
-    -->
-
-
-  <property>
-    <name>oozie.service.coord.push.check.requeue.interval</name>
-    <value>30000</value>
-  </property>
-
-  <property>
-    <name>oozie.services.ext</name>
-    <value>org.apache.oozie.service.JMSAccessorService,
-                org.apache.oozie.service.PartitionDependencyManagerService,
-                org.apache.oozie.service.HCatAccessorService</value>
-  </property>
+<configuration supports_final="true">
 
+  <!--
+      Refer to the oozie-default.xml file for the complete list of
+      Oozie configuration properties and their default values.
+  -->
   <property>
-    <name>oozie.credentials.credentialclasses</name>
-    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>com.microsoft.sqlserver.jdbc.SQLServerDriver</value>
+    <description>
+      JDBC driver class.
+    </description>
   </property>
-
   <property>
-    <name>oozie.service.URIHandlerService.uri.handlers</name>
-    <value>org.apache.oozie.dependency.FSURIHandler,
-                org.apache.oozie.dependency.HCatURIHandler</value>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value></value>
+    <description>
+      JDBC URL.
+    </description>
   </property>
-
   <property>
-    <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
-    <value>now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-                
today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-                
yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-                
currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-                
lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-                
currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-                
lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-                
dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,
-                
instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,
-                
formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-                
dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=c:\hdp\hadoop\etc\hadoop</value>
+    <description>
+      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the 
HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR 
contains
+      the relevant Hadoop *-site.xml files. If the path is relative is looked 
within
+      the Oozie configuration directory; though the path can be absolute (i.e. 
to point
+      to Hadoop client conf/ directories in the local filesystem.
+    </description>
   </property>
 
+  <!-- New/Updated properties for 2.1 -->
   <property>
-    <name>oozie.service.ProxyUserService.proxyuser.hadoop.hosts</name>
-    <value>*</value>
+    <name>oozie.services</name>
+    <value>
+      org.apache.oozie.service.SchedulerService,
+      org.apache.oozie.service.InstrumentationService,
+      org.apache.oozie.service.CallableQueueService,
+      org.apache.oozie.service.UUIDService,
+      org.apache.oozie.service.ELService,
+      org.apache.oozie.service.AuthorizationService,
+      org.apache.oozie.service.UserGroupInformationService,
+      org.apache.oozie.service.HadoopAccessorService,
+      org.apache.oozie.service.URIHandlerService,
+      org.apache.oozie.service.MemoryLocksService,
+      org.apache.oozie.service.DagXLogInfoService,
+      org.apache.oozie.service.SchemaService,
+      org.apache.oozie.service.LiteWorkflowAppService,
+      org.apache.oozie.service.JPAService,
+      org.apache.oozie.service.StoreService,
+      org.apache.oozie.service.CoordinatorStoreService,
+      org.apache.oozie.service.SLAStoreService,
+      org.apache.oozie.service.DBLiteWorkflowStoreService,
+      org.apache.oozie.service.CallbackService,
+      org.apache.oozie.service.ActionService,
+      org.apache.oozie.service.ActionCheckerService,
+      org.apache.oozie.service.RecoveryService,
+      org.apache.oozie.service.PurgeService,
+      org.apache.oozie.service.CoordinatorEngineService,
+      org.apache.oozie.service.BundleEngineService,
+      org.apache.oozie.service.DagEngineService,
+      org.apache.oozie.service.CoordMaterializeTriggerService,
+      org.apache.oozie.service.StatusTransitService,
+      org.apache.oozie.service.PauseTransitService,
+      org.apache.oozie.service.GroupsService,
+      org.apache.oozie.service.ProxyUserService,
+      org.apache.oozie.service.XLogStreamingService,
+      org.apache.oozie.service.JobsConcurrencyService
+    </value>
+    <description>List of Oozie services</description>
   </property>
 
   <property>
-    <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
-    
<value>instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
-    
<value>instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
-  </property>
-
-  <property>
-    <name>oozie.service.ProxyUserService.proxyuser.hadoop.groups</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-create</name>
-    <value>now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-                today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-                
yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-                
currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-                
lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-                
currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-                
lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-                
latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-                
future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-                
formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
-  </property>
-
-  <property>
-    
<name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
-    <value>now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-                
today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-                
yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-                
currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-                
lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-                
currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-                
lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-                
formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-                
latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-                
future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo</value>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-start</name>
-    <value>now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-                today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-                
yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-                
currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-                
lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-                
currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-                
lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-                
latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,
-                
future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,
-                
dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,
-                
instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,
-                
dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,
-                
formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
-    <value>now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,
-                
today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,
-                
yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,
-                
currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,
-                
lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,
-                
currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,
-                
lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,
-                
latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-                
future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-                
formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
+    <name>oozie.services.ext</name>
+    
<value>org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService
+    </value>
+    <description>
+      To add/replace services defined in 'oozie.services' with custom 
implementations.
+      Class names must be separated by commas.
+    </description>
   </property>
 </configuration>
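
Two of the surviving properties above lend themselves to concrete values. The HadoopAccessorService.hadoop.configurations description defines a comma-separated AUTHORITY=HADOOP_CONF_DIR list with '*' as the fallback entry, and the JPAService JDBC URL is left blank in the stack defaults. A hypothetical illustration of both; the host names, port, and extra conf path below are made up for the example and are not taken from this commit:

    <property>
      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
      <!-- requests to namenode1.example.com:8020 use the first conf dir;
           any other authority falls back to the '*' entry -->
      <value>namenode1.example.com:8020=c:\hdp\hadoop-conf-a,*=c:\hdp\hadoop\etc\hadoop</value>
    </property>

    <property>
      <name>oozie.service.JPAService.jdbc.url</name>
      <!-- typical Microsoft SQL Server JDBC URL; 'oozie' matches the
           oozie.db.schema.name default shown earlier in this hunk -->
      <value>jdbc:sqlserver://dbhost.example.com:1433;databaseName=oozie</value>
    </property>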

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml
index 347954b..b2b394c 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml
@@ -20,107 +20,20 @@
   <services>
     <service>
       <name>OOZIE</name>
-      <displayName>Oozie</displayName>
-      <comment>System for workflow coordination and execution of Apache Hadoop 
jobs.  This also includes the installation of the optional Oozie Web Console 
which relies on and will install the &lt;a target="_blank" 
href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
-      </comment>
-      <version>4.0.0.2.0</version>
+      <extends>common-services/OOZIE/4.0.0.2.0</extends>
+      <version>4.0.0.2.1.1.0</version>
       <components>
         <component>
-          <name>OOZIE_SERVER</name>
-          <displayName>Oozie Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
           <name>OOZIE_CLIENT</name>
-          <displayName>Oozie Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
           <configFiles>
             <configFile>
-              <type>xml</type>
-              <fileName>oozie-site.xml</fileName>
-              <dictionaryName>oozie-site</dictionaryName>
-            </configFile>
-            <configFile>
               <type>env</type>
               <fileName>oozie-env.cmd</fileName>
               <dictionaryName>oozie-env</dictionaryName>
             </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>oozie-log4j.properties</fileName>
-              <dictionaryName>oozie-log4j</dictionaryName>
-            </configFile>
           </configFiles>
         </component>
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>oozie-site</config-type>
-        <config-type>oozie-env</config-type>
-        <config-type>oozie-log4j</config-type>
-        <config-type>yarn-site</config-type>
-      </configuration-dependencies>
     </service>
   </services>
 </metainfo>
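
The metainfo.xml rewrite above is the pattern this commit follows throughout: rather than repeating component, command-script, and configuration-dependency definitions, the stack declares an extends reference to common-services/OOZIE/4.0.0.2.0 and lists only its deltas (here, the oozie-env.cmd client config file). A minimal skeleton of that inheritance pattern, sketched for illustration; the schemaVersion element is assumed from Ambari's usual metainfo layout and is not shown in this hunk:

    <metainfo>
      <schemaVersion>2.0</schemaVersion>
      <services>
        <service>
          <name>OOZIE</name>
          <extends>common-services/OOZIE/4.0.0.2.0</extends>
          <version>4.0.0.2.1.1.0</version>
          <!-- only stack-specific overrides go here; components, scripts, and
               configuration dependencies are inherited from common-services -->
        </service>
      </services>
    </metainfo>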
