Author: rvs
Date: Tue Feb  7 22:21:49 2012
New Revision: 1241663

URL: http://svn.apache.org/viewvc?rev=1241663&view=rev
Log:
Merge branch 'trunk' into RCs

Modified:
    incubator/bigtop/branches/RCs/bigtop-packages/src/common/hbase/install_hbase.sh
    incubator/bigtop/branches/RCs/bigtop-packages/src/deb/hbase/rules
    incubator/bigtop/branches/RCs/bigtop-packages/src/rpm/hbase/SPECS/hbase.spec
    incubator/bigtop/branches/RCs/bigtop-test-framework/src/main/groovy/org/apache/bigtop/itest/pmanager/PackageManager.groovy
    incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_apt.xml
    incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_urpmi.xml
    incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_yum.xml
    incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_zypper.xml

Modified: incubator/bigtop/branches/RCs/bigtop-packages/src/common/hbase/install_hbase.sh
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/RCs/bigtop-packages/src/common/hbase/install_hbase.sh?rev=1241663&r1=1241662&r2=1241663&view=diff
==============================================================================
--- incubator/bigtop/branches/RCs/bigtop-packages/src/common/hbase/install_hbase.sh (original)
+++ incubator/bigtop/branches/RCs/bigtop-packages/src/common/hbase/install_hbase.sh Tue Feb  7 22:21:49 2012
@@ -131,10 +131,9 @@ elif [ -e /usr/lib/bigtop-utils/bigtop-d
   . /usr/lib/bigtop-utils/bigtop-detect-javahome
 fi
 
-export ZOOKEEPER_CONF=\${ZOOKEEPER_CONF:-/etc/zookeeper}
 export HADOOP_CONF=\${HADOOP_CONF:-/etc/hadoop/conf}
 export ZOOKEEPER_HOME=\${ZOOKEEPER_HOME:-/usr/lib/zookeeper}
-export HBASE_CLASSPATH=\$ZOOKEEPER_CONF:\$HADOOP_CONF:\$HADOOP_HOME/*:\$HADOOP_HOME/lib/*:\$ZOOKEEPER_HOME/*:\$ZOOKEEPER_HOME/lib/*:\$HBASE_CLASSPATH
+export HBASE_CLASSPATH=\$HADOOP_CONF:\$HADOOP_HOME/*:\$HADOOP_HOME/lib/*:\$ZOOKEEPER_HOME/*:\$ZOOKEEPER_HOME/lib/*:\$HBASE_CLASSPATH
 
 exec /usr/lib/hbase/bin/hbase "\$@"
 EOF
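
For context, the hunk above edits a shell heredoc that install_hbase.sh uses to
generate the hbase frontend wrapper, which is why every $ is escaped as \$.
After this change the generated wrapper would read roughly as the sketch below;
the shebang and any generated lines outside the hunk's context are assumptions:

    #!/bin/sh
    # A minimal sketch of the generated wrapper, reconstructed from the hunk
    # above; lines not shown in the hunk are assumptions.
    if [ -e /usr/lib/bigtop-utils/bigtop-detect-javahome ]; then
      . /usr/lib/bigtop-utils/bigtop-detect-javahome
    fi

    export HADOOP_CONF=${HADOOP_CONF:-/etc/hadoop/conf}
    export ZOOKEEPER_HOME=${ZOOKEEPER_HOME:-/usr/lib/zookeeper}
    # ZOOKEEPER_CONF is gone: it no longer gets a default and is no longer
    # prepended to the classpath; only Hadoop's conf dir and the jar
    # directory globs remain.
    export HBASE_CLASSPATH=$HADOOP_CONF:$HADOOP_HOME/*:$HADOOP_HOME/lib/*:$ZOOKEEPER_HOME/*:$ZOOKEEPER_HOME/lib/*:$HBASE_CLASSPATH

    exec /usr/lib/hbase/bin/hbase "$@"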

Modified: incubator/bigtop/branches/RCs/bigtop-packages/src/deb/hbase/rules
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/RCs/bigtop-packages/src/deb/hbase/rules?rev=1241663&r1=1241662&r2=1241663&view=diff
==============================================================================
--- incubator/bigtop/branches/RCs/bigtop-packages/src/deb/hbase/rules (original)
+++ incubator/bigtop/branches/RCs/bigtop-packages/src/deb/hbase/rules Tue Feb  7 22:21:49 2012
@@ -58,6 +58,8 @@ install: build
       cp debian/hbase.nofiles.conf debian/tmp/etc/security/limits.d/${hbase_pkg_name}.nofiles.conf
       # Symlink in the dependency jars from their packages. Both of these packages
        # provide an unversioned symlink foo.jar -> foo-0.1.2.jar.
+       rm -f debian/tmp/usr/lib/hadoop-*
+       rm -f debian/tmp/usr/lib/zookeeper-*
        ln -f -s ${hbase_jar_deps} debian/tmp/usr/lib/${hbase_pkg_name}/lib/
       ln -s /var/log/${hbase_pkg_name} debian/tmp/usr/lib/${hbase_pkg_name}/logs
       ln -s /var/run/${hbase_pkg_name} debian/tmp/usr/lib/${hbase_pkg_name}/pids
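
The two added rm -f lines complement the comment above: ln -f -s only replaces
an existing link of the same name, so versioned hadoop-* and zookeeper-* jars
left over in the package tree would otherwise ship alongside the new links. A
minimal sketch of the pattern, with a hypothetical dependency layout and made-up
version numbers (the real link targets come from ${hbase_jar_deps}):

    # Hypothetical layout provided by the dependency packages:
    #   /usr/lib/hadoop/hadoop-core.jar -> hadoop-core-0.20.205.0.jar
    #   /usr/lib/zookeeper/zookeeper.jar -> zookeeper-3.4.2.jar
    # Remove any versioned copies bundled by the build, then symlink the
    # stable unversioned names so dependency upgrades need no HBase rebuild:
    rm -f debian/tmp/usr/lib/hbase/lib/hadoop-* debian/tmp/usr/lib/hbase/lib/zookeeper-*
    ln -f -s /usr/lib/hadoop/hadoop-core.jar /usr/lib/zookeeper/zookeeper.jar \
        debian/tmp/usr/lib/hbase/lib/

The hbase.spec hunk below applies the same cleanup on the RPM side.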

Modified: incubator/bigtop/branches/RCs/bigtop-packages/src/rpm/hbase/SPECS/hbase.spec
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/RCs/bigtop-packages/src/rpm/hbase/SPECS/hbase.spec?rev=1241663&r1=1241662&r2=1241663&view=diff
==============================================================================
--- incubator/bigtop/branches/RCs/bigtop-packages/src/rpm/hbase/SPECS/hbase.spec (original)
+++ incubator/bigtop/branches/RCs/bigtop-packages/src/rpm/hbase/SPECS/hbase.spec Tue Feb  7 22:21:49 2012
@@ -231,6 +231,8 @@ done
 %__install -d -m 0755 $RPM_BUILD_ROOT/usr/bin
 
 # Pull zookeeper and hadoop from their packages
+rm -f $RPM_BUILD_ROOT/%{lib_hbase}/hadoop-*
+rm -f $RPM_BUILD_ROOT/%{lib_hbase}/zookeeper-*
 ln -f -s %{hbase_jar_deps} $RPM_BUILD_ROOT/%{lib_hbase}
 
 %pre

Modified: incubator/bigtop/branches/RCs/bigtop-test-framework/src/main/groovy/org/apache/bigtop/itest/pmanager/PackageManager.groovy
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/RCs/bigtop-test-framework/src/main/groovy/org/apache/bigtop/itest/pmanager/PackageManager.groovy?rev=1241663&r1=1241662&r2=1241663&view=diff
==============================================================================
--- incubator/bigtop/branches/RCs/bigtop-test-framework/src/main/groovy/org/apache/bigtop/itest/pmanager/PackageManager.groovy (original)
+++ incubator/bigtop/branches/RCs/bigtop-test-framework/src/main/groovy/org/apache/bigtop/itest/pmanager/PackageManager.groovy Tue Feb  7 22:21:49 2012
@@ -209,7 +209,7 @@ public abstract class PackageManager {
     switch (linux_flavor ?: OS.linux_flavor) {
       case ~/(?is).*(ubuntu|debian).*/:
         return new AptCmdLinePackageManager();
-      case ~/(?is).*(redhat|centos|rhel).*/:
+      case ~/(?is).*(redhat|centos|rhel|fedora).*/:
         return new YumCmdLinePackageManager();
       case ~/(?is).*(suse|sles|sled).*/:
         return new ZypperCmdLinePackageManager();
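
The only change here widens the case-insensitive flavor regex so Fedora hosts
are routed to the yum-based manager alongside RedHat, CentOS, and RHEL. The
match is easy to sanity-check from a shell (the flavor strings are
illustrative, not taken from this commit):

    # Same pattern, tested case-insensitively with grep -Ei:
    for flavor in "Ubuntu 10.04" "CentOS release 5.7" "Fedora release 16"; do
      if echo "$flavor" | grep -Eiq 'redhat|centos|rhel|fedora'; then
        echo "$flavor -> yum"       # CentOS and Fedora land here
      else
        echo "$flavor -> not yum"   # Ubuntu falls through to the apt case
      fi
    done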

Modified: incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_apt.xml
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_apt.xml?rev=1241663&r1=1241662&r2=1241663&view=diff
==============================================================================
--- incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_apt.xml (original)
+++ incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_apt.xml Tue Feb  7 22:21:49 2012
@@ -65,7 +65,7 @@
   * Smart defaults for services. You can get a properly configured system
     running quickly, while still being able to override settings as needed.
     </description>
-    <url>http://incubator.apache.org/whirr</url>
+    <url>http://whirr.apache.org/</url>
   </metadata>
   <deps>
     <sun-java6-jre/>
@@ -1139,36 +1139,6 @@
     </flume-conf>
   </alternatives>
 </flume>
-<flume-master>
-  <metadata>
-    <summary>central administration point for the flume data collection system</summary>
-    <description>The Flume master daemon is the central administration and data path control
- point for flume nodes.</description>
-    <url>http://www.cloudera.com</url>
-  </metadata>
-  <deps>
-    <flume>/self</flume>
-  </deps>
-  <services>
-    <flume-master>
-       <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>start</oninstall>
-       <configured>true</configured>
-    </flume-master>
-  </services>
-  <content>
-    <file name="/." owners="0" perm="drwxr-xr-x" user="root" group="root"/>
-    <file name="/etc" owners="119" perm="drwxr-xr-x" user="root" group="root"/>
-    <file name="/etc/init.d" owners="32" perm="drwxr-xr-x" user="root" group="root"/>
-    <config name="/etc/init.d/flume-master" owners="1" perm="-rwxr-xr-x" user="root" group="root"/>
-    <file name="/usr" owners="418" perm="drwxr-xr-x" user="root" group="root"/>
-    <file name="/usr/share" owners="418" perm="drwxr-xr-x" user="root" group="root"/>
-    <file name="/usr/share/doc" owners="418" perm="drwxr-xr-x" user="root" group="root"/>
-    <file name="/usr/share/doc/flume-master" owners="1" perm="drwxr-xr-x" user="root" group="root"/>
-    <file name="/usr/share/doc/flume-master/copyright" owners="1" perm="-rw-r--r--" user="root" group="root"/>
-    <file name="/usr/share/doc/flume-master/changelog.Debian.gz" owners="1" perm="-rw-r--r--" user="root" group="root"/>
-  </content>
-</flume-master>
 <flume-node>
   <metadata>
    <summary>core element of Flume's data path that collects and delivers data</summary>
@@ -1446,7 +1416,7 @@
  In case of workflow job failure, the workflow job can be rerun skipping
  previously completed actions, the workflow application can be patched before
  being rerun.</description>
-    <url>http://archive.cloudera.com/cdh/3/oozie</url>
+    <url>http://incubator.apache.org/oozie/</url>
   </metadata>
   <deps>
     <oozie-client>/self</oozie-client>
@@ -1575,7 +1545,7 @@
  user can deploy workflows and perform other administrative and
  monitoring tasks such as start, stop, kill, resume workflows
  and coordinator jobs.</description>
-    <url>http://archive.cloudera.com/cdh/3/oozie</url>
+    <url>http://incubator.apache.org/oozie/</url>
   </metadata>
   <content>
     <file name="/." owners="0" perm="drwxr-xr-x" user="root" group="root"/>
@@ -2390,7 +2360,7 @@
    <summary>A high-performance coordination service for distributed applications.</summary>
    <description>ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services.  All of these kinds of services are used in some form or another by distributed applications. Each time they are implemented there is a lot of work that goes into fixing the bugs and race conditions that are inevitable. Because of the difficulty of implementing these kinds of services, applications initially usually skimp on them ,which make them brittle in the presence of change and difficult to manage. Even when done correctly, different implementations of these services lead to management complexity when the applications are deployed.
     </description>
-    <url>http://hadoop.apache.org/zookeeper/</url>
+    <url>http://zookeeper.apache.org/</url>
     <!-- group>misc</group -->
   </metadata>
   <deps>
@@ -2787,7 +2757,7 @@
     <!-- license>APL2</license -->
     <!-- arch>universal</arch -->
     <summary>This runs the zookeeper server on startup.</summary>
-    <url>http://hadoop.apache.org/zookeeper/</url>
+    <url>http://zookeeper.apache.org/</url>
     <!-- vendor>(none)</vendor -->
     <!-- group>Development/Libraries</group -->
    <!-- depends><dep>adduser</dep><dep>sun-java6-jre</dep><dep>sun-java6-bin</dep></depends -->
@@ -2842,7 +2812,7 @@
    automatically, allowing the user to focus on semantics rather than efficiency.
 * Extensibility
    Users can create their own functions to do special-purpose processing.</description>
-    <url>http://hadoop.apache.org/pig/</url>
+    <url>http://pig.apache.org/</url>
   </metadata>
   <deps>
     <sun-java6-jre/>
@@ -6787,7 +6757,7 @@
  plug in their custom mappers and reducers to do more sophisticated
  analysis which may not be supported by the built-in capabilities of
  the language.</description>
-    <url>http://hadoop.apache.org/hive/</url>
+    <url>http://hive.apache.org/</url>
   </metadata>
   <deps>
     <sun-java6-jre/>
@@ -7099,7 +7069,7 @@
    <description>Use it when you need random, realtime read/write access to your Big Data.
  This project's goal is the hosting of very large tables -- billions of rows
  X millions of columns -- atop clusters of commodity hardware.</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
     <!-- group>misc</group -->
   </metadata>
   <deps>
@@ -7290,7 +7260,7 @@
   <metadata>
     <summary>Documentation for HBase</summary>
    <description>This package contains the HBase manual and JavaDoc.</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
     <!-- group>misc</group -->
   </metadata>
   <content>
@@ -9065,7 +9035,7 @@
   <metadata>
     <summary>HMaster is the "master server" for a HBase</summary>
    <description>There is only one HMaster for a single HBase deployment.</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
   </metadata>
   <deps>
     <hbase>/self</hbase>
@@ -9098,7 +9068,7 @@
    <summary>HRegionServer makes a set of HRegions available to clients</summary>
    <description>It checks in with the HMaster. There are many HRegionServers in a single
  HBase deployment.</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
   </metadata>
   <deps>
     <hbase>/self</hbase>
@@ -9131,7 +9101,7 @@
     <summary>Provides an HBase Thrift service</summary>
    <description>This package provides a Thrift service interface to the HBase distributed
  database.</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
   </metadata>
   <deps>
     <hbase>/self</hbase>

Modified: incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_urpmi.xml
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_urpmi.xml?rev=1241663&r1=1241662&r2=1241663&view=diff
==============================================================================
--- incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_urpmi.xml (original)
+++ incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_urpmi.xml Tue Feb  7 22:21:49 2012
@@ -63,7 +63,7 @@ also on potential use cases. Come to the
   service.
 * Smart defaults for services. You can get a properly configured system
  running quickly, while still being able to override settings as needed.</description>
-    <url>http://incubator.apache.org/whirr</url>
+    <url>http://whirr.apache.org/</url>
   </metadata>
   <deps>
     <tag name="/bin/bash"/>
@@ -804,7 +804,7 @@ also on potential use cases. Come to the
  mechanisms and many failover and recovery mechanisms.  The system is centrally
  managed and allows for intelligent dynamic management.  It uses a simple
 extensible data model that allows for online analytic applications.</description>
-    <url>https://github.com/cloudera/flume</url>
+    <url>http://incubator.apache.org/projects/flume.html</url>
   </metadata>
   <deps>
     <tag name="/bin/sh"/>
@@ -1117,44 +1117,11 @@ also on potential use cases. Come to the
     </flume-conf>
   </alternatives>
 </flume>
-<flume-master>
-  <metadata>
-    <summary>The flume master daemon is the central administration and data path control point for flume nodes.</summary>
-    <description>Flume is a reliable, scalable, and manageable distributed data collection application for collecting data such as logs and delivering it to data stores such as Hadoop's HDFS. It can efficiently collect, aggregate, and move large amounts of log data. It has a simple, but flexible, architecture based on streaming data flows. It is robust and fault tolerant with tunable reliability mechanisms and many failover and recovery mechanisms. The system is centrally managed and allows for intelligent dynamic management. It uses a simple extensible data model that allows for online analytic applications.</description>
-    <url>https://github.com/cloudera/flume</url>
-  </metadata>
-  <deps>
-    <tag name="/bin/bash"/>
-    <tag name="/bin/sh"/>
-    <tag name="/sbin/chkconfig"/>
-    <tag name="/sbin/service"/>
-    <tag name="/usr/bin/env"/>
-    <tag name="/usr/sbin/useradd"/>
-    <flume>/self</flume>
-    <jre>>=1.6</jre>
-    <redhat-lsb/>
-    <sh-utils/>
-    <textutils/>
-  </deps>
-  <services>
-    <flume-master>
-       <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>stop</oninstall>
-       <configured>true</configured>
-    </flume-master>
-  </services>
-  <content>
-    <file name="/etc/rc.d/init.d/flume-master" owners="1" perm="-rwxr-xr-x" user="root" group="root"/>
-    <file name="/usr/lib/flume" owners="2" perm="drwxr-xr-x" user="root" group="root"/>
-    <file name="/usr/lib/flume/bin" owners="2" perm="drwxr-xr-x" user="root" group="root"/>
-    <file name="/usr/lib/flume/bin/flume-daemon.sh" owners="2" perm="-rwxr-xr-x" user="root" group="root"/>
-  </content>
-</flume-master>
 <flume-node>
   <metadata>
    <summary>The flume node daemon is a core element of flume's data path and is responsible for generating, processing, and delivering data.</summary>
    <description>Flume is a reliable, scalable, and manageable distributed data collection application for collecting data such as logs and delivering it to data stores such as Hadoop's HDFS. It can efficiently collect, aggregate, and move large amounts of log data. It has a simple, but flexible, architecture based on streaming data flows. It is robust and fault tolerant with tunable reliability mechanisms and many failover and recovery mechanisms. The system is centrally managed and allows for intelligent dynamic management. It uses a simple extensible data model that allows for online analytic applications.</description>
-    <url>https://github.com/cloudera/flume</url>
+    <url>http://incubator.apache.org/projects/flume.html</url>
   </metadata>
   <deps>
     <tag name="/bin/bash"/>
@@ -1418,7 +1385,7 @@ also on potential use cases. Come to the
  In case of workflow job failure, the workflow job can be rerun skipping
  previously completed actions, the workflow application can be patched before
  being rerun.</description>
-    <url>http://www.cloudera.com</url>
+    <url>http://incubator.apache.org/oozie/</url>
   </metadata>
   <deps>
     <tag name="/bin/bash"/>
@@ -1542,7 +1509,7 @@ also on potential use cases. Come to the
  you can also change the status of the entire system, get vesion
  information. This client utility also allows you to validate
  any worflows before they are deployed to the Oozie server.</description>
-    <url>http://www.cloudera.com</url>
+    <url>http://incubator.apache.org/oozie/</url>
   </metadata>
   <deps>
     <tag name="/bin/bash"/>
@@ -2356,7 +2323,7 @@ into fixing the bugs and race conditions
 difficulty of implementing these kinds of services, applications initially
 usually skimp on them ,which make them brittle in the presence of change and
 difficult to manage. Even when done correctly, different implementations of these services lead to management complexity when the applications are deployed.</description>
-    <url>http://hadoop.apache.org/zookeeper/</url>
+    <url>http://zookeeper.apache.org/</url>
     <!-- group>misc</group -->
   </metadata>
   <deps>
@@ -2740,7 +2707,7 @@ difficult to manage. Even when done corr
     <!-- arch>universal</arch -->
     <summary>The Hadoop Zookeeper server</summary>
    <description>This package starts the zookeeper server on startup</description>
-    <url>http://hadoop.apache.org/zookeeper/</url>
+    <url>http://zookeeper.apache.org/</url>
     <!-- vendor>(none)</vendor -->
     <!-- group>Development/Libraries</group -->
    <!-- depends><dep>adduser</dep><dep>sun-java6-jre</dep><dep>sun-java6-bin</dep></depends -->
@@ -2788,7 +2755,7 @@ difficult to manage. Even when done corr
   automatically, allowing the user to focus on semantics rather than efficiency.
 * Extensibility
   Users can create their own functions to do special-purpose processing.</description>
-    <url>http://hadoop.apache.org/pig/</url>
+    <url>http://pig.apache.org/</url>
   </metadata>
   <deps>
     <tag name="/bin/sh"/>
@@ -6717,7 +6684,7 @@ difficult to manage. Even when done corr
  plug in their custom mappers and reducers to do more sophisticated
  analysis which may not be supported by the built-in capabilities of
  the language.</description>
-    <url>http://hadoop.apache.org/hive/</url>
+    <url>http://hive.apache.org/</url>
   </metadata>
   <deps>
     <tag name="/bin/sh"/>
@@ -7157,7 +7124,7 @@ difficult to manage. Even when done corr
     * Cascading source and sink modules
     * Extensible jruby-based (JIRB) shell
    * Support for exporting metrics via the Hadoop metrics subsystem to files or Ganglia; or via JMX</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
     <!-- group>misc</group -->
   </metadata>
   <deps>
@@ -7336,7 +7303,7 @@ difficult to manage. Even when done corr
   <metadata>
     <summary>Hbase Documentation</summary>
     <description>Documentation for Hbase</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
     <!-- group>misc</group -->
   </metadata>
   <content>
@@ -9105,7 +9072,7 @@ difficult to manage. Even when done corr
   <metadata>
     <summary>The Hadoop HBase master Server.</summary>
    <description>HMaster is the "master server" for a HBase. There is only one HMaster for a single HBase deployment.</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
   </metadata>
   <deps>
     <tag name="/bin/bash"/>
@@ -9128,7 +9095,7 @@ difficult to manage. Even when done corr
   <metadata>
     <summary>The Hadoop HBase RegionServer server.</summary>
    <description>HRegionServer makes a set of HRegions available to clients. It checks in with the HMaster. There are many HRegionServers in a single HBase deployment.</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
   </metadata>
   <deps>
     <tag name="/bin/bash"/>
@@ -9151,7 +9118,7 @@ difficult to manage. Even when done corr
   <metadata>
     <summary>The Hadoop HBase Thrift Interface</summary>
    <description>ThriftServer - this class starts up a Thrift server which implements the Hbase API specified in the Hbase.thrift IDL file. "Thrift is a software framework for scalable cross-language services development. It combines a powerful software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, and Ruby. Thrift was developed at Facebook, and we are now releasing it as open source." For additional information, see http://developers.facebook.com/thrift/. Facebook has announced their intent to migrate Thrift into Apache Incubator.</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
   </metadata>
   <deps>
     <tag name="/bin/bash"/>

Modified: incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_yum.xml
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_yum.xml?rev=1241663&r1=1241662&r2=1241663&view=diff
==============================================================================
--- incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_yum.xml (original)
+++ incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_yum.xml Tue Feb  7 22:21:49 2012
@@ -156,7 +156,7 @@ also on potential use cases. Come to the
   service.
 * Smart defaults for services. You can get a properly configured system
  running quickly, while still being able to override settings as needed.</description>
-      <url>http://incubator.apache.org/whirr</url>
+      <url>http://whirr.apache.org/</url>
     </metadata>
     <deps>
       <tag name="/bin/bash"/>
@@ -1060,7 +1060,7 @@ also on potential use cases. Come to the
  mechanisms and many failover and recovery mechanisms.  The system is centrally
  managed and allows for intelligent dynamic management.  It uses a simple
 extensible data model that allows for online analytic applications.</description>
-      <url>https://github.com/cloudera/flume</url>
+      <url>http://incubator.apache.org/projects/flume.html</url>
     </metadata>
     <deps>
       <tag name="/bin/sh"/>
@@ -1369,46 +1369,11 @@ also on potential use cases. Come to the
       </flume-conf>
     </alternatives>
   </flume>
-  <flume-master>
-    <metadata>
-      <summary>The flume master daemon is the central administration and data path control point for flume nodes.</summary>
-      <description>Flume is a reliable, scalable, and manageable distributed data collection application for collecting data such as logs and delivering it to data stores such as Hadoop's HDFS. It can efficiently collect, aggregate, and move large amounts of log data. It has a simple, but flexible, architecture based on streaming data flows. It is robust and fault tolerant with tunable reliability mechanisms and many failover and recovery mechanisms. The system is centrally managed and allows for intelligent dynamic management. It uses a simple extensible data model that allows for online analytic applications.</description>
-      <url>https://github.com/cloudera/flume</url>
-    </metadata>
-    <deps>
-      <tag name="/bin/bash"/>
-      <tag name="/bin/sh"/>
-      <tag name="/sbin/chkconfig"/>
-      <tag name="/sbin/service"/>
-      <tag name="/usr/bin/env"/>
-      <tag name="/usr/sbin/useradd"/>
-      <flume>/self</flume>
-      <jre>&gt;=1.6</jre>
-      <redhat-lsb/>
-      <sh-utils/>
-      <textutils/>
-    </deps>
-    <services>
-      <flume-master>
-        <runlevel>2</runlevel>
-        <runlevel>3</runlevel>
-        <runlevel>4</runlevel>
-        <runlevel>5</runlevel>
-        <oninstall>stop</oninstall>
-        <configured>true</configured>
-      </flume-master>
-    </services>
-    <content>
-      <file name="/etc/rc.d/init.d/flume-master" perm="-rwxr-xr-x" group="root" owners="-1" user="root"/>
-      <file name="/usr/lib/flume" perm="drwxr-xr-x" group="root" owners="-1" user="root"/>
-      <file name="/usr/lib/flume/bin" perm="drwxr-xr-x" group="root" owners="-1" user="root"/>
-    </content>
-  </flume-master>
   <flume-node>
     <metadata>
      <summary>The flume node daemon is a core element of flume's data path and is responsible for generating, processing, and delivering data.</summary>
      <description>Flume is a reliable, scalable, and manageable distributed data collection application for collecting data such as logs and delivering it to data stores such as Hadoop's HDFS. It can efficiently collect, aggregate, and move large amounts of log data. It has a simple, but flexible, architecture based on streaming data flows. It is robust and fault tolerant with tunable reliability mechanisms and many failover and recovery mechanisms. The system is centrally managed and allows for intelligent dynamic management. It uses a simple extensible data model that allows for online analytic applications.</description>
-      <url>https://github.com/cloudera/flume</url>
+      <url>http://incubator.apache.org/projects/flume.html</url>
     </metadata>
     <deps>
       <tag name="/bin/bash"/>
@@ -1685,7 +1650,7 @@ also on potential use cases. Come to the
  In case of workflow job failure, the workflow job can be rerun skipping
  previously completed actions, the workflow application can be patched before
  being rerun.</description>
-      <url>http://www.cloudera.com</url>
+      <url>http://incubator.apache.org/oozie/</url>
     </metadata>
     <deps>
       <tag name="/bin/bash"/>
@@ -1815,7 +1780,7 @@ also on potential use cases. Come to the
  you can also change the status of the entire system, get vesion
  information. This client utility also allows you to validate
  any worflows before they are deployed to the Oozie server.</description>
-      <url>http://www.cloudera.com</url>
+      <url>http://incubator.apache.org/oozie/</url>
     </metadata>
     <deps>
       <tag name="/bin/bash"/>
@@ -4171,7 +4136,7 @@ into fixing the bugs and race conditions
 difficulty of implementing these kinds of services, applications initially
 usually skimp on them ,which make them brittle in the presence of change and
 difficult to manage. Even when done correctly, different implementations of these services lead to management complexity when the applications are deployed.</description>
-      <url>http://hadoop.apache.org/zookeeper/</url>
+      <url>http://zookeeper.apache.org/</url>
     </metadata>
     <deps>
       <tag name="/bin/sh"/>
@@ -4550,7 +4515,7 @@ difficult to manage. Even when done corr
     <metadata>
       <summary>The Hadoop Zookeeper server</summary>
      <description>This package starts the zookeeper server on startup</description>
-      <url>http://hadoop.apache.org/zookeeper/</url>
+      <url>http://zookeeper.apache.org/</url>
     </metadata>
     <deps>
       <tag name="/bin/bash"/>
@@ -4595,7 +4560,7 @@ difficult to manage. Even when done corr
   automatically, allowing the user to focus on semantics rather than efficiency.
 * Extensibility
   Users can create their own functions to do special-purpose processing.</description>
-      <url>http://hadoop.apache.org/pig/</url>
+      <url>http://pig.apache.org/</url>
     </metadata>
     <deps>
       <tag name="/bin/sh"/>
@@ -8471,7 +8436,7 @@ difficult to manage. Even when done corr
  plug in their custom mappers and reducers to do more sophisticated
  analysis which may not be supported by the built-in capabilities of
  the language.</description>
-      <url>http://hadoop.apache.org/hive/</url>
+      <url>http://hive.apache.org/</url>
     </metadata>
     <deps>
       <tag name="/bin/sh"/>
@@ -8927,7 +8892,7 @@ difficult to manage. Even when done corr
     * Cascading source and sink modules
     * Extensible jruby-based (JIRB) shell
    * Support for exporting metrics via the Hadoop metrics subsystem to files or Ganglia; or via JMX</description>
-      <url>http://hadoop.apache.org/hbase/</url>
+      <url>http://hbase.apache.org/</url>
     </metadata>
     <deps>
       <tag name="/bin/sh"/>
@@ -9114,7 +9079,7 @@ difficult to manage. Even when done corr
     <metadata>
       <summary>Hbase Documentation</summary>
       <description>Documentation for Hbase</description>
-      <url>http://hadoop.apache.org/hbase/</url>
+      <url>http://hbase.apache.org/</url>
     </metadata>
     <content>
      <file name="/usr/share/doc/hbase-0.90.4.16" perm="drwxr-xr-x" group="root" owners="-1" user="root"/>
@@ -11842,7 +11807,7 @@ difficult to manage. Even when done corr
     <metadata>
       <summary>The Hadoop HBase master Server.</summary>
      <description>HMaster is the "master server" for a HBase. There is only one HMaster for a single HBase deployment.</description>
-      <url>http://hadoop.apache.org/hbase/</url>
+      <url>http://hbase.apache.org/</url>
     </metadata>
     <deps>
       <tag name="/bin/bash"/>
@@ -11868,7 +11833,7 @@ difficult to manage. Even when done corr
     <metadata>
       <summary>The Hadoop HBase RegionServer server.</summary>
      <description>HRegionServer makes a set of HRegions available to clients. It checks in with the HMaster. There are many HRegionServers in a single HBase deployment.</description>
-      <url>http://hadoop.apache.org/hbase/</url>
+      <url>http://hbase.apache.org/</url>
     </metadata>
     <deps>
       <tag name="/bin/bash"/>
@@ -11894,7 +11859,7 @@ difficult to manage. Even when done corr
     <metadata>
       <summary>The Hadoop HBase Thrift Interface</summary>
      <description>ThriftServer - this class starts up a Thrift server which implements the Hbase API specified in the Hbase.thrift IDL file. "Thrift is a software framework for scalable cross-language services development. It combines a powerful software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, and Ruby. Thrift was developed at Facebook, and we are now releasing it as open source." For additional information, see http://developers.facebook.com/thrift/. Facebook has announced their intent to migrate Thrift into Apache Incubator.</description>
-      <url>http://hadoop.apache.org/hbase/</url>
+      <url>http://hbase.apache.org/</url>
     </metadata>
     <deps>
       <tag name="/bin/bash"/>

Modified: incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_zypper.xml
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_zypper.xml?rev=1241663&r1=1241662&r2=1241663&view=diff
==============================================================================
--- incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_zypper.xml (original)
+++ incubator/bigtop/branches/RCs/bigtop-tests/test-artifacts/package/src/main/resources/package_data_zypper.xml Tue Feb  7 22:21:49 2012
@@ -63,7 +63,7 @@ also on potential use cases. Come to the
   service.
 * Smart defaults for services. You can get a properly configured system
  running quickly, while still being able to override settings as needed.</description>
-    <url>http://incubator.apache.org/whirr</url>
+    <url>http://whirr.apache.org/</url>
   </metadata>
   <deps>
     <jre>>=1.6</jre>
@@ -804,7 +804,7 @@ also on potential use cases. Come to the
  mechanisms and many failover and recovery mechanisms.  The system is centrally
  managed and allows for intelligent dynamic management.  It uses a simple
 extensible data model that allows for online analytic applications.</description>
-    <url>https://github.com/cloudera/flume</url>
+    <url>http://incubator.apache.org/projects/flume.html</url>
   </metadata>
   <deps>
     <sh-utils/>
@@ -1117,44 +1117,11 @@ also on potential use cases. Come to the
     </flume-conf>
   </alternatives>
 </flume>
-<flume-master>
-  <metadata>
-    <summary>The flume master daemon is the central administration and data path control point for flume nodes.</summary>
-    <description>Flume is a reliable, scalable, and manageable distributed data collection application for collecting data such as logs and delivering it to data stores such as Hadoop's HDFS. It can efficiently collect, aggregate, and move large amounts of log data. It has a simple, but flexible, architecture based on streaming data flows. It is robust and fault tolerant with tunable reliability mechanisms and many failover and recovery mechanisms. The system is centrally managed and allows for intelligent dynamic management. It uses a simple extensible data model that allows for online analytic applications.</description>
-    <url>https://github.com/cloudera/flume</url>
-  </metadata>
-  <deps>
-    <flume>/self</flume>
-    <sh-utils/>
-    <textutils/>
-    <tag name="/usr/sbin/useradd"/>
-    <tag name="/sbin/chkconfig"/>
-    <tag name="/sbin/service"/>
-    <jre>>=1.6</jre>
-    <insserv/>
-    <tag name="/bin/sh"/>
-    <tag name="/bin/bash"/>
-    <tag name="/usr/bin/env"/>
-  </deps>
-  <services>
-    <flume-master>
-       <!-- BUG https://jira.cloudera.com/browse/KITCHEN-1095 --><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>stop</oninstall>
-       <configured>true</configured>
-    </flume-master>
-  </services>
-  <content>
-    <file name="/etc/rc.d/flume-master" owners="1" perm="-rwxr-xr-x" user="root" group="root"/>
-    <file name="/usr/lib/flume" owners="2" perm="drwxr-xr-x" user="root" group="root"/>
-    <file name="/usr/lib/flume/bin" owners="2" perm="drwxr-xr-x" user="root" group="root"/>
-    <file name="/usr/lib/flume/bin/flume-daemon.sh" owners="2" perm="-rwxr-xr-x" user="root" group="root"/>
-  </content>
-</flume-master>
 <flume-node>
   <metadata>
    <summary>The flume node daemon is a core element of flume's data path and is responsible for generating, processing, and delivering data.</summary>
    <description>Flume is a reliable, scalable, and manageable distributed data collection application for collecting data such as logs and delivering it to data stores such as Hadoop's HDFS. It can efficiently collect, aggregate, and move large amounts of log data. It has a simple, but flexible, architecture based on streaming data flows. It is robust and fault tolerant with tunable reliability mechanisms and many failover and recovery mechanisms. The system is centrally managed and allows for intelligent dynamic management. It uses a simple extensible data model that allows for online analytic applications.</description>
-    <url>https://github.com/cloudera/flume</url>
+    <url>http://incubator.apache.org/projects/flume.html</url>
   </metadata>
   <deps>
     <flume>/self</flume>
@@ -1418,7 +1385,7 @@ also on potential use cases. Come to the
  In case of workflow job failure, the workflow job can be rerun skipping
  previously completed actions, the workflow application can be patched before
  being rerun.</description>
-    <url>http://www.cloudera.com</url>
+    <url>http://incubator.apache.org/oozie/</url>
   </metadata>
   <deps>
     <tag name="/usr/sbin/groupadd"/>
@@ -1543,7 +1510,7 @@ also on potential use cases. Come to the
  you can also change the status of the entire system, get vesion
  information. This client utility also allows you to validate
  any worflows before they are deployed to the Oozie server.</description>
-    <url>http://www.cloudera.com</url>
+    <url>http://incubator.apache.org/oozie/</url>
   </metadata>
   <deps>
     <tag name="/bin/bash"/>
@@ -2356,7 +2323,7 @@ into fixing the bugs and race conditions
 difficulty of implementing these kinds of services, applications initially
 usually skimp on them ,which make them brittle in the presence of change and
 difficult to manage. Even when done correctly, different implementations of these services lead to management complexity when the applications are deployed.</description>
-    <url>http://hadoop.apache.org/zookeeper/</url>
+    <url>http://zookeeper.apache.org/</url>
     <!-- group>misc</group -->
   </metadata>
   <deps>
@@ -2728,9 +2695,9 @@ difficult to manage. Even when done corr
   <alternatives>
     <zookeeper-conf>
       <status>auto</status>
-      <link>/etc/zookeeper</link>
-      <value>/etc/zookeeper.dist</value>
-      <alt>/etc/zookeeper.dist</alt>
+      <link>/etc/zookeeper/conf</link>
+      <value>/etc/zookeeper/conf.dist</value>
+      <alt>/etc/zookeeper/conf.dist</alt>
     </zookeeper-conf>
   </alternatives>
 </zookeeper>
@@ -2740,7 +2707,7 @@ difficult to manage. Even when done corr
     <!-- arch>universal</arch -->
     <summary>The Hadoop Zookeeper server</summary>
    <description>This package starts the zookeeper server on startup</description>
-    <url>http://hadoop.apache.org/zookeeper/</url>
+    <url>http://zookeeper.apache.org/</url>
     <!-- vendor>(none)</vendor -->
     <!-- group>Development/Libraries</group -->
    <!-- depends><dep>adduser</dep><dep>sun-java6-jre</dep><dep>sun-java6-bin</dep></depends -->
@@ -2788,7 +2755,7 @@ difficult to manage. Even when done corr
   automatically, allowing the user to focus on semantics rather than efficiency.
 * Extensibility
   Users can create their own functions to do special-purpose processing.</description>
-    <url>http://hadoop.apache.org/pig/</url>
+    <url>http://pig.apache.org/</url>
   </metadata>
   <deps>
     <hadoop/>
@@ -6696,12 +6663,12 @@ difficult to manage. Even when done corr
    <doc  name="/usr/share/man/man1/pig.1.gz" owners="1" perm="-rw-r--r--" user="root" group="root"/>
   </content>
   <alternatives>
-    <pig> <!-- BUG: https://issues.cloudera.org/browse/DISTRO-223 -->
+    <pig-conf>
       <status>auto</status>
       <link>/etc/pig/conf</link>
       <value>/etc/pig/conf.dist</value>
       <alt>/etc/pig/conf.dist</alt>
-    </pig>
+    </pig-conf>
   </alternatives>
 </pig>
 <hive>
@@ -6717,7 +6684,7 @@ difficult to manage. Even when done corr
  plug in their custom mappers and reducers to do more sophisticated
  analysis which may not be supported by the built-in capabilities of
  the language.</description>
-    <url>http://hadoop.apache.org/hive/</url>
+    <url>http://hive.apache.org/</url>
   </metadata>
   <deps>
     <hadoop>>=0.20.1</hadoop>
@@ -7077,7 +7044,7 @@ difficult to manage. Even when done corr
     * Cascading source and sink modules
     * Extensible jruby-based (JIRB) shell
    * Support for exporting metrics via the Hadoop metrics subsystem to files or Ganglia; or via JMX</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
     <!-- group>misc</group -->
   </metadata>
   <deps>
@@ -7256,7 +7223,7 @@ difficult to manage. Even when done corr
   <metadata>
     <summary>Hbase Documentation</summary>
     <description>Documentation for Hbase</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
     <!-- group>misc</group -->
   </metadata>
   <content>
@@ -9025,7 +8992,7 @@ difficult to manage. Even when done corr
   <metadata>
     <summary>The Hadoop HBase master Server.</summary>
    <description>HMaster is the "master server" for a HBase. There is only one HMaster for a single HBase deployment.</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
   </metadata>
   <deps>
     <hbase>/self</hbase>
@@ -9048,7 +9015,7 @@ difficult to manage. Even when done corr
   <metadata>
     <summary>The Hadoop HBase RegionServer server.</summary>
    <description>HRegionServer makes a set of HRegions available to clients. It checks in with the HMaster. There are many HRegionServers in a single HBase deployment.</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
   </metadata>
   <deps>
     <hbase>/self</hbase>
@@ -9071,7 +9038,7 @@ difficult to manage. Even when done corr
   <metadata>
     <summary>The Hadoop HBase Thrift Interface</summary>
    <description>ThriftServer - this class starts up a Thrift server which implements the Hbase API specified in the Hbase.thrift IDL file. "Thrift is a software framework for scalable cross-language services development. It combines a powerful software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, and Ruby. Thrift was developed at Facebook, and we are now releasing it as open source." For additional information, see http://developers.facebook.com/thrift/. Facebook has announced their intent to migrate Thrift into Apache Incubator.</description>
-    <url>http://hadoop.apache.org/hbase/</url>
+    <url>http://hbase.apache.org/</url>
   </metadata>
   <deps>
     <hbase>/self</hbase>
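
A note on the <alternatives> blocks corrected above (zookeeper-conf here, and
pig renamed to pig-conf earlier in this file): they record the state the
package tests expect the alternatives system to be in after installation. The
corrected zookeeper-conf entry corresponds to roughly the following
registration on an installed box; the priority value is an assumption, not
something recorded in this commit:

    # Register /etc/zookeeper/conf as an alternatives-managed link
    # (priority 30 is a placeholder), then inspect it:
    update-alternatives --install /etc/zookeeper/conf zookeeper-conf /etc/zookeeper/conf.dist 30
    update-alternatives --display zookeeper-conf
    # expected: status auto, link pointing at /etc/zookeeper/conf.dist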

