incubator-bigtop-commits mailing list archives

From r..@apache.org
Subject svn commit: r1353693 [1/2] - in /incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources: apt/package_data.xml package_data.xml yum/package_data.xml zypper/package_data.xml
Date Mon, 25 Jun 2012 18:23:02 GMT
Author: rvs
Date: Mon Jun 25 18:23:02 2012
New Revision: 1353693

URL: http://svn.apache.org/viewvc?rev=1353693&view=rev
Log:
BIGTOP-638. move <services> block from common package_data.xml to each platform's (yum, apt, zypper) package_data.xml (Johnny Zhang via rvs)
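
For reference, the <services> entries now carried by each platform-specific package_data.xml follow the shape sketched below (a minimal example modeled on the zookeeper-server entry in the apt hunk further down; the inline comments are editorial observations, not part of the committed file):

  <zookeeper-server>
    <services>
      <zookeeper-server>
        <!-- runlevels the service is registered for -->
        <runlevel>2</runlevel>
        <runlevel>3</runlevel>
        <runlevel>4</runlevel>
        <runlevel>5</runlevel>
        <!-- oninstall is either "start" or "stop" in the hunks below -->
        <oninstall>start</oninstall>
        <!-- configured is "true" or "false"; some entries also carry <init> or a <config> block -->
        <configured>true</configured>
        <init>true</init>
      </zookeeper-server>
    </services>
  </zookeeper-server>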

Modified:
    incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/apt/package_data.xml
    incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/package_data.xml
    incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/yum/package_data.xml
    incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/zypper/package_data.xml

Modified: incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/apt/package_data.xml
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/apt/package_data.xml?rev=1353693&r1=1353692&r2=1353693&view=diff
==============================================================================
--- incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/apt/package_data.xml (original)
+++ incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/apt/package_data.xml Mon Jun 25 18:23:02 2012
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -16,164 +16,479 @@
    limitations under the License.
 -->
 <packages>
-<flume>
-  <users>
-    <flume>
-      <home>/var/run/flume</home>
-      <descr>Flume User</descr>
-      <shell>/bin/false</shell>
-    </flume>
-  </users>
-  <deps>
-    <adduser/>
-  </deps>
-</flume>
-<sqoop-metastore>
-  <users>
-    <sqoop>
-      <home>/var/lib/sqoop</home>
-      <descr>Sqoop User</descr>
-      <shell>/bin/false</shell>
-    </sqoop>
-  </users>
-  <deps>
-    <adduser/>
-  </deps>
-</sqoop-metastore>
-<zookeeper>
-  <users>
-    <zookeeper>
-      <home>/var/lib/zookeeper</home>
-      <descr>ZooKeeper User</descr>
-      <shell>/bin/false</shell>
-    </zookeeper>
-  </users>
-  <deps>
-    <adduser/>
-  </deps>
-</zookeeper>
-<oozie>
-  <users>
-    <oozie>
-      <home>/var/lib/oozie</home>
-      <descr>Oozie User</descr>
-      <shell>/bin/false</shell>
-    </oozie>
-  </users>
-  <deps>
-    <adduser/>
-    <zip/>
-    <unzip/>
-  </deps>
-</oozie>
-<hive>
-  <deps>
-    <adduser/>
-  </deps>
-</hive>
-<hive-metastore>
-  <users>
-    <hive>
-      <home>/var/lib/hive</home>
-      <descr>Hive User</descr>
-      <shell>/bin/false</shell>
-    </hive>
-  </users>
-</hive-metastore>
-<hive-server>
-  <users>
-    <hive>
-      <home>/var/lib/hive</home>
-      <descr>Hive User</descr>
-      <shell>/bin/false</shell>
-    </hive>
-  </users>
-</hive-server>
-<hbase>
-  <users>
-    <hbase>
-      <home>/var/run/hbase</home>
-      <descr>HBase User</descr>
-      <shell>/bin/bash</shell>
-    </hbase>
-  </users>
-  <deps>
-    <adduser/>
-  </deps>
-</hbase>
-<hadoop>
-  <deps>
-    <libc6/>
-    <adduser/>
-  </deps>
-</hadoop>
-<hadoop-hdfs>
-  <users>
-    <hdfs>
-      <home>/var/lib/hadoop-hdfs</home>
-      <descr>Hadoop HDFS</descr>
-      <shell>/bin/bash</shell>
-    </hdfs>
-  </users>
-  <deps>
-    <libc6/>
-    <adduser/>
-  </deps>
-</hadoop-hdfs>
-<hadoop-yarn>
-  <users>
-    <yarn>
-      <home>/var/lib/hadoop-yarn</home>
-      <descr>Hadoop YARN</descr>
-      <shell>/bin/bash</shell>
-    </yarn>
-  </users>
-  <deps>
-    <libc6/>
-    <adduser/>
-  </deps>
-</hadoop-yarn>
-<hadoop-mapreduce>
-  <users>
-    <mapred>
-      <home>/var/lib/hadoop-mapreduce</home>
-      <descr>Hadoop MapReduce</descr>
-      <shell>/bin/bash</shell>
-    </mapred>
-  </users>
-  <deps>
-    <adduser/>
-  </deps>
-</hadoop-mapreduce>
-<hadoop-httpfs>
-  <users>
-    <httpfs>
-      <home>/var/run/hadoop-httpfs</home>
-      <descr>Hadoop HTTPFS</descr>
-      <shell>/bin/bash</shell>
-    </httpfs>
-  </users>
-</hadoop-httpfs>
-<libhdfs0>
-  <metadata>
-    <summary>Hadoop Filesystem Library</summary>
-    <description>Hadoop Filesystem Library</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop>/self</hadoop>
-    <libc6>>=2.4</libc6>
-  </deps>
-</libhdfs0>
-<libhdfs0-dev>
-  <metadata>
-    <summary>Development support for libhdfs0</summary>
-    <description>Includes examples and header files for accessing HDFS from C</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop>/self</hadoop>
-    <libhdfs0>/self</libhdfs0>
-  </deps>
-</libhdfs0-dev>
+  <flume>
+    <users>
+      <flume>
+        <home>/var/run/flume</home>
+        <descr>Flume User</descr>
+        <shell>/bin/false</shell>
+      </flume>
+    </users>
+    <deps>
+      <adduser/>
+    </deps>
+  </flume>
+  <flume-node>
+    <deps>
+      <tag name="/bin/bash"/>
+      <tag name="/bin/sh"/>
+      <tag name="/sbin/chkconfig"/>
+      <tag name="/sbin/service"/>
+      <tag name="/usr/bin/env"/>
+      <tag name="/usr/sbin/useradd"/>
+      <jre>&gt;=1.6</jre>
+      <sh-utils/>
+      <textutils/>
+    </deps>
+    <services>
+      <flume-node>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>start</oninstall>
+        <configured>true</configured>
+      </flume-node>
+    </services>
+  </flume-node>
+  <sqoop-metastore>
+    <users>
+      <sqoop>
+        <home>/var/lib/sqoop</home>
+        <descr>Sqoop User</descr>
+        <shell>/bin/false</shell>
+      </sqoop>
+    </users>
+    <deps>
+      <adduser/>
+    </deps>
+    <services>
+      <sqoop-metastore>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>start</oninstall>
+        <configured>true</configured>
+      </sqoop-metastore>
+    </services>
+  </sqoop-metastore>
+  <zookeeper>
+    <users>
+      <zookeeper>
+        <home>/var/lib/zookeeper</home>
+        <descr>ZooKeeper User</descr>
+        <shell>/bin/false</shell>
+      </zookeeper>
+    </users>
+    <deps>
+      <adduser/>
+    </deps>
+  </zookeeper>
+  <zookeeper-server>
+    <deps>
+      <tag name="/bin/bash"/>
+      <tag name="/bin/sh"/>
+    </deps>
+    <services>
+      <zookeeper-server>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>start</oninstall>
+        <configured>true</configured>
+        <init>true</init>
+      </zookeeper-server>
+    </services>
+  </zookeeper-server>
+  <oozie>
+    <users>
+      <oozie>
+        <home>/var/lib/oozie</home>
+        <descr>Oozie User</descr>
+        <shell>/bin/false</shell>
+      </oozie>
+    </users>
+    <deps>
+      <adduser/>
+      <zip/>
+      <unzip/>
+    </deps>
+    <services>
+      <oozie>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>true</configured>
+      </oozie>
+    </services>
+  </oozie>
+  <hive>
+    <deps>
+      <adduser/>
+    </deps>
+  </hive>
+  <hive-metastore>
+    <users>
+      <hive>
+        <home>/var/lib/hive</home>
+        <descr>Hive User</descr>
+        <shell>/bin/false</shell>
+      </hive>
+    </users>
+    <services>
+      <hive-metastore>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>start</oninstall>
+        <configured>true</configured>
+      </hive-metastore>
+    </services>
+  </hive-metastore>
+  <hive-server>
+    <users>
+      <hive>
+        <home>/var/lib/hive</home>
+        <descr>Hive User</descr>
+        <shell>/bin/false</shell>
+      </hive>
+    </users>
+    <services>
+      <hive-server>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>start</oninstall>
+        <configured>true</configured>
+      </hive-server>
+    </services>
+  </hive-server>
+  <hbase>
+    <users>
+      <hbase>
+        <home>/var/run/hbase</home>
+        <descr>HBase User</descr>
+        <shell>/bin/bash</shell>
+      </hbase>
+    </users>
+    <deps>
+      <adduser/>
+    </deps>
+  </hbase>
+  <hbase-doc>
+</hbase-doc>
+  <hbase-master>
+    <deps>
+      <tag name="/bin/bash"/>
+      <tag name="/bin/sh"/>
+    </deps>
+    <services>
+      <hbase-master>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>start</oninstall>
+        <configured>true</configured>
+        <config>
+          <configfile>/etc/hbase/conf/hbase-site.xml</configfile>
+          <property>
+            <name>hbase.rootdir</name>
+            <value>hdfs://localhost/hbase</value>
+          </property>
+          <property> <!-- workaround for HBASE-5975 and HBASE-5963 -->
+            <name>hbase.cluster.distributed</name>
+            <value>true</value>
+          </property>
+        </config>
+      </hbase-master>
+    </services>
+  </hbase-master>
+  <hbase-regionserver>
+    <deps>
+      <tag name="/bin/bash"/>
+      <tag name="/bin/sh"/>
+    </deps>
+    <services>
+      <hbase-regionserver>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hbase-regionserver>
+    </services>
+  </hbase-regionserver>
+  <hbase-thrift>
+    <deps>
+      <tag name="/bin/bash"/>
+      <tag name="/bin/sh"/>
+    </deps>
+    <services>
+      <hbase-thrift>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>start</oninstall>
+        <configured>false</configured>
+      </hbase-thrift>
+    </services>
+  </hbase-thrift>
+  <hbase-rest>
+    <deps>
+      <hbase>/self</hbase>
+    </deps>
+    <services>
+      <hbase-rest>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>start</oninstall>
+        <configured>false</configured>
+      </hbase-rest>
+    </services>
+  </hbase-rest>
+  <hadoop>
+    <deps>
+      <libc6/>
+      <adduser/>
+    </deps>
+  </hadoop>
+  <hadoop-hdfs>
+    <users>
+      <hdfs>
+        <home>/var/lib/hadoop-hdfs</home>
+        <descr>Hadoop HDFS</descr>
+        <shell>/bin/bash</shell>
+      </hdfs>
+    </users>
+    <deps>
+      <libc6/>
+      <adduser/>
+    </deps>
+  </hadoop-hdfs>
+  <hadoop-hdfs-namenode>
+    <deps>
+      <tag name="/bin/sh"/>
+      <tag name="/bin/bash"/>
+      <tag name="/usr/bin/env"/>
+    </deps>
+    <services>
+      <hadoop-hdfs-namenode>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+        <init>true</init>
+      </hadoop-hdfs-namenode>
+    </services>
+  </hadoop-hdfs-namenode>
+  <hadoop-hdfs-zkfc>
+    <services>
+      <hadoop-hdfs-zkfc>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-hdfs-zkfc>
+    </services>
+  </hadoop-hdfs-zkfc>
+  <hadoop-hdfs-secondarynamenode>
+    <deps>
+      <tag name="/bin/sh"/>
+      <tag name="/bin/bash"/>
+      <tag name="/usr/bin/env"/>
+    </deps>
+    <services>
+      <hadoop-hdfs-secondarynamenode>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-hdfs-secondarynamenode>
+    </services>
+  </hadoop-hdfs-secondarynamenode>
+  <hadoop-hdfs-datanode>
+    <deps>
+      <tag name="/bin/sh"/>
+      <tag name="/bin/bash"/>
+      <tag name="/usr/bin/env"/>
+    </deps>
+    <services>
+      <hadoop-hdfs-datanode>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-hdfs-datanode>
+    </services>
+  </hadoop-hdfs-datanode>
+  <hadoop-yarn>
+    <users>
+      <yarn>
+        <home>/var/lib/hadoop-yarn</home>
+        <descr>Hadoop YARN</descr>
+        <shell>/bin/bash</shell>
+      </yarn>
+    </users>
+    <deps>
+      <libc6/>
+      <adduser/>
+    </deps>
+  </hadoop-yarn>
+  <hadoop-yarn-resourcemanager>
+    <deps>
+      <tag name="/bin/sh"/>
+      <tag name="/bin/bash"/>
+      <tag name="/usr/bin/env"/>
+    </deps>
+    <services>
+      <hadoop-yarn-resourcemanager>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-yarn-resourcemanager>
+    </services>
+  </hadoop-yarn-resourcemanager>
+  <hadoop-yarn-nodemanager>
+    <deps>
+      <tag name="/bin/sh"/>
+      <tag name="/bin/bash"/>
+      <tag name="/usr/bin/env"/>
+    </deps>
+    <services>
+      <hadoop-yarn-nodemanager>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-yarn-nodemanager>
+    </services>
+  </hadoop-yarn-nodemanager>
+  <hadoop-yarn-proxyserver>
+    <deps>
+      <tag name="/bin/sh"/>
+      <tag name="/bin/bash"/>
+    </deps>
+    <services>
+      <hadoop-yarn-proxyserver>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+        <config>
+          <configfile>/etc/hadoop/conf/yarn-site.xml</configfile>
+          <property>
+            <name>yarn.web-proxy.address</name>
+            <value>0.0.0.0:8032</value>
+          </property>
+       </config>
+      </hadoop-yarn-proxyserver>
+    </services>
+  </hadoop-yarn-proxyserver>
+  <hadoop-mapreduce-historyserver>
+    <deps>
+      <tag name="/bin/sh"/>
+      <tag name="/bin/bash"/>
+    </deps>
+    <services>
+      <hadoop-mapreduce-historyserver>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-mapreduce-historyserver>
+    </services>
+  </hadoop-mapreduce-historyserver>
+  <hadoop-mapreduce>
+    <users>
+      <mapred>
+        <home>/var/lib/hadoop-mapreduce</home>
+        <descr>Hadoop MapReduce</descr>
+        <shell>/bin/bash</shell>
+      </mapred>
+    </users>
+    <deps>
+      <adduser/>
+    </deps>
+  </hadoop-mapreduce>
+  <hadoop-httpfs>
+    <users>
+      <httpfs>
+        <home>/var/run/hadoop-httpfs</home>
+        <descr>Hadoop HTTPFS</descr>
+        <shell>/bin/bash</shell>
+      </httpfs>
+    </users>
+    <services>
+      <hadoop-httpfs>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-httpfs>
+    </services>
+  </hadoop-httpfs>
+  <libhdfs0>
+    <metadata>
+      <summary>Hadoop Filesystem Library</summary>
+      <description>Hadoop Filesystem Library</description>
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop>/self</hadoop>
+      <libc6>&gt;=2.4</libc6>
+    </deps>
+  </libhdfs0>
+  <libhdfs0-dev>
+    <metadata>
+      <summary>Development support for libhdfs0</summary>
+      <description>Includes examples and header files for accessing HDFS from C</description>
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop>/self</hadoop>
+      <libhdfs0>/self</libhdfs0>
+    </deps>
+  </libhdfs0-dev>
+  <hue-server>
+    <services>
+      <hue>
+        <runlevel>2</runlevel>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>start</oninstall>
+        <configured>true</configured>
+      </hue>
+     </services>
+  </hue-server>
 </packages>

Modified: incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/package_data.xml
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/package_data.xml?rev=1353693&r1=1353692&r2=1353693&view=diff
==============================================================================
--- incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/package_data.xml (original)
+++ incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/package_data.xml Mon Jun 25 18:23:02 2012
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -16,32 +16,32 @@
    limitations under the License.
 -->
 <packages>
-<bigtop-utils>
-  <metadata>
-    <summary>Collection of useful tools for Bigtop</summary>
-    <description>This includes a collection of useful tools and files for Bigtop</description>
-    <url>http://incubator.apache.org/bigtop/</url>
-  </metadata>
-</bigtop-utils>
-<bigtop-jsvc>
-  <metadata>
-    <summary>Application to launch java daemon</summary>
-    <description>jsvc executes classfile that implements a Daemon interface.</description>
-    <url>http://commons.apache.org/daemon/</url>
-  </metadata>
-</bigtop-jsvc>
-<bigtop-tomcat>
-  <metadata>
-    <summary>Apache Tomcat</summary>
-    <description>Apache Tomcat is an open source software implementation of the
+  <bigtop-utils>
+    <metadata>
+      <summary>Collection of useful tools for Bigtop</summary>
+      <description>This includes a collection of useful tools and files for Bigtop</description>
+      <url>http://incubator.apache.org/bigtop/</url>
+    </metadata>
+  </bigtop-utils>
+  <bigtop-jsvc>
+    <metadata>
+      <summary>Application to launch java daemon</summary>
+      <description>jsvc executes classfile that implements a Daemon interface.</description>
+      <url>http://commons.apache.org/daemon/</url>
+    </metadata>
+  </bigtop-jsvc>
+  <bigtop-tomcat>
+    <metadata>
+      <summary>Apache Tomcat</summary>
+      <description>Apache Tomcat is an open source software implementation of the
 Java Servlet and JavaServer Pages technologies.</description>
-    <url>http://tomcat.apache.org/</url>
-  </metadata>
-</bigtop-tomcat>
-<mahout>
-  <metadata>
-    <summary>A set of Java libraries for scalable machine learning.</summary>
-    <description>Mahout's goal is to build scalable machine learning libraries.
+      <url>http://tomcat.apache.org/</url>
+    </metadata>
+  </bigtop-tomcat>
+  <mahout>
+    <metadata>
+      <summary>A set of Java libraries for scalable machine learning.</summary>
+      <description>Mahout's goal is to build scalable machine learning libraries.
  With scalable we mean:
  .
  Scalable to reasonably large data sets. Our core algorithms for clustering,
@@ -56,44 +56,44 @@ Java Servlet and JavaServer Pages techno
  Scalable community. The goal of Mahout is to build a vibrant, responsive,
  diverse community to facilitate discussions not only on the project itself but
  also on potential use cases. Come to the mailing lists to find out more.</description>
-    <url>http://mahout.apache.org</url>
-  </metadata>
-  <deps>
-    <hadoop/>
-    <bigtop-utils/>
-  </deps>
-  <alternatives>
-    <mahout-conf>
-      <status>auto</status>
-      <link>/etc/mahout/conf</link>
-      <value>/etc/mahout/conf.dist</value>
-      <alt>/etc/mahout/conf.dist</alt>
-    </mahout-conf>
-  </alternatives>
-</mahout>
-<giraph>
-  <metadata>
-    <summary>Giraph is a BSP inspired graph processing platform that runs on Hadoop</summary>
-    <description>Giraph implements a graph processing platform to run large scale algorithms (such as page rank, shared connections, personalization-based popularity, etc.) on top of Hadoop infrastructure. Giraph builds upon the graph-oriented nature of Pregel but additionally adds fault-tolerance to the coordinator process with the use of ZooKeeper as its centralized coordination service.</description>
-    <url>http://incubator.apache.org/giraph/</url>
-  </metadata>
-  <deps>
-    <hadoop-client/>
-    <bigtop-utils/>
-  </deps>
-  <alternatives>
-    <giraph-conf>
-      <status>auto</status>
-      <link>/etc/giraph/conf</link>
-      <value>/etc/giraph/conf.dist</value>
-      <alt>/etc/giraph/conf.dist</alt>
-    </giraph-conf>
-  </alternatives>
-</giraph>
-<whirr>
-  <metadata>
-    <summary>Scripts and libraries for running software services on cloud infrastructure.</summary>
-    <description>Whirr provides
+      <url>http://mahout.apache.org</url>
+    </metadata>
+    <deps>
+      <hadoop/>
+      <bigtop-utils/>
+    </deps>
+    <alternatives>
+      <mahout-conf>
+        <status>auto</status>
+        <link>/etc/mahout/conf</link>
+        <value>/etc/mahout/conf.dist</value>
+        <alt>/etc/mahout/conf.dist</alt>
+      </mahout-conf>
+    </alternatives>
+  </mahout>
+  <giraph>
+    <metadata>
+      <summary>Giraph is a BSP inspired graph processing platform that runs on Hadoop</summary>
+      <description>Giraph implements a graph processing platform to run large scale algorithms (such as page rank, shared connections, personalization-based popularity, etc.) on top of Hadoop infrastructure. Giraph builds upon the graph-oriented nature of Pregel but additionally adds fault-tolerance to the coordinator process with the use of ZooKeeper as its centralized coordination service.</description>
+      <url>http://incubator.apache.org/giraph/</url>
+    </metadata>
+    <deps>
+      <hadoop-client/>
+      <bigtop-utils/>
+    </deps>
+    <alternatives>
+      <giraph-conf>
+        <status>auto</status>
+        <link>/etc/giraph/conf</link>
+        <value>/etc/giraph/conf.dist</value>
+        <alt>/etc/giraph/conf.dist</alt>
+      </giraph-conf>
+    </alternatives>
+  </giraph>
+  <whirr>
+    <metadata>
+      <summary>Scripts and libraries for running software services on cloud infrastructure.</summary>
+      <description>Whirr provides
  .
   * A cloud-neutral way to run services. You don't have to worry about the
     idiosyncrasies of each provider.
@@ -102,16 +102,16 @@ Java Servlet and JavaServer Pages techno
   * Smart defaults for services. You can get a properly configured system
     running quickly, while still being able to override settings as needed.
     </description>
-    <url>http://whirr.apache.org/</url>
-  </metadata>
-  <deps>
-    <bigtop-utils/>
-  </deps>
-</whirr>
-<flume>
-  <metadata>
-    <summary>Flume is a reliable, scalable, and manageable distributed log collection application for collecting data such as logs and delivering it to data stores such as Hadoop's HDFS.</summary>
-    <description>Flume is a reliable, scalable, and manageable distributed data collection
+      <url>http://whirr.apache.org/</url>
+    </metadata>
+    <deps>
+      <bigtop-utils/>
+    </deps>
+  </whirr>
+  <flume>
+    <metadata>
+      <summary>Flume is a reliable, scalable, and manageable distributed log collection application for collecting data such as logs and delivering it to data stores such as Hadoop's HDFS.</summary>
+      <description>Flume is a reliable, scalable, and manageable distributed data collection
  application for collecting data such as logs and delivering it to data stores
  such as Hadoop's HDFS.  It can efficiently collect, aggregate, and move large
  amounts of log data.  It has a simple, but flexible, architecture based on
@@ -119,89 +119,75 @@ Java Servlet and JavaServer Pages techno
  mechanisms and many failover and recovery mechanisms.  The system is centrally
  managed and allows for intelligent dynamic management.  It uses a simple
  extensible data model that allows for online analytic applications.</description>
-    <url>http://incubator.apache.org/projects/flume.html</url>
-  </metadata>
-  <deps>
-    <zookeeper/>
-    <hadoop/>
-    <bigtop-utils/>
-  </deps>
-  <groups>
-    <flume>
-      <user>flume</user>
-    </flume>
-  </groups>
-  <alternatives>
-    <flume-conf>
-      <status>auto</status>
-      <link>/etc/flume/conf</link>
-      <value>/etc/flume/conf.empty</value>
-      <alt>/etc/flume/conf.empty</alt>
-    </flume-conf>
-  </alternatives>
-</flume>
-<flume-node>
-  <metadata>
-    <summary>The flume node daemon is a core element of flume's data path and is responsible for generating, processing, and delivering data.</summary>
-    <description>Flume is a reliable, scalable, and manageable distributed data collection application for collecting data such as logs and delivering it to data stores such as Hadoop's HDFS. It can efficiently collect, aggregate, and move large amounts of log data. It has a simple, but flexible, architecture based on streaming data flows. It is robust and fault tolerant with tunable reliability mechanisms and many failover and recovery mechanisms. The system is centrally managed and allows for intelligent dynamic management. It uses a simple extensible data model that allows for online analytic applications.</description>
-    <url>http://incubator.apache.org/projects/flume.html</url>
-  </metadata>
-  <deps>
-    <flume>/self</flume>
-  </deps>
-  <services>
-    <flume-node>
-       <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>start</oninstall>
-       <configured>true</configured>
-    </flume-node>
-  </services>
-</flume-node>
-<sqoop>
-  <metadata>
-    <summary>Sqoop allows easy imports and exports of data sets between databases and the Hadoop Distributed File System (HDFS).</summary>
-    <description>Sqoop allows easy imports and exports of data sets between databases and the Hadoop Distributed File System (HDFS).</description>
-    <url>http://incubator.apache.org/sqoop/</url>
-  </metadata>
-  <deps>
-    <hadoop/>
-    <bigtop-utils/>
-  </deps>
-  <alternatives>
-    <sqoop-conf>
-      <status>auto</status>
-      <link>/etc/sqoop/conf</link>
-      <value>/etc/sqoop/conf.dist</value>
-      <alt>/etc/sqoop/conf.dist</alt>
-    </sqoop-conf>
-  </alternatives>
-</sqoop>
-<sqoop-metastore>
-  <metadata>
-    <summary>Shared metadata repository for Sqoop.</summary>
-    <description>Shared metadata repository for Sqoop. This optional package hosts a metadata server for Sqoop clients across a network to use.</description>
-    <url>http://incubator.apache.org/sqoop/</url>
-  </metadata>
-  <deps>
-    <sqoop>/self</sqoop>
-  </deps>
-  <services>
-    <sqoop-metastore>
-       <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>start</oninstall>
-       <configured>true</configured>
-    </sqoop-metastore>
-  </services>
-  <groups>
-    <sqoop>
-      <user>sqoop</user>
-    </sqoop>
-  </groups>
-</sqoop-metastore>
-<oozie>
-  <metadata>
-    <summary>Oozie is a system that runs workflows of Hadoop jobs.</summary>
-    <description> Oozie is a system that runs workflows of Hadoop jobs.
+      <url>http://incubator.apache.org/projects/flume.html</url>
+    </metadata>
+    <deps>
+      <zookeeper/>
+      <hadoop/>
+      <bigtop-utils/>
+    </deps>
+    <groups>
+      <flume>
+        <user>flume</user>
+      </flume>
+    </groups>
+    <alternatives>
+      <flume-conf>
+        <status>auto</status>
+        <link>/etc/flume/conf</link>
+        <value>/etc/flume/conf.empty</value>
+        <alt>/etc/flume/conf.empty</alt>
+      </flume-conf>
+    </alternatives>
+  </flume>
+  <flume-node>
+    <metadata>
+      <summary>The flume node daemon is a core element of flume's data path and is responsible for generating, processing, and delivering data.</summary>
+      <description>Flume is a reliable, scalable, and manageable distributed data collection application for collecting data such as logs and delivering it to data stores such as Hadoop's HDFS. It can efficiently collect, aggregate, and move large amounts of log data. It has a simple, but flexible, architecture based on streaming data flows. It is robust and fault tolerant with tunable reliability mechanisms and many failover and recovery mechanisms. The system is centrally managed and allows for intelligent dynamic management. It uses a simple extensible data model that allows for online analytic applications.</description>
+      <url>http://incubator.apache.org/projects/flume.html</url>
+    </metadata>
+    <deps>
+      <flume>/self</flume>
+    </deps>
+  </flume-node>
+  <sqoop>
+    <metadata>
+      <summary>Sqoop allows easy imports and exports of data sets between databases and the Hadoop Distributed File System (HDFS).</summary>
+      <description>Sqoop allows easy imports and exports of data sets between databases and the Hadoop Distributed File System (HDFS).</description>
+      <url>http://incubator.apache.org/sqoop/</url>
+    </metadata>
+    <deps>
+      <hadoop/>
+      <bigtop-utils/>
+    </deps>
+    <alternatives>
+      <sqoop-conf>
+        <status>auto</status>
+        <link>/etc/sqoop/conf</link>
+        <value>/etc/sqoop/conf.dist</value>
+        <alt>/etc/sqoop/conf.dist</alt>
+      </sqoop-conf>
+    </alternatives>
+  </sqoop>
+  <sqoop-metastore>
+    <metadata>
+      <summary>Shared metadata repository for Sqoop.</summary>
+      <description>Shared metadata repository for Sqoop. This optional package hosts a metadata server for Sqoop clients across a network to use.</description>
+      <url>http://incubator.apache.org/sqoop/</url>
+    </metadata>
+    <deps>
+      <sqoop>/self</sqoop>
+    </deps>
+    <groups>
+      <sqoop>
+        <user>sqoop</user>
+      </sqoop>
+    </groups>
+  </sqoop-metastore>
+  <oozie>
+    <metadata>
+      <summary>Oozie is a system that runs workflows of Hadoop jobs.</summary>
+      <description> Oozie is a system that runs workflows of Hadoop jobs.
  Oozie workflows are actions arranged in a control dependency DAG (Direct
  Acyclic Graph).
 
@@ -237,36 +223,29 @@ Java Servlet and JavaServer Pages techno
  In case of workflow job failure, the workflow job can be rerun skipping
  previously completed actions, the workflow application can be patched before
  being rerun.</description>
-    <url>http://incubator.apache.org/oozie/</url>
-  </metadata>
-  <deps>
-    <oozie-client>/self</oozie-client>
-  </deps>
-  <services>
-    <oozie>
-       <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>stop</oninstall>
-       <configured>true</configured>
-    </oozie>
-  </services>
-  <groups>
-    <oozie>
-      <user>oozie</user>
-    </oozie>
-  </groups>
-  <alternatives>
-    <oozie-conf>
-      <status>auto</status>
-      <link>/etc/oozie/conf</link>
-      <value>/etc/oozie/conf.dist</value>
-      <alt>/etc/oozie/conf.dist</alt>
-    </oozie-conf>
-  </alternatives>
-</oozie>
-<oozie-client>
-  <metadata>
-    <summary>Client for Oozie Workflow Engine</summary>
-    <description>Oozie client is a command line client utility that allows remote
+      <url>http://incubator.apache.org/oozie/</url>
+    </metadata>
+    <deps>
+      <oozie-client>/self</oozie-client>
+    </deps>
+    <groups>
+      <oozie>
+        <user>oozie</user>
+      </oozie>
+    </groups>
+    <alternatives>
+      <oozie-conf>
+        <status>auto</status>
+        <link>/etc/oozie/conf</link>
+        <value>/etc/oozie/conf.dist</value>
+        <alt>/etc/oozie/conf.dist</alt>
+      </oozie-conf>
+    </alternatives>
+  </oozie>
+  <oozie-client>
+    <metadata>
+      <summary>Client for Oozie Workflow Engine</summary>
+      <description>Oozie client is a command line client utility that allows remote
 administration and monitoring of worflows. Using this client
 utility you can submit worflows, start/suspend/resume/kill
 workflows and find out their status at any instance. Apart from
@@ -274,59 +253,51 @@ such operations, you can also change the
 system, get vesion information. This client utility also allows
 you to validate any worflows before they are deployed to the Oozie
 server.</description>
-    <url>http://incubator.apache.org/oozie/</url>
-  </metadata>
-  <deps>
-    <hadoop/>
-    <bigtop-utils/>
-  </deps>
-</oozie-client>
-<zookeeper>
-  <metadata>
-    <summary>A high-performance coordination service for distributed applications.</summary>
-    <description>ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services.  All of these kinds of services are used in some form or another by distributed applications. Each time they are implemented there is a lot of work that goes into fixing the bugs and race conditions that are inevitable. Because of the difficulty of implementing these kinds of services, applications initially usually skimp on them ,which make them brittle in the presence of change and difficult to manage. Even when done correctly, different implementations of these services lead to management complexity when the applications are deployed.
+      <url>http://incubator.apache.org/oozie/</url>
+    </metadata>
+    <deps>
+      <hadoop/>
+      <bigtop-utils/>
+    </deps>
+  </oozie-client>
+  <zookeeper>
+    <metadata>
+      <summary>A high-performance coordination service for distributed applications.</summary>
+      <description>ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services.  All of these kinds of services are used in some form or another by distributed applications. Each time they are implemented there is a lot of work that goes into fixing the bugs and race conditions that are inevitable. Because of the difficulty of implementing these kinds of services, applications initially usually skimp on them ,which make them brittle in the presence of change and difficult to manage. Even when done correctly, different implementations of these services lead to management complexity when the applications are deployed.
     </description>
-    <url>http://zookeeper.apache.org/</url>
-  </metadata>
-  <deps>
-    <bigtop-utils/>
-  </deps>
-  <groups>
-    <zookeeper>
-      <user>zookeeper</user>
-    </zookeeper>
-  </groups>
-  <alternatives>
-    <zookeeper-conf>
-      <status>auto</status>
-      <link>/etc/zookeeper/conf</link>
-      <value>/etc/zookeeper/conf.dist</value>
-      <alt>/etc/zookeeper/conf.dist</alt>
-    </zookeeper-conf>
-  </alternatives>
-</zookeeper>
-<zookeeper-server>
-  <metadata>
-    <summary>The Hadoop Zookeeper server</summary>
-    <description>This package starts the zookeeper server on startup</description>
-    <url>http://zookeeper.apache.org/</url>
-  </metadata>
-  <deps>
-    <zookeeper>/self</zookeeper>
-  </deps>
-  <services>
-    <zookeeper-server>
-      <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-      <oninstall>start</oninstall>
-      <configured>true</configured>
-      <init>true</init>
-    </zookeeper-server>
-  </services>
-</zookeeper-server>
-<pig>
-  <metadata>
-    <summary>Pig is a platform for analyzing large data sets</summary>
-    <description>Pig is a platform for analyzing large data sets that consists of a high-level language
+      <url>http://zookeeper.apache.org/</url>
+    </metadata>
+    <deps>
+      <bigtop-utils/>
+    </deps>
+    <groups>
+      <zookeeper>
+        <user>zookeeper</user>
+      </zookeeper>
+    </groups>
+    <alternatives>
+      <zookeeper-conf>
+        <status>auto</status>
+        <link>/etc/zookeeper/conf</link>
+        <value>/etc/zookeeper/conf.dist</value>
+        <alt>/etc/zookeeper/conf.dist</alt>
+      </zookeeper-conf>
+    </alternatives>
+  </zookeeper>
+  <zookeeper-server>
+    <metadata>
+      <summary>The Hadoop Zookeeper server</summary>
+      <description>This package starts the zookeeper server on startup</description>
+      <url>http://zookeeper.apache.org/</url>
+    </metadata>
+    <deps>
+      <zookeeper>/self</zookeeper>
+    </deps>
+  </zookeeper-server>
+  <pig>
+    <metadata>
+      <summary>Pig is a platform for analyzing large data sets</summary>
+      <description>Pig is a platform for analyzing large data sets that consists of a high-level language
  for expressing data analysis programs, coupled with infrastructure for evaluating these
  programs. The salient property of Pig programs is that their structure is amenable
  to substantial parallelization, which in turns enables them to handle very large data sets.
@@ -346,25 +317,25 @@ server.</description>
     automatically, allowing the user to focus on semantics rather than efficiency.
  * Extensibility
     Users can create their own functions to do special-purpose processing.</description>
-    <url>http://pig.apache.org/</url>
-  </metadata>
-  <deps>
-    <hadoop/>
-    <bigtop-utils/>
-  </deps>
-  <alternatives>
-    <pig-conf>
-      <status>auto</status>
-      <link>/etc/pig/conf</link>
-      <value>/etc/pig/conf.dist</value>
-      <alt>/etc/pig/conf.dist</alt>
-    </pig-conf>
-  </alternatives>
-</pig>
-<hive>
-  <metadata>
-    <summary>Hive is a data warehouse infrastructure built on top of Hadoop</summary>
-    <description>Hive is a data warehouse infrastructure built on top of Hadoop that
+      <url>http://pig.apache.org/</url>
+    </metadata>
+    <deps>
+      <hadoop/>
+      <bigtop-utils/>
+    </deps>
+    <alternatives>
+      <pig-conf>
+        <status>auto</status>
+        <link>/etc/pig/conf</link>
+        <value>/etc/pig/conf.dist</value>
+        <alt>/etc/pig/conf.dist</alt>
+      </pig-conf>
+    </alternatives>
+  </pig>
+  <hive>
+    <metadata>
+      <summary>Hive is a data warehouse infrastructure built on top of Hadoop</summary>
+      <description>Hive is a data warehouse infrastructure built on top of Hadoop that
  provides tools to enable easy data summarization, adhoc querying and
  analysis of large datasets data stored in Hadoop files. It provides a
  mechanism to put structure on this data and it also provides a simple
@@ -374,69 +345,55 @@ server.</description>
  plug in their custom mappers and reducers to do more sophisticated
  analysis which may not be supported by the built-in capabilities of
  the language.</description>
-    <url>http://hive.apache.org/</url>
-  </metadata>
-  <deps>
-    <hadoop/>
-    <bigtop-utils/>
-  </deps>
-  <alternatives>
-    <hive-conf>
-      <status>auto</status>
-      <value>/etc/hive/conf.dist</value>
-      <link>/etc/hive/conf</link>
-      <alt>/etc/hive/conf.dist</alt>
-    </hive-conf>
-  </alternatives>
-</hive>
-<hive-metastore>
-  <metadata>
-    <summary>Shared metadata repository for Hive.</summary>
-    <description>This optional package hosts a metadata server for Hive clients across a network to use.</description>
-    <url>http://hive.apache.org/</url>
-  </metadata>
-  <deps>
-    <hive>/self</hive>
-  </deps>
-  <services>
-    <hive-metastore>
-       <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>start</oninstall>
-       <configured>true</configured>
-    </hive-metastore>
-  </services>
-  <groups>
-    <hive>
-      <user>hive</user>
-    </hive>
-  </groups>
-</hive-metastore>
-<hive-server>
-  <metadata>
-    <summary>Provides a Hive Thrift service.</summary>
-    <description>This optional package hosts a Thrift server for Hive clients across a network to use.</description>
-    <url>http://hive.apache.org/</url>
-  </metadata>
-  <deps>
-    <hive>/self</hive>
-  </deps>
-  <services>
-    <hive-server>
-       <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>start</oninstall>
-       <configured>true</configured>
-    </hive-server>
-  </services>
-  <groups>
-    <hive>
-      <user>hive</user>
-    </hive>
-  </groups>
-</hive-server>
-<hbase>
-  <metadata>
-    <summary>HBase is the Hadoop database. Use it when you need random, realtime read/write access to your Big Data. This project's goal is the hosting of very large tables -- billions of rows X millions of columns -- atop clusters of commodity hardware.</summary>
-    <description>HBase is an open-source, distributed, column-oriented store modeled after Google' Bigtable: A Distributed Storage System for Structured Data by Chang et al. Just as Bigtable leverages the distributed data storage provided by the Google File System, HBase provides Bigtable-like capabilities on top of Hadoop. HBase includes:
+      <url>http://hive.apache.org/</url>
+    </metadata>
+    <deps>
+      <hadoop/>
+      <bigtop-utils/>
+    </deps>
+    <alternatives>
+      <hive-conf>
+        <status>auto</status>
+        <value>/etc/hive/conf.dist</value>
+        <link>/etc/hive/conf</link>
+        <alt>/etc/hive/conf.dist</alt>
+      </hive-conf>
+    </alternatives>
+  </hive>
+  <hive-metastore>
+    <metadata>
+      <summary>Shared metadata repository for Hive.</summary>
+      <description>This optional package hosts a metadata server for Hive clients across a network to use.</description>
+      <url>http://hive.apache.org/</url>
+    </metadata>
+    <deps>
+      <hive>/self</hive>
+    </deps>
+    <groups>
+      <hive>
+        <user>hive</user>
+      </hive>
+    </groups>
+  </hive-metastore>
+  <hive-server>
+    <metadata>
+      <summary>Provides a Hive Thrift service.</summary>
+      <description>This optional package hosts a Thrift server for Hive clients across a network to use.</description>
+      <url>http://hive.apache.org/</url>
+    </metadata>
+    <deps>
+      <hive>/self</hive>
+    </deps>
+    <groups>
+      <hive>
+        <user>hive</user>
+      </hive>
+    </groups>
+  </hive-server>
+  <hbase>
+    <metadata>
+      <summary>HBase is the Hadoop database. Use it when you need random, realtime read/write access to your Big Data. This project's goal is the hosting of very large tables -- billions of rows X millions of columns -- atop clusters of commodity hardware.</summary>
+      <description>HBase is an open-source, distributed, column-oriented store modeled after Google' Bigtable: A Distributed Storage System for Structured Data by Chang et al. Just as Bigtable leverages the distributed data storage provided by the Google File System, HBase provides Bigtable-like capabilities on top of Hadoop. HBase includes:
 
     * Convenient base classes for backing Hadoop MapReduce jobs with HBase tables
     * Query predicate push down via server side scan and get filters
@@ -446,114 +403,79 @@ server.</description>
     * Cascading source and sink modules
     * Extensible jruby-based (JIRB) shell
     * Support for exporting metrics via the Hadoop metrics subsystem to files or Ganglia; or via JMX</description>
-    <url>http://hbase.apache.org/</url>
-  </metadata>
-  <deps>
-    <zookeeper/>
-    <hadoop/>
-    <bigtop-utils/>
-  </deps>
-  <alternatives>
-    <hbase-conf>
-      <status>auto</status>
-      <value>/etc/hbase/conf.dist</value>
-      <link>/etc/hbase/conf</link>
-      <alt>/etc/hbase/conf.dist</alt>
-    </hbase-conf>
-  </alternatives>
-  <groups>
-    <hbase>
-      <user>hbase</user>
-    </hbase>
-  </groups>
-</hbase>
-<hbase-doc>
-  <metadata>
-    <summary>Hbase Documentation</summary>
-    <description>Documentation for Hbase</description>
-    <url>http://hbase.apache.org/</url>
-  </metadata>
-</hbase-doc>
-<hbase-master>
-  <metadata>
-    <summary>The Hadoop HBase master Server.</summary>
-    <description>HMaster is the "master server" for a HBase. There is only one HMaster for a single HBase deployment.</description>
-    <url>http://hbase.apache.org/</url>
-  </metadata>
-  <deps>
-    <hbase>/self</hbase>
-  </deps>
-  <services>
-    <hbase-master>
-       <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>start</oninstall>
-       <configured>true</configured>
-       <config>
-         <configfile>/etc/hbase/conf/hbase-site.xml</configfile>
-         <property>
-          <name>hbase.rootdir</name>
-          <value>hdfs://localhost/hbase</value>
-         </property>
-       </config>
-    </hbase-master>
-  </services>
-</hbase-master>
-<hbase-regionserver>
-  <metadata>
-    <summary>The Hadoop HBase RegionServer server.</summary>
-    <description>HRegionServer makes a set of HRegions available to clients. It checks in with the HMaster. There are many HRegionServers in a single HBase deployment.</description>
-    <url>http://hbase.apache.org/</url>
-  </metadata>
-  <deps>
-    <hbase>/self</hbase>
-  </deps>
-  <services>
-    <hbase-regionserver>
-       <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>stop</oninstall>
-       <configured>false</configured>
-    </hbase-regionserver>
-  </services>
-</hbase-regionserver>
-<hbase-thrift>
-  <metadata>
-    <summary>The Hadoop HBase Thrift Interface</summary>
-    <description>ThriftServer - this class starts up a Thrift server which implements the Hbase API specified in the Hbase.thrift IDL file.
+      <url>http://hbase.apache.org/</url>
+    </metadata>
+    <deps>
+      <zookeeper/>
+      <hadoop/>
+      <bigtop-utils/>
+    </deps>
+    <alternatives>
+      <hbase-conf>
+        <status>auto</status>
+        <value>/etc/hbase/conf.dist</value>
+        <link>/etc/hbase/conf</link>
+        <alt>/etc/hbase/conf.dist</alt>
+      </hbase-conf>
+    </alternatives>
+    <groups>
+      <hbase>
+        <user>hbase</user>
+      </hbase>
+    </groups>
+  </hbase>
+  <hbase-doc>
+    <metadata>
+      <summary>Hbase Documentation</summary>
+      <description>Documentation for Hbase</description>
+      <url>http://hbase.apache.org/</url>
+    </metadata>
+  </hbase-doc>
+  <hbase-master>
+    <metadata>
+      <summary>The Hadoop HBase master Server.</summary>
+      <description>HMaster is the "master server" for a HBase. There is only one HMaster for a single HBase deployment.</description>
+      <url>http://hbase.apache.org/</url>
+    </metadata>
+    <deps>
+      <hbase>/self</hbase>
+    </deps>
+  </hbase-master>
+  <hbase-regionserver>
+    <metadata>
+      <summary>The Hadoop HBase RegionServer server.</summary>
+      <description>HRegionServer makes a set of HRegions available to clients. It checks in with the HMaster. There are many HRegionServers in a single HBase deployment.</description>
+      <url>http://hbase.apache.org/</url>
+    </metadata>
+    <deps>
+      <hbase>/self</hbase>
+    </deps>
+  </hbase-regionserver>
+  <hbase-thrift>
+    <metadata>
+      <summary>The Hadoop HBase Thrift Interface</summary>
+      <description>ThriftServer - this class starts up a Thrift server which implements the Hbase API specified in the Hbase.thrift IDL file.
 "Thrift is a software framework for scalable cross-language services development. It combines a powerful software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, and Ruby. Thrift was developed at Facebook, and we are now releasing it as open source." For additional information, see http://developers.facebook.com/thrift/. Facebook has announced their intent to migrate Thrift into Apache Incubator.</description>
-    <url>http://hbase.apache.org/</url>
-  </metadata>
-  <deps>
-    <hbase>/self</hbase>
-  </deps>
-  <services>
-    <hbase-thrift>
-       <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>start</oninstall>
-       <configured>false</configured>
-    </hbase-thrift>
-  </services>
-</hbase-thrift>
-<hbase-rest>
-  <metadata>
-    <summary>The Apache HBase REST gateway</summary>
-    <description>The Apache HBase REST gateway</description>
-    <url>http://hbase.apache.org/</url>
-  </metadata>
-  <deps>
-    <hbase>/self</hbase>
-  </deps>
-  <services>
-    <hbase-rest>
-       <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-       <oninstall>start</oninstall>
-       <configured>false</configured>
-    </hbase-rest>
-  </services>
-</hbase-rest>
-<hadoop>
-  <metadata>
-    <summary>Hadoop is a software platform for processing vast amounts of data</summary>
-    <description>Hadoop is a software platform that lets one easily write and
+      <url>http://hbase.apache.org/</url>
+    </metadata>
+    <deps>
+      <hbase>/self</hbase>
+    </deps>
+  </hbase-thrift>
+  <hbase-rest>
+    <metadata>
+      <summary>The Apache HBase REST gateway</summary>
+      <description>The Apache HBase REST gateway</description>
+      <url>http://hbase.apache.org/</url>
+    </metadata>
+    <deps>
+      <hbase>/self</hbase>
+    </deps>
+  </hbase-rest>
+  <hadoop>
+    <metadata>
+      <summary>Hadoop is a software platform for processing vast amounts of data</summary>
+      <description>Hadoop is a software platform that lets one easily write and
 run applications that process vast amounts of data.
 
 Here's what makes Hadoop especially useful:
@@ -572,43 +494,43 @@ MapReduce divides applications into many
 multiple replicas of data blocks for reliability, placing them on compute
 nodes around the cluster. MapReduce can then process the data where it is
 located.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <bigtop-utils/>
-  </deps>
-  <alternatives>
-    <hadoop-conf>
-      <status>auto</status>
-      <link>/etc/hadoop/conf</link>
-      <value>/etc/hadoop/conf.empty</value>
-      <alt>/etc/hadoop/conf.empty</alt>
-    </hadoop-conf>
-  </alternatives>
-</hadoop>
-<hadoop-hdfs>
-  <metadata>
-    <summary>The Hadoop Distributed File System</summary>
-    <description>Hadoop Distributed File System (HDFS) is the primary storage system used by
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <bigtop-utils/>
+    </deps>
+    <alternatives>
+      <hadoop-conf>
+        <status>auto</status>
+        <link>/etc/hadoop/conf</link>
+        <value>/etc/hadoop/conf.empty</value>
+        <alt>/etc/hadoop/conf.empty</alt>
+      </hadoop-conf>
+    </alternatives>
+  </hadoop>
+  <hadoop-hdfs>
+    <metadata>
+      <summary>The Hadoop Distributed File System</summary>
+      <description>Hadoop Distributed File System (HDFS) is the primary storage system used by
 Hadoop applications. HDFS creates multiple replicas of data blocks and distributes
 them on compute nodes throughout a cluster to enable reliable, extremely rapid
 computations.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop>/self</hadoop>
-    <bigtop-utils/>
-  </deps>
-  <groups>
-    <hdfs>
-      <user>hdfs</user>
-    </hdfs>
-  </groups>
-</hadoop-hdfs>
-<hadoop-yarn>
-  <metadata>
-    <summary>The Hadoop NextGen MapReduce (YARN)</summary>
-    <description>YARN (Hadoop NextGen MapReduce) is a general purpose data-computation framework.
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop>/self</hadoop>
+      <bigtop-utils/>
+    </deps>
+    <groups>
+      <hdfs>
+        <user>hdfs</user>
+      </hdfs>
+    </groups>
+  </hadoop-hdfs>
+  <hadoop-yarn>
+    <metadata>
+      <summary>The Hadoop NextGen MapReduce (YARN)</summary>
+      <description>YARN (Hadoop NextGen MapReduce) is a general purpose data-computation framework.
 The fundamental idea of YARN is to split up the two major functionalities of the
 JobTracker, resource management and job scheduling/monitoring, into separate daemons:
 ResourceManager and NodeManager.
@@ -621,311 +543,247 @@ ApplicationMaster (AM).
 An ApplicationMaster is, in effect, a framework specific library and is tasked with
 negotiating resources from the ResourceManager and working with the NodeManager(s) to
 execute and monitor the tasks.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop>/self</hadoop>
-    <bigtop-utils/>
-  </deps>
-  <groups>
-    <yarn>
-      <user>yarn</user>
-    </yarn>
-  </groups>
-</hadoop-yarn>
-<hadoop-mapreduce>
-  <metadata>
-    <summary>The Hadoop MapReduce (MRv2)</summary>
-    <description>Hadoop MapReduce is a programming model and software framework for writing applications
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop>/self</hadoop>
+      <bigtop-utils/>
+    </deps>
+    <groups>
+      <yarn>
+        <user>yarn</user>
+      </yarn>
+    </groups>
+  </hadoop-yarn>
+  <hadoop-mapreduce>
+    <metadata>
+      <summary>The Hadoop MapReduce (MRv2)</summary>
+      <description>Hadoop MapReduce is a programming model and software framework for writing applications
 that rapidly process vast amounts of data in parallel on large clusters of compute nodes.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop-yarn>/self</hadoop-yarn>
-    <bigtop-utils/>
-  </deps>
-  <groups>
-    <mapred>
-      <user>mapred</user>
-    </mapred>
-  </groups>
-</hadoop-mapreduce>
-<hadoop-httpfs>
-  <metadata>
-    <summary>HTTPFS for Hadoop</summary>
-    <description>The server providing HTTP REST API support for the complete FileSystem/FileContext
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop-yarn>/self</hadoop-yarn>
+      <bigtop-utils/>
+    </deps>
+    <groups>
+      <mapred>
+        <user>mapred</user>
+      </mapred>
+    </groups>
+  </hadoop-mapreduce>
+  <hadoop-httpfs>
+    <metadata>
+      <summary>HTTPFS for Hadoop</summary>
+      <description>The server providing HTTP REST API support for the complete FileSystem/FileContext
 interface in HDFS.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop-hdfs>/self</hadoop-hdfs>
-    <bigtop-utils/>
-  </deps>
-  <groups>
-    <httpfs>
-      <user>httpfs</user>
-    </httpfs>
-  </groups>
-  <services>
-    <hadoop-httpfs>
-      <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-      <oninstall>stop</oninstall>
-      <configured>false</configured>
-    </hadoop-httpfs>
-  </services>
-</hadoop-httpfs>
-<hadoop-hdfs-namenode>
-  <metadata>
-    <summary>The Hadoop namenode manages the block locations of HDFS files</summary>
-    <description>The Hadoop Distributed Filesystem (HDFS) requires one unique server, the
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop-hdfs>/self</hadoop-hdfs>
+      <bigtop-utils/>
+    </deps>
+    <groups>
+      <httpfs>
+        <user>httpfs</user>
+      </httpfs>
+    </groups>
+  </hadoop-httpfs>
+  <hadoop-hdfs-namenode>
+    <metadata>
+      <summary>The Hadoop namenode manages the block locations of HDFS files</summary>
+      <description>The Hadoop Distributed Filesystem (HDFS) requires one unique server, the
 namenode, which manages the block locations of files on the filesystem.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop-hdfs>/self</hadoop-hdfs>
-  </deps>
-  <services>
-    <hadoop-hdfs-namenode>
-      <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-      <oninstall>stop</oninstall>
-      <configured>false</configured>
-      <init>true</init>
-    </hadoop-hdfs-namenode>
-  </services>
-</hadoop-hdfs-namenode>
-<hadoop-hdfs-zkfc>
-  <metadata>
-    <summary>Hadoop HDFS failover controller</summary>
-    <description>The Hadoop HDFS failover controller is a ZooKeeper client which also
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop-hdfs>/self</hadoop-hdfs>
+    </deps>
+  </hadoop-hdfs-namenode>
+  <hadoop-hdfs-zkfc>
+    <metadata>
+      <summary>Hadoop HDFS failover controller</summary>
+      <description>The Hadoop HDFS failover controller is a ZooKeeper client which also
 monitors and manages the state of the NameNode. Each of the machines
 which runs a NameNode also runs a ZKFC, and that ZKFC is responsible
 for: Health monitoring, ZooKeeper session management, ZooKeeper-based
 election.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop-hdfs>/self</hadoop-hdfs>
-  </deps>
-  <services>
-    <hadoop-hdfs-zkfc>
-      <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-      <oninstall>stop</oninstall>
-      <configured>false</configured>
-    </hadoop-hdfs-zkfc>
-  </services>
-</hadoop-hdfs-zkfc>
-<hadoop-hdfs-secondarynamenode>
-  <metadata>
-    <summary>Hadoop Secondary namenode</summary>
-    <description>The Secondary Name Node periodically compacts the Name Node EditLog
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop-hdfs>/self</hadoop-hdfs>
+    </deps>
+  </hadoop-hdfs-zkfc>
+  <hadoop-hdfs-secondarynamenode>
+    <metadata>
+      <summary>Hadoop Secondary namenode</summary>
+      <description>The Secondary Name Node periodically compacts the Name Node EditLog
 into a checkpoint.  This compaction ensures that Name Node restarts
 do not incur unnecessary downtime.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop-hdfs>/self</hadoop-hdfs>
-  </deps>
-  <services>
-    <hadoop-hdfs-secondarynamenode>
-      <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-      <oninstall>stop</oninstall>
-      <configured>false</configured>
-    </hadoop-hdfs-secondarynamenode>
-  </services>
-</hadoop-hdfs-secondarynamenode>
-<hadoop-hdfs-datanode>
-  <metadata>
-    <summary>Hadoop Data Node</summary>
-    <description>The Data Nodes in the Hadoop Cluster are responsible for serving up
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop-hdfs>/self</hadoop-hdfs>
+    </deps>
+  </hadoop-hdfs-secondarynamenode>
+  <hadoop-hdfs-datanode>
+    <metadata>
+      <summary>Hadoop Data Node</summary>
+      <description>The Data Nodes in the Hadoop Cluster are responsible for serving up
 blocks of data over the network to Hadoop Distributed Filesystem
 (HDFS) clients.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop-hdfs>/self</hadoop-hdfs>
-  </deps>
-  <services>
-    <hadoop-hdfs-datanode>
-      <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-      <oninstall>stop</oninstall>
-      <configured>false</configured>
-    </hadoop-hdfs-datanode>
-  </services>
-</hadoop-hdfs-datanode>
-<hadoop-yarn-resourcemanager>
-  <metadata>
-    <summary>YARN Resource Manager</summary>
-    <description>The resource manager manages the global assignment of compute resources to applications</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop-yarn>/self</hadoop-yarn>
-  </deps>
-  <services>
-    <hadoop-yarn-resourcemanager>
-      <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-      <oninstall>stop</oninstall>
-      <configured>false</configured>
-    </hadoop-yarn-resourcemanager>
-  </services>
-</hadoop-yarn-resourcemanager>
-<hadoop-yarn-nodemanager>
-  <metadata>
-    <summary>YARN Node Manager</summary>
-    <description>The NodeManager is the per-machine framework agent who is responsible for
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop-hdfs>/self</hadoop-hdfs>
+    </deps>
+  </hadoop-hdfs-datanode>
+  <hadoop-yarn-resourcemanager>
+    <metadata>
+      <summary>YARN Resource Manager</summary>
+      <description>The resource manager manages the global assignment of compute resources to applications</description>
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop-yarn>/self</hadoop-yarn>
+    </deps>
+  </hadoop-yarn-resourcemanager>
+  <hadoop-yarn-nodemanager>
+    <metadata>
+      <summary>YARN Node Manager</summary>
+      <description>The NodeManager is the per-machine framework agent that is responsible for
 containers, monitoring their resource usage (cpu, memory, disk, network) and
 reporting the same to the ResourceManager/Scheduler.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop-yarn>/self</hadoop-yarn>
-  </deps>
-  <services>
-    <hadoop-yarn-nodemanager>
-      <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-      <oninstall>stop</oninstall>
-      <configured>false</configured>
-    </hadoop-yarn-nodemanager>
-  </services>
-</hadoop-yarn-nodemanager>
-<hadoop-yarn-proxyserver>
-  <metadata>
-    <summary>YARN Web Proxy</summary>
-    <description>The web proxy server sits in front of the YARN application master web UI.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop-yarn>/self</hadoop-yarn>
-  </deps>
-  <services>
-    <hadoop-yarn-proxyserver>
-      <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-      <oninstall>stop</oninstall>
-      <configured>false</configured>
-    </hadoop-yarn-proxyserver>
-  </services>
-</hadoop-yarn-proxyserver>
-<hadoop-mapreduce-historyserver>
-  <metadata>
-    <summary>MapReduce History Server</summary>
-    <description>The History server keeps records of the different activities being performed on a Apache Hadoop cluster</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop-mapreduce>/self</hadoop-mapreduce>
-  </deps>
-  <services>
-    <hadoop-mapreduce-historyserver>
-      <runlevel>2</runlevel><runlevel>3</runlevel><runlevel>4</runlevel><runlevel>5</runlevel>
-      <oninstall>stop</oninstall>
-      <configured>false</configured>
-    </hadoop-mapreduce-historyserver>
-  </services>
-</hadoop-mapreduce-historyserver>
-<hadoop-conf-pseudo>
-  <metadata>
-    <summary>Pseudo-distributed Hadoop configuration</summary>
-    <description>Contains configuration files for a "pseudo-distributed" Hadoop deployment.
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop-yarn>/self</hadoop-yarn>
+    </deps>
+  </hadoop-yarn-nodemanager>
+  <hadoop-yarn-proxyserver>
+    <metadata>
+      <summary>YARN Web Proxy</summary>
+      <description>The web proxy server sits in front of the YARN application master web UI.</description>
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop-yarn>/self</hadoop-yarn>
+    </deps>
+  </hadoop-yarn-proxyserver>
+  <hadoop-mapreduce-historyserver>
+    <metadata>
+      <summary>MapReduce History Server</summary>
+      <description>The History server keeps records of the different activities being performed on an Apache Hadoop cluster</description>
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop-mapreduce>/self</hadoop-mapreduce>
+    </deps>
+  </hadoop-mapreduce-historyserver>
+  <hadoop-conf-pseudo>
+    <metadata>
+      <summary>Pseudo-distributed Hadoop configuration</summary>
+      <description>Contains configuration files for a "pseudo-distributed" Hadoop deployment.
 In this mode, each of the hadoop components runs as a separate Java process,
 but all on the same machine.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop>/self</hadoop>
-    <hadoop-hdfs-namenode>/self</hadoop-hdfs-namenode>
-    <hadoop-hdfs-datanode>/self</hadoop-hdfs-datanode>
-    <hadoop-hdfs-secondarynamenode>/self</hadoop-hdfs-secondarynamenode>
-    <hadoop-yarn-resourcemanager>/self</hadoop-yarn-resourcemanager>
-    <hadoop-yarn-nodemanager>/self</hadoop-yarn-nodemanager>
-    <hadoop-mapreduce-historyserver>/self</hadoop-mapreduce-historyserver>
-  </deps>
-</hadoop-conf-pseudo>
-<hadoop-doc>
-  <metadata>
-    <summary>Hadoop Documentation</summary>
-    <description>Documentation for Hadoop</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-</hadoop-doc>
-<hadoop-client>
-  <metadata>
-    <summary>Hadoop client side dependencies</summary>
-    <description>Installation of this package will provide you with all the dependencies for Hadoop clients.</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop>/self</hadoop>
-    <hadoop-hdfs>/self</hadoop-hdfs>
-    <hadoop-yarn>/self</hadoop-yarn>
-    <hadoop-mapreduce>/self</hadoop-mapreduce>
-  </deps>
-</hadoop-client>
-<hadoop-hdfs-fuse>
-  <metadata>
-    <summary>Mountable HDFS</summary>
-    <description>These projects (enumerated below) allow HDFS to be mounted (on most flavors of Unix) as a standard file system using</description>
-    <url>http://hadoop.apache.org/core/</url>
-  </metadata>
-  <deps>
-    <hadoop-client>/self</hadoop-client>
-    <hadoop>/self</hadoop>
-    <hadoop-libhdfs>/self</hadoop-libhdfs>
-  </deps>
-</hadoop-hdfs-fuse>
-<hue-common>
-  <metadata>
-    <summary>A browser-based desktop interface for Hadoop</summary>
-    <description>Hue is a browser-based desktop interface for interacting with Hadoop.
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop>/self</hadoop>
+      <hadoop-hdfs-namenode>/self</hadoop-hdfs-namenode>
+      <hadoop-hdfs-datanode>/self</hadoop-hdfs-datanode>
+      <hadoop-hdfs-secondarynamenode>/self</hadoop-hdfs-secondarynamenode>
+      <hadoop-yarn-resourcemanager>/self</hadoop-yarn-resourcemanager>
+      <hadoop-yarn-nodemanager>/self</hadoop-yarn-nodemanager>
+      <hadoop-mapreduce-historyserver>/self</hadoop-mapreduce-historyserver>
+    </deps>
+  </hadoop-conf-pseudo>
+  <hadoop-doc>
+    <metadata>
+      <summary>Hadoop Documentation</summary>
+      <description>Documentation for Hadoop</description>
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+  </hadoop-doc>
+  <hadoop-client>
+    <metadata>
+      <summary>Hadoop client side dependencies</summary>
+      <description>Installation of this package will provide you with all the dependencies for Hadoop clients.</description>
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop>/self</hadoop>
+      <hadoop-hdfs>/self</hadoop-hdfs>
+      <hadoop-yarn>/self</hadoop-yarn>
+      <hadoop-mapreduce>/self</hadoop-mapreduce>
+    </deps>
+  </hadoop-client>
+  <hadoop-hdfs-fuse>
+    <metadata>
+      <summary>Mountable HDFS</summary>
+      <description>These projects (enumerated below) allow HDFS to be mounted (on most flavors of Unix) as a standard file system using</description>
+      <url>http://hadoop.apache.org/core/</url>
+    </metadata>
+    <deps>
+      <hadoop-client>/self</hadoop-client>
+      <hadoop>/self</hadoop>
+      <hadoop-libhdfs>/self</hadoop-libhdfs>
+    </deps>
+  </hadoop-hdfs-fuse>
+  <hue-common>
+    <metadata>
+      <summary>A browser-based desktop interface for Hadoop</summary>
+      <description>Hue is a browser-based desktop interface for interacting with Hadoop.
 It supports a file browser, job tracker interface, cluster health monitor, and more.</description>
-    <url>http://github.com/cloudera/hue</url>
-  </metadata>
-  <deps>
-    <hue-server>/self</hue-server>
-    <hue-beeswax>/self</hue-beeswax>
-  </deps>
-  <alternatives>
-    <hue-conf>
-      <status>auto</status>
-      <link>/etc/hue/conf</link>
-      <value>/etc/hue/conf.empty</value>
-      <alt>/etc/hue/conf.empty</alt>
-    </hue-conf>
-  </alternatives>
-</hue-common>
-<hue-server>
-  <metadata>
-    <summary>Service Scripts for Hue</summary>
-    <description>This package provides the service scripts for Hue server.</description>
-    <url>http://github.com/cloudera/hue</url>
-  </metadata>
-  <deps>
-    <hue-common>/self</hue-common>
-  </deps>
-</hue-server>
-<hue-beeswax>
-  <metadata>
-    <summary>A UI for Hive on Hue</summary>
-    <description>Beeswax is a web interface for Hive.
+      <url>http://github.com/cloudera/hue</url>
+    </metadata>
+    <deps>
+      <hue-server>/self</hue-server>
+      <hue-beeswax>/self</hue-beeswax>
+    </deps>
+    <alternatives>
+      <hue-conf>
+        <status>auto</status>
+        <link>/etc/hue/conf</link>
+        <value>/etc/hue/conf.empty</value>
+        <alt>/etc/hue/conf.empty</alt>
+      </hue-conf>
+    </alternatives>
+  </hue-common>
+  <hue-server>
+    <metadata>
+      <summary>Service Scripts for Hue</summary>
+      <description>This package provides the service scripts for Hue server.</description>
+      <url>http://github.com/cloudera/hue</url>
+    </metadata>
+    <deps>
+      <hue-common>/self</hue-common>
+    </deps>
+  </hue-server>
+  <hue-beeswax>
+    <metadata>
+      <summary>A UI for Hive on Hue</summary>
+      <description>Beeswax is a web interface for Hive.
 
 It allows users to construct and run queries on Hive, manage tables,
 and import and export data.</description>
-    <url>http://github.com/cloudera/hue</url>
-  </metadata>
-  <deps>
-    <hue-common>/self</hue-common>
-    <hive/>
-    <make/>
-  </deps>
-</hue-beeswax>
-<hue>
-  <metadata>
-    <summary>The hue metapackage</summary>
-    <description>Hue is a browser-based desktop interface for interacting with Hadoop. It supports a file browser, job tracker interface, cluster health monitor, and more.</description>
-    <url>http://github.com/cloudera/hue</url>
-  </metadata>
-  <deps>
-    <hue-server>/self</hue-server>
-    <hue-beeswax>/self</hue-beeswax>
-  </deps>
-</hue>
+      <url>http://github.com/cloudera/hue</url>
+    </metadata>
+    <deps>
+      <hue-common>/self</hue-common>
+      <hive/>
+      <make/>
+    </deps>
+  </hue-beeswax>
+  <hue>
+    <metadata>
+      <summary>The hue metapackage</summary>
+      <description>Hue is a browser-based desktop interface for interacting with Hadoop. It supports a file browser, job tracker interface, cluster health monitor, and more.</description>
+      <url>http://github.com/cloudera/hue</url>
+    </metadata>
+    <deps>
+      <hue-server>/self</hue-server>
+      <hue-beeswax>/self</hue-beeswax>
+    </deps>
+  </hue>
 </packages>
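
A note for readers following BIGTOP-638: after this change each platform file (apt, yum, zypper) carries its own <services> block instead of inheriting one from the common package_data.xml. A minimal sketch of such an entry follows; the service name and values are illustrative placeholders, and the comments give one plausible reading of each field based on the entries in this diff, not an authoritative definition.

  <services>
    <some-daemon>                        <!-- hypothetical service name -->
      <runlevel>3</runlevel>             <!-- runlevels the init script is expected to be enabled on -->
      <runlevel>4</runlevel>
      <runlevel>5</runlevel>
      <oninstall>stop</oninstall>        <!-- expected state right after package install -->
      <configured>false</configured>     <!-- whether the service is usable with stock configuration -->
      <init>true</init>                  <!-- optional: marks services that need an explicit init step, as with hadoop-hdfs-namenode -->
    </some-daemon>
  </services>

Splitting the block per platform presumably lets the apt, yum and zypper packages declare different runlevels and defaults, which a single shared file could not express.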

Modified: incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/yum/package_data.xml
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/yum/package_data.xml?rev=1353693&r1=1353692&r2=1353693&view=diff
==============================================================================
--- incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/yum/package_data.xml (original)
+++ incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/package/src/main/resources/yum/package_data.xml Mon Jun 25 18:23:02 2012
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -58,6 +58,15 @@
       <sh-utils/>
       <textutils/>
     </deps>
+    <services>
+      <flume-node>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>true</configured>
+      </flume-node>
+    </services>
   </flume-node>
   <sqoop>
     <deps>
@@ -79,6 +88,16 @@
       <tag name="/bin/bash"/>
       <tag name="/bin/sh"/>
     </deps>
+    <services>
+      <sqoop-metastore>
+        <runlevel>2</runlevel> <!--workaround BIGTOP-644-->
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>true</configured>
+      </sqoop-metastore>
+    </services>
   </sqoop-metastore>
   <oozie>
     <users>
@@ -97,6 +116,16 @@
       <tag name="/usr/sbin/useradd"/>
       <jre>&gt;=1.6</jre>
     </deps>
+    <services>
+      <oozie>
+        <runlevel>2</runlevel> <!--workaround BIGTOP-644-->
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>true</configured>
+      </oozie>
+    </services>
   </oozie>
   <oozie-client>
     <deps>
@@ -123,6 +152,16 @@
       <tag name="/bin/sh"/>
       <redhat-lsb/>
     </deps>
+    <services>
+      <zookeeper-server>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>true</configured>
+        <init>true</init>
+      </zookeeper-server>
+    </services>
   </zookeeper-server>
   <pig>
     <deps>
@@ -146,6 +185,15 @@
         <shell>/sbin/nologin</shell>
       </hive>
     </users>
+    <services>
+      <hive-metastore>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>true</configured>
+      </hive-metastore>
+    </services>
   </hive-metastore>
   <hive-server>
     <users>
@@ -155,6 +203,15 @@
         <shell>/sbin/nologin</shell>
       </hive>
     </users>
+    <services>
+      <hive-server>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>true</configured>
+      </hive-server>
+    </services>
   </hive-server>
   <hbase>
     <users>
@@ -183,6 +240,27 @@
       <tag name="/bin/sh"/>
       <redhat-lsb/>
     </deps>
+    <services>
+      <hbase-master>
+        <runlevel>2</runlevel> <!--workaround BIGTOP-644--> 
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>true</configured>
+        <config>
+          <configfile>/etc/hbase/conf/hbase-site.xml</configfile>
+          <property>
+            <name>hbase.rootdir</name>
+            <value>hdfs://localhost/hbase</value>
+          </property>
+          <property> <!-- workaround for HBASE-5975 and HBASE-5963 -->
+            <name>hbase.cluster.distributed</name>
+            <value>true</value>
+          </property>
+        </config>
+      </hbase-master>
+    </services>
   </hbase-master>
   <hbase-regionserver>
     <deps>
@@ -190,6 +268,16 @@
       <tag name="/bin/sh"/>
       <redhat-lsb/>
     </deps>
+    <services>
+      <hbase-regionserver>
+        <runlevel>2</runlevel> <!--workaround BIGTOP-644-->
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hbase-regionserver>
+    </services>
   </hbase-regionserver>
   <hbase-thrift>
     <deps>
@@ -197,7 +285,33 @@
       <tag name="/bin/sh"/>
       <redhat-lsb/>
     </deps>
+    <services>
+      <hbase-thrift>
+        <runlevel>2</runlevel> <!--workaround BIGTOP-644-->
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hbase-thrift>
+    </services>
   </hbase-thrift>
+  <hbase-rest>
+    <deps>
+      <hbase>/self</hbase>
+      <redhat-lsb/>
+    </deps>
+    <services>
+      <hbase-rest>
+        <runlevel>2</runlevel> <!--workaround BIGTOP-644-->
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hbase-rest>
+    </services>
+  </hbase-rest>
   <hadoop>
     <deps>
       <sh-utils/>
@@ -228,13 +342,43 @@
       <tag name="/bin/bash"/>
       <tag name="/usr/bin/env"/>
     </deps>
+    <services>
+      <hadoop-hdfs-namenode>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+        <init>true</init>
+      </hadoop-hdfs-namenode>
+    </services>
   </hadoop-hdfs-namenode>
+  <hadoop-hdfs-zkfc>
+    <services>
+      <hadoop-hdfs-zkfc>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-hdfs-zkfc>
+    </services>
+  </hadoop-hdfs-zkfc>
   <hadoop-hdfs-secondarynamenode>
     <deps>
       <tag name="/bin/sh"/>
       <tag name="/bin/bash"/>
       <tag name="/usr/bin/env"/>
     </deps>
+    <services>
+      <hadoop-hdfs-secondarynamenode>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-hdfs-secondarynamenode>
+    </services>
   </hadoop-hdfs-secondarynamenode>
   <hadoop-hdfs-datanode>
     <deps>
@@ -242,6 +386,15 @@
       <tag name="/bin/bash"/>
       <tag name="/usr/bin/env"/>
     </deps>
+    <services>
+      <hadoop-hdfs-datanode>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-hdfs-datanode>
+    </services>
   </hadoop-hdfs-datanode>
   <hadoop-yarn>
     <users>
@@ -258,6 +411,15 @@
       <tag name="/bin/bash"/>
       <tag name="/usr/bin/env"/>
     </deps>
+    <services>
+      <hadoop-yarn-resourcemanager>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-yarn-resourcemanager>
+    </services>
   </hadoop-yarn-resourcemanager>
   <hadoop-yarn-nodemanager>
     <deps>
@@ -265,7 +427,53 @@
       <tag name="/bin/bash"/>
       <tag name="/usr/bin/env"/>
     </deps>
+    <services>
+      <hadoop-yarn-nodemanager>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-yarn-nodemanager>
+    </services>
   </hadoop-yarn-nodemanager>
+  <hadoop-yarn-proxyserver>
+    <deps>
+      <tag name="/bin/sh"/>
+      <tag name="/bin/bash"/>
+    </deps>
+    <services>
+      <hadoop-yarn-proxyserver>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+        <config>
+          <configfile>/etc/hadoop/conf/yarn-site.xml</configfile>
+          <property>
+            <name>yarn.web-proxy.address</name>
+            <value>0.0.0.0:8032</value>
+          </property>
+        </config>
+      </hadoop-yarn-proxyserver>
+    </services>
+  </hadoop-yarn-proxyserver>
+  <hadoop-mapreduce-historyserver>
+    <deps>
+      <tag name="/bin/sh"/>
+      <tag name="/bin/bash"/>
+    </deps>
+    <services>
+      <hadoop-mapreduce-historyserver>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-mapreduce-historyserver>
+    </services>
+  </hadoop-mapreduce-historyserver>
   <hadoop-mapreduce>
     <users>
       <mapred>
@@ -283,6 +491,15 @@
         <shell>/bin/bash</shell>
       </httpfs>
     </users>
+    <services>
+      <hadoop-httpfs>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>false</configured>
+      </hadoop-httpfs>
+    </services>
   </hadoop-httpfs>
   <hadoop-conf-pseudo>
     <deps>
@@ -311,4 +528,15 @@ package or when debugging this package.<
       <url>http://hadoop.apache.org/core/</url>
     </metadata>
   </hadoop-debuginfo>
+  <hue-server>
+    <services>
+      <hue>
+        <runlevel>3</runlevel>
+        <runlevel>4</runlevel>
+        <runlevel>5</runlevel>
+        <oninstall>stop</oninstall>
+        <configured>true</configured>
+      </hue>
+    </services>
+  </hue-server>
 </packages>
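
The yum entries above (hbase-master, hadoop-yarn-proxyserver) also show that a service may carry an inline <config> element naming a configuration file and expected properties. A minimal illustrative shape, with a made-up path and property rather than values from the committed file:

  <services>
    <another-daemon>
      <runlevel>3</runlevel>
      <runlevel>4</runlevel>
      <runlevel>5</runlevel>
      <oninstall>stop</oninstall>
      <configured>true</configured>
      <config>
        <configfile>/etc/example/conf/example-site.xml</configfile>  <!-- hypothetical file -->
        <property>
          <name>example.setting</name>                               <!-- hypothetical property -->
          <value>some-value</value>
        </property>
      </config>
    </another-daemon>
  </services>

Presumably the package tests consult this block to put the named property in place (or verify it) before exercising the service; the exact semantics live in the test harness rather than in this data file.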


