incubator-bigtop-commits mailing list archives

From: r..@apache.org
Subject: svn commit: r1326670 [7/47] - in /incubator/bigtop/trunk: ./ bigtop-deploy/puppet/manifests/ bigtop-deploy/puppet/modules/hadoop-hbase/manifests/ bigtop-deploy/puppet/modules/hadoop-hbase/templates/ bigtop-deploy/puppet/modules/hadoop-oozie/manifests/ ...
Date: Mon, 16 Apr 2012 16:10:32 GMT
Modified: incubator/bigtop/trunk/bigtop-packages/src/rpm/flume/SPECS/flume.spec
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-packages/src/rpm/flume/SPECS/flume.spec?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-packages/src/rpm/flume/SPECS/flume.spec (original)
+++ incubator/bigtop/trunk/bigtop-packages/src/rpm/flume/SPECS/flume.spec Mon Apr 16 16:10:22 2012
@@ -62,7 +62,7 @@ Source0: apache-%{name}-%{flume_base_ver
 Source1: do-component-build
 Source2: install_%{name}.sh
 Source3: %{name}-node.init
-Requires: coreutils, /usr/sbin/useradd, hadoop
+Requires: coreutils, /usr/sbin/useradd, hadoop-hdfs
 Requires: bigtop-utils
 BuildRequires: ant xml-commons xml-commons-apis
 
@@ -129,7 +129,8 @@ chmod 755 $init_file
 
 # Get rid of hadoop jar, and instead link to installed hadoop
 rm $RPM_BUILD_ROOT/usr/lib/flume/lib/hadoop-*
-ln -s /usr/lib/hadoop/hadoop-core.jar $RPM_BUILD_ROOT/usr/lib/flume/lib/hadoop-core.jar
+ln -s /usr/lib/hadoop/hadoop-common.jar $RPM_BUILD_ROOT/usr/lib/flume/lib/hadoop-common.jar
+ln -s /usr/lib/hadoop/hadoop-auth.jar $RPM_BUILD_ROOT/usr/lib/flume/lib/hadoop-auth.jar
 
 %pre
 getent group flume >/dev/null || groupadd -r flume
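
For context on the Requires and symlink changes above: with Hadoop 0.23 the monolithic
hadoop-core.jar is split up, so the flume package now depends on hadoop-hdfs and relinks
hadoop-common.jar and hadoop-auth.jar instead. A minimal sketch of checking the result on
an installed system, assuming the /usr/lib paths used in this spec and the default package
name:

    # list the hadoop jars shipped (as symlinks) inside the flume package
    rpm -ql flume | grep '/usr/lib/flume/lib/hadoop-'
    # each link should resolve into the hadoop package
    ls -l /usr/lib/flume/lib/hadoop-common.jar   # -> /usr/lib/hadoop/hadoop-common.jar
    ls -l /usr/lib/flume/lib/hadoop-auth.jar     # -> /usr/lib/hadoop/hadoop-auth.jar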

Added: incubator/bigtop/trunk/bigtop-packages/src/rpm/hadoop/SOURCES/.gitignore
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-packages/src/rpm/hadoop/SOURCES/.gitignore?rev=1326670&view=auto
==============================================================================
    (empty)

Modified: incubator/bigtop/trunk/bigtop-packages/src/rpm/hadoop/SPECS/hadoop.spec
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-packages/src/rpm/hadoop/SPECS/hadoop.spec?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-packages/src/rpm/hadoop/SPECS/hadoop.spec (original)
+++ incubator/bigtop/trunk/bigtop-packages/src/rpm/hadoop/SPECS/hadoop.spec Mon Apr 16 16:10:22 2012
@@ -23,16 +23,42 @@
 
 %define hadoop_name hadoop
 %define etc_hadoop /etc/%{name}
+%define etc_yarn /etc/yarn
+%define etc_httpfs /etc/%{name}-httpfs
 %define config_hadoop %{etc_hadoop}/conf
+%define config_yarn %{etc_yarn}/conf
+%define config_httpfs %{etc_httpfs}/conf
 %define lib_hadoop_dirname /usr/lib
 %define lib_hadoop %{lib_hadoop_dirname}/%{name}
+%define lib_httpfs %{lib_hadoop_dirname}/%{name}-httpfs
+%define lib_hdfs %{lib_hadoop_dirname}/%{name}-hdfs
+%define lib_yarn %{lib_hadoop_dirname}/%{name}-yarn
+%define lib_mapreduce %{lib_hadoop_dirname}/%{name}-mapreduce
 %define log_hadoop_dirname /var/log
 %define log_hadoop %{log_hadoop_dirname}/%{name}
+%define log_yarn %{log_hadoop_dirname}/%{name}-yarn
+%define log_hdfs %{log_hadoop_dirname}/%{name}-hdfs
+%define log_httpfs %{log_hadoop_dirname}/%{name}-httpfs
+%define log_mapreduce %{log_hadoop_dirname}/%{name}-mapreduce
+%define run_hadoop_dirname /var/run
+%define run_hadoop %{run_hadoop_dirname}/hadoop
+%define run_yarn %{run_hadoop_dirname}/%{name}-yarn
+%define run_hdfs %{run_hadoop_dirname}/%{name}-hdfs
+%define run_httpfs %{run_hadoop_dirname}/%{name}-httpfs
+%define run_mapreduce %{run_hadoop_dirname}/%{name}-mapreduce
+%define state_hadoop_dirname /var/lib
+%define state_hadoop %{state_hadoop_dirname}/hadoop
+%define state_yarn %{state_hadoop_dirname}/%{name}-yarn
+%define state_hdfs %{state_hadoop_dirname}/%{name}-hdfs
+%define state_mapreduce %{state_hadoop_dirname}/%{name}-mapreduce
 %define bin_hadoop %{_bindir}
 %define man_hadoop %{_mandir}
-%define src_hadoop /usr/src/%{name}
-%define hadoop_username mapred
-%define hadoop_services namenode secondarynamenode datanode jobtracker tasktracker
+%define doc_hadoop %{_docdir}/%{name}-%{hadoop_version}
+%define httpfs_services httpfs
+%define mapreduce_services mapreduce-historyserver
+%define hdfs_services hdfs-namenode hdfs-secondarynamenode hdfs-datanode
+%define yarn_services yarn-resourcemanager yarn-nodemanager yarn-proxyserver
+%define hadoop_services %{hdfs_services} %{mapreduce_services} %{yarn_services} %{httpfs_services}
 # Hadoop outputs built binaries into %{hadoop_build}
 %define hadoop_build_path build
 %define static_images_dir src/webapps/static/images
@@ -47,11 +73,12 @@
 # CentOS 5 does not have any dist macro
 # So I will suppose anything that is not Mageia or a SUSE will be a RHEL/CentOS/Fedora
 %if %{!?suse_version:1}0 && %{!?mgaversion:1}0
-# brp-repack-jars uses unzip to expand jar files
+
+# FIXME: brp-repack-jars uses unzip to expand jar files
 # Unfortunately aspectjtools-1.6.5.jar pulled by ivy contains some files and directories without any read permission
 # and make whole process to fail.
 # So for now brp-repack-jars is being deactivated until this is fixed.
-# See CDH-2151
+# See BIGTOP-294
 %define __os_install_post \
     /usr/lib/rpm/redhat/brp-compress ; \
     /usr/lib/rpm/redhat/brp-strip-static-archive %{__strip} ; \
@@ -59,6 +86,8 @@
     /usr/lib/rpm/brp-python-bytecompile ; \
     %{nil}
 
+
+%define libexecdir %{_libexecdir}
 %define doc_hadoop %{_docdir}/%{name}-%{hadoop_version}
 %define alternatives_cmd alternatives
 %global initd_dir %{_sysconfdir}/rc.d/init.d
@@ -78,12 +107,14 @@
     /usr/lib/rpm/brp-compress ; \
     %{nil}
 
+%define libexecdir /usr/lib
 %define doc_hadoop %{_docdir}/%{name}
 %define alternatives_cmd update-alternatives
 %global initd_dir %{_sysconfdir}/rc.d
 %endif
 
 %if  0%{?mgaversion}
+%define libexecdir /usr/libexec/
 %define doc_hadoop %{_docdir}/%{name}-%{hadoop_version}
 %define alternatives_cmd update-alternatives
 %global initd_dir %{_sysconfdir}/rc.d/init.d
@@ -110,16 +141,32 @@ Source0: %{name}-%{hadoop_base_version}.
 Source1: do-component-build
 Source2: install_%{name}.sh
 Source3: hadoop.default
-Source4: hadoop-init.tmpl
-Source5: hadoop-init.tmpl.suse
+Source4: hadoop-fuse.default
+Source5: httpfs.default
 Source6: hadoop.1
 Source7: hadoop-fuse-dfs.1
-Source8: hadoop-fuse.default
-Source9: hdfs.conf
-Source10: mapred.conf
-Buildroot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+Source8: hdfs.conf
+Source9: yarn.conf
+Source10: mapreduce.conf
+Source11: init.d.tmpl 
+Source12: hadoop-hdfs-namenode.svc
+Source13: hadoop-hdfs-datanode.svc
+Source14: hadoop-hdfs-secondarynamenode.svc
+Source15: hadoop-mapreduce-historyserver.svc
+Source16: hadoop-yarn-resourcemanager.svc
+Source17: hadoop-yarn-nodemanager.svc
+Source18: hadoop-httpfs.svc
+Source19: mapreduce.default
+Source20: hdfs.default
+Source21: yarn.default
+Source22: hadoop-layout.sh
+Buildroot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id} -u -n)
 BuildRequires: python >= 2.4, git, fuse-devel,fuse, automake, autoconf
-Requires: coreutils, /usr/sbin/useradd, /usr/sbin/usermod, /sbin/chkconfig, /sbin/service, bigtop-utils
+Requires: coreutils, /usr/sbin/useradd, /usr/sbin/usermod, /sbin/chkconfig, /sbin/service, bigtop-utils, zookeeper >= 3.4.0
+# Sadly, Sun/Oracle JDK in RPM form doesn't provide libjvm.so, which means we have
+# to set AutoReq to no in order to minimize confusion. Not ideal, but seems to work.
+# I wish there was a way to disable just one auto dependency (libjvm.so)
+AutoReq: no
 Provides: hadoop
 
 %if  %{?suse_version:1}0
@@ -163,71 +210,151 @@ multiple replicas of data blocks for rel
 nodes around the cluster. MapReduce can then process the data where it is
 located.
 
+%package hdfs
+Summary: The Hadoop Distributed File System
+Group: System/Daemons
+Requires: %{name} = %{version}-%{release}, bigtop-jsvc
 
-%package namenode
-Summary: The Hadoop namenode manages the block locations of HDFS files
+%description hdfs
+Hadoop Distributed File System (HDFS) is the primary storage system used by 
+Hadoop applications. HDFS creates multiple replicas of data blocks and distributes 
+them on compute nodes throughout a cluster to enable reliable, extremely rapid 
+computations.
+
+%package yarn
+Summary: The Hadoop NextGen MapReduce (YARN)
 Group: System/Daemons
 Requires: %{name} = %{version}-%{release}
+
+%description yarn
+YARN (Hadoop NextGen MapReduce) is a general purpose data-computation framework.
+The fundamental idea of YARN is to split up the two major functionalities of the 
+JobTracker, resource management and job scheduling/monitoring, into separate daemons:
+ResourceManager and NodeManager.
+
+The ResourceManager is the ultimate authority that arbitrates resources among all 
+the applications in the system. The NodeManager is a per-node slave managing allocation
+of computational resources on a single node. Both work in support of per-application 
+ApplicationMaster (AM).
+
+An ApplicationMaster is, in effect, a framework specific library and is tasked with 
+negotiating resources from the ResourceManager and working with the NodeManager(s) to 
+execute and monitor the tasks. 
+
+
+%package mapreduce
+Summary: The Hadoop MapReduce (MRv2)
+Group: System/Daemons
+Requires: %{name}-yarn = %{version}-%{release}
+
+%description mapreduce
+Hadoop MapReduce is a programming model and software framework for writing applications 
+that rapidly process vast amounts of data in parallel on large clusters of compute nodes.
+
+
+%package hdfs-namenode
+Summary: The Hadoop namenode manages the block locations of HDFS files
+Group: System/Daemons
+Requires: %{name}-hdfs = %{version}-%{release}
 Requires(pre): %{name} = %{version}-%{release}
 
-%description namenode
+%description hdfs-namenode
 The Hadoop Distributed Filesystem (HDFS) requires one unique server, the
 namenode, which manages the block locations of files on the filesystem.
 
 
-%package secondarynamenode
+%package hdfs-secondarynamenode
 Summary: Hadoop Secondary namenode
 Group: System/Daemons
-Requires: %{name} = %{version}-%{release}
+Requires: %{name}-hdfs = %{version}-%{release}
 Requires(pre): %{name} = %{version}-%{release}
 
-%description secondarynamenode
+%description hdfs-secondarynamenode
 The Secondary Name Node periodically compacts the Name Node EditLog
 into a checkpoint.  This compaction ensures that Name Node restarts
 do not incur unnecessary downtime.
 
 
-%package jobtracker
-Summary: Hadoop Job Tracker
+%package hdfs-datanode
+Summary: Hadoop Data Node
 Group: System/Daemons
-Requires: %{name} = %{version}-%{release}
+Requires: %{name}-hdfs = %{version}-%{release}
 Requires(pre): %{name} = %{version}-%{release}
 
-%description jobtracker
-The jobtracker is a central service which is responsible for managing
-the tasktracker services running on all nodes in a Hadoop Cluster.
-The jobtracker allocates work to the tasktracker nearest to the data
-with an available work slot.
+%description hdfs-datanode
+The Data Nodes in the Hadoop Cluster are responsible for serving up
+blocks of data over the network to Hadoop Distributed Filesystem
+(HDFS) clients.
 
+%package httpfs
+Summary: HTTPFS for Hadoop
+Group: System/Daemons
+Requires: %{name}-hdfs = %{version}-%{release}, bigtop-tomcat
+Requires(pre): %{name} = %{version}-%{release}
 
-%package datanode
-Summary: Hadoop Data Node
+%description httpfs
+The server providing HTTP REST API support for the complete FileSystem/FileContext
+interface in HDFS.
+
+%package yarn-resourcemanager
+Summary: Yarn Resource Manager
 Group: System/Daemons
-Requires: %{name} = %{version}-%{release}
+Requires: %{name}-yarn = %{version}-%{release}
 Requires(pre): %{name} = %{version}-%{release}
 
-%description datanode
-The Data Nodes in the Hadoop Cluster are responsible for serving up
-blocks of data over the network to Hadoop Distributed Filesystem
-(HDFS) clients.
+%description yarn-resourcemanager
+The resource manager manages the global assignment of compute resources to applications
+
+%package yarn-nodemanager
+Summary: Yarn Node Manager
+Group: System/Daemons
+Requires: %{name}-yarn = %{version}-%{release}
+Requires(pre): %{name} = %{version}-%{release}
 
+%description yarn-nodemanager
+The NodeManager is the per-machine framework agent who is responsible for
+containers, monitoring their resource usage (cpu, memory, disk, network) and
+reporting the same to the ResourceManager/Scheduler.
 
-%package tasktracker
-Summary: Hadoop Task Tracker
+%package yarn-proxyserver
+Summary: Yarn Web Proxy
 Group: System/Daemons
-Requires: %{name} = %{version}-%{release}
+Requires: %{name}-yarn = %{version}-%{release}
+Requires(pre): %{name} = %{version}-%{release}
+
+%description yarn-proxyserver
+The web proxy server sits in front of the YARN application master web UI.
+
+%package mapreduce-historyserver
+Summary: MapReduce History Server
+Group: System/Daemons
+Requires: %{name}-mapreduce = %{version}-%{release}
 Requires(pre): %{name} = %{version}-%{release}
 
-%description tasktracker
-The tasktracker has a fixed number of work slots.  The jobtracker
-assigns MapReduce work to the tasktracker that is nearest the data
-with an available work slot.
+%description mapreduce-historyserver
+The History server keeps records of the different activities being performed on a Apache Hadoop cluster
+
+%package client
+Summary: Hadoop client side dependencies
+Group: System/Daemons
+Requires: %{name} = %{version}-%{release}
+Requires: %{name}-hdfs = %{version}-%{release}
+Requires: %{name}-yarn = %{version}-%{release}
+Requires: %{name}-mapreduce = %{version}-%{release}
 
+%description client
+Installation of this package will provide you with all the dependencies for Hadoop clients.
 
 %package conf-pseudo
 Summary: Hadoop installation in pseudo-distributed mode
 Group: System/Daemons
-Requires: %{name} = %{version}-%{release}, %{name}-namenode = %{version}-%{release}, %{name}-datanode = %{version}-%{release}, %{name}-secondarynamenode = %{version}-%{release}, %{name}-tasktracker = %{version}-%{release}, %{name}-jobtracker = %{version}-%{release}
+Requires: %{name} = %{version}-%{release}
+Requires: %{name}-hdfs-namenode = %{version}-%{release}
+Requires: %{name}-hdfs-datanode = %{version}-%{release}
+Requires: %{name}-hdfs-secondarynamenode = %{version}-%{release}
+Requires: %{name}-yarn-resourcemanager = %{version}-%{release}
+Requires: %{name}-yarn-nodemanager = %{version}-%{release}
+Requires: %{name}-mapreduce-historyserver = %{version}-%{release}
 
 %description conf-pseudo
 Installation of this RPM will setup your machine to run in pseudo-distributed mode
@@ -239,81 +366,48 @@ Group: Documentation
 %description doc
 Documentation for Hadoop
 
-%package source
-Summary: Source code for Hadoop
-Group: System/Daemons
-AutoReq: no
-
-%description source
-The Java source code for Hadoop and its contributed packages. This is handy when
-trying to debug programs that depend on Hadoop.
-
-%package fuse
-Summary: Mountable HDFS
-Group: Development/Libraries
-Requires: %{name} = %{version}-%{release}, fuse
-AutoReq: no
-
-%if  %{?suse_version:1}0
-Requires: libfuse2
-%else
-Requires: fuse-libs
-%endif
-
-
-%description fuse
-These projects (enumerated below) allow HDFS to be mounted (on most flavors of Unix) as a standard file system using the mount command. Once mounted, the user can operate on an instance of hdfs using standard Unix utilities such as 'ls', 'cd', 'cp', 'mkdir', 'find', 'grep', or use standard Posix libraries like open, write, read, close from C, C++, Python, Ruby, Perl, Java, bash, etc.
-
-%package native
-Summary: Native libraries for Hadoop Compression
-Group: Development/Libraries
-Requires: %{name} = %{version}-%{release}
-AutoReq: no
-
-%description native
-Native libraries for Hadoop compression
-
 %package libhdfs
 Summary: Hadoop Filesystem Library
 Group: Development/Libraries
-Requires: %{name} = %{version}-%{release}
+Requires: %{name}-hdfs = %{version}-%{release}
 # TODO: reconcile libjvm
 AutoReq: no
 
 %description libhdfs
 Hadoop Filesystem Library
 
-%package pipes
-Summary: Hadoop Pipes Library
+
+%package hdfs-fuse
+Summary: Mountable HDFS
 Group: Development/Libraries
 Requires: %{name} = %{version}-%{release}
+Requires: %{name}-libhdfs = %{version}-%{release}
+Requires: %{name}-client = %{version}-%{release}
+Requires: fuse
+AutoReq: no
 
-%description pipes
-Hadoop Pipes Library
+%if %{?suse_version:1}0
+Requires: libfuse2
+%else
+Requires: fuse-libs
+%endif
 
-%package sbin
-Summary: Binaries for secured Hadoop clusters
-Group: System/Daemons
-Requires: %{name} = %{version}-%{release}
 
-%description sbin
-This package contains a setuid program, 'task-controller', which is used for
-launching MapReduce tasks in a secured MapReduce cluster. This program allows
-the tasks to run as the Unix user who submitted the job, rather than the
-Unix user running the MapReduce daemons.
-This package also contains 'jsvc', a daemon wrapper necessary to allow
-DataNodes to bind to a low (privileged) port and then drop root privileges
-before continuing operation.
+%description hdfs-fuse
+These projects (enumerated below) allow HDFS to be mounted (on most flavors of Unix) as a standard file system using
+
 
 %prep
-%setup -n %{name}-%{hadoop_base_version}
+# %setup -n %{name}-%{hadoop_base_version}-src 
+%setup -n apache-hadoop-common-8c0466d
+
 
 %build
 # This assumes that you installed Java JDK 6 and set JAVA_HOME
 # This assumes that you installed Java JDK 5 and set JAVA5_HOME
 # This assumes that you installed Forrest and set FORREST_HOME
 
-env HADOOP_VERSION=%{hadoop_version} HADOOP_ARCH=%{hadoop_arch} bash %{SOURCE1}
+env HADOOP_VERSION=%{hadoop_base_version} HADOOP_ARCH=%{hadoop_arch} bash %{SOURCE1}
 
 %clean
 %__rm -rf $RPM_BUILD_ROOT
@@ -328,11 +422,13 @@ env HADOOP_VERSION=%{hadoop_version} HAD
 
 bash %{SOURCE2} \
   --distro-dir=$RPM_SOURCE_DIR \
-  --build-dir=$PWD/build/%{name}-%{version} \
-  --src-dir=$RPM_BUILD_ROOT%{src_hadoop} \
-  --lib-dir=$RPM_BUILD_ROOT%{lib_hadoop} \
-  --system-lib-dir=%{_libdir} \
-  --etc-dir=$RPM_BUILD_ROOT%{etc_hadoop} \
+  --build-dir=$PWD/build \
+  --httpfs-dir=$RPM_BUILD_ROOT%{lib_httpfs} \
+  --system-include-dir=$RPM_BUILD_ROOT%{_includedir} \
+  --system-lib-dir=$RPM_BUILD_ROOT%{_libdir} \
+  --system-libexec-dir=$RPM_BUILD_ROOT/%{lib_hadoop}/libexec \
+  --hadoop-etc-dir=$RPM_BUILD_ROOT%{etc_hadoop} \
+  --httpfs-etc-dir=$RPM_BUILD_ROOT%{etc_httpfs} \
   --prefix=$RPM_BUILD_ROOT \
   --doc-dir=$RPM_BUILD_ROOT%{doc_hadoop} \
   --example-dir=$RPM_BUILD_ROOT%{doc_hadoop}/examples \
@@ -340,74 +436,75 @@ bash %{SOURCE2} \
   --installed-lib-dir=%{lib_hadoop} \
   --man-dir=$RPM_BUILD_ROOT%{man_hadoop} \
 
-%__mv -f $RPM_BUILD_ROOT/usr/share/doc/libhdfs-devel $RPM_BUILD_ROOT/%{_docdir}/libhdfs-%{hadoop_version}
+# Forcing Zookeeper dependency to be on the packaged jar
+%__ln_s -f /usr/lib/zookeeper/zookeeper.jar $RPM_BUILD_ROOT/%{lib_hadoop}/lib/zookeeper*.jar
 
 # Init.d scripts
 %__install -d -m 0755 $RPM_BUILD_ROOT/%{initd_dir}/
 
-
-%if  %{?suse_version:1}0
-orig_init_file=$RPM_SOURCE_DIR/hadoop-init.tmpl.suse
-%else
-orig_init_file=$RPM_SOURCE_DIR/hadoop-init.tmpl
-%endif
+# Install top level /etc/default files
+%__install -d -m 0755 $RPM_BUILD_ROOT/etc/default
+%__cp $RPM_SOURCE_DIR/hadoop.default $RPM_BUILD_ROOT/etc/default/hadoop
+# FIXME: BIGTOP-463
+echo 'export JSVC_HOME=%{libexecdir}/bigtop-utils' >> $RPM_BUILD_ROOT/etc/default/hadoop
+%__cp $RPM_SOURCE_DIR/%{name}-fuse.default $RPM_BUILD_ROOT/etc/default/%{name}-fuse
 
 # Generate the init.d scripts
 for service in %{hadoop_services}
 do
        init_file=$RPM_BUILD_ROOT/%{initd_dir}/%{name}-${service}
-       %__cp $orig_init_file $init_file
-       %__sed -i -e 's|@HADOOP_COMMON_ROOT@|%{lib_hadoop}|' $init_file
-       %__sed -i -e "s|@HADOOP_DAEMON@|${service}|" $init_file
-       %__sed -i -e 's|@HADOOP_CONF_DIR@|%{config_hadoop}|' $init_file
-
-
-       case "$service" in
-         hadoop_services|namenode|secondarynamenode|datanode)
-             %__sed -i -e 's|@HADOOP_DAEMON_USER@|hdfs|' $init_file
-             ;;
-         jobtracker|tasktracker)
-             %__sed -i -e 's|@HADOOP_DAEMON_USER@|mapred|' $init_file
-             ;;
-       esac
-
+       bash $RPM_SOURCE_DIR/init.d.tmpl $RPM_SOURCE_DIR/%{name}-${service}.svc > $init_file
        chmod 755 $init_file
+       cp $RPM_SOURCE_DIR/${service/-*/}.default $RPM_BUILD_ROOT/etc/default/%{name}-${service}
+       chmod 644 $RPM_BUILD_ROOT/etc/default/%{name}-${service}
 done
-%__install -d -m 0755 $RPM_BUILD_ROOT/etc/default
-%__cp $RPM_SOURCE_DIR/hadoop.default $RPM_BUILD_ROOT/etc/default/hadoop
-%__cp $RPM_SOURCE_DIR/hadoop-fuse.default $RPM_BUILD_ROOT/etc/default/hadoop-fuse
 
+# Install security limits
 %__install -d -m 0755 $RPM_BUILD_ROOT/etc/security/limits.d
-%__install -m 0644 %{SOURCE9} $RPM_BUILD_ROOT/etc/security/limits.d/hdfs.conf
-%__install -m 0644 %{SOURCE10} $RPM_BUILD_ROOT/etc/security/limits.d/mapred.conf
-
-# /var/lib/hadoop/cache
-%__install -d -m 1777 $RPM_BUILD_ROOT/var/lib/%{name}/cache
-# /var/log/hadoop
-%__install -d -m 0755 $RPM_BUILD_ROOT/var/log
-%__install -d -m 0775 $RPM_BUILD_ROOT/var/run/%{name}
-%__install -d -m 0775 $RPM_BUILD_ROOT/%{log_hadoop}
-
+%__install -m 0644 %{SOURCE8} $RPM_BUILD_ROOT/etc/security/limits.d/hdfs.conf
+%__install -m 0644 %{SOURCE9} $RPM_BUILD_ROOT/etc/security/limits.d/yarn.conf
+%__install -m 0644 %{SOURCE10} $RPM_BUILD_ROOT/etc/security/limits.d/mapreduce.conf
+
+# /var/lib/*/cache
+%__install -d -m 1777 $RPM_BUILD_ROOT/%{state_yarn}/cache
+%__install -d -m 1777 $RPM_BUILD_ROOT/%{state_hdfs}/cache
+%__install -d -m 1777 $RPM_BUILD_ROOT/%{state_mapreduce}/cache
+# /var/log/*
+%__install -d -m 0775 $RPM_BUILD_ROOT/%{log_yarn}
+%__install -d -m 0775 $RPM_BUILD_ROOT/%{log_hdfs}
+%__install -d -m 0775 $RPM_BUILD_ROOT/%{log_mapreduce}
+%__install -d -m 0775 $RPM_BUILD_ROOT/%{log_httpfs}
+# /var/run/*
+%__install -d -m 0775 $RPM_BUILD_ROOT/%{run_yarn}
+%__install -d -m 0775 $RPM_BUILD_ROOT/%{run_hdfs}
+%__install -d -m 0775 $RPM_BUILD_ROOT/%{run_mapreduce}
+%__install -d -m 0775 $RPM_BUILD_ROOT/%{run_httpfs}
 
 %pre
 getent group hadoop >/dev/null || groupadd -r hadoop
-getent group hdfs >/dev/null   || groupadd -r hdfs
-getent group mapred >/dev/null || groupadd -r mapred
-
-getent passwd mapred >/dev/null || /usr/sbin/useradd --comment "Hadoop MapReduce" --shell /bin/bash -M -r -g mapred -G hadoop --home %{lib_hadoop} mapred
 
-# Create an hdfs user if one does not already exist.
-getent passwd hdfs >/dev/null || /usr/sbin/useradd --comment "Hadoop HDFS" --shell /bin/bash -M -r -g hdfs -G hadoop --home %{lib_hadoop} hdfs
+%pre hdfs
+getent group hdfs >/dev/null   || groupadd -r hdfs
+getent passwd hdfs >/dev/null || /usr/sbin/useradd --comment "Hadoop HDFS" --shell /bin/bash -M -r -g hdfs -G hadoop --home %{state_hdfs} hdfs
 
+%pre httpfs 
+getent group httpfs >/dev/null   || groupadd -r httpfs
+getent passwd httpfs >/dev/null || /usr/sbin/useradd --comment "Hadoop HTTPFS" --shell /bin/bash -M -r -g httpfs -G httpfs --home %{run_httpfs} httpfs
+
+%pre yarn
+getent group yarn >/dev/null   || groupadd -r yarn
+getent passwd yarn >/dev/null || /usr/sbin/useradd --comment "Hadoop Yarn" --shell /bin/bash -M -r -g yarn -G hadoop --home %{state_yarn} yarn
+
+%pre mapreduce
+getent group mapred >/dev/null   || groupadd -r mapred
+getent passwd mapred >/dev/null || /usr/sbin/useradd --comment "Hadoop MapReduce" --shell /bin/bash -M -r -g mapred -G hadoop --home %{state_mapreduce} mapred
 
 %post
 %{alternatives_cmd} --install %{config_hadoop} %{name}-conf %{etc_hadoop}/conf.empty 10
-%{alternatives_cmd} --install %{bin_hadoop}/%{hadoop_name} %{hadoop_name}-default %{bin_hadoop}/%{name} 20 \
-  --slave %{log_hadoop_dirname}/%{hadoop_name} %{hadoop_name}-log %{log_hadoop} \
-  --slave %{lib_hadoop_dirname}/%{hadoop_name} %{hadoop_name}-lib %{lib_hadoop} \
-  --slave /etc/%{hadoop_name} %{hadoop_name}-etc %{etc_hadoop} \
-  --slave %{man_hadoop}/man1/%{hadoop_name}.1.*z %{hadoop_name}-man %{man_hadoop}/man1/%{name}.1.*z
 
+%post httpfs
+%{alternatives_cmd} --install %{config_httpfs} %{name}-httpfs-conf %{etc_httpfs}/conf.empty 10
+chkconfig --add %{name}-httpfs
 
 %preun
 if [ "$1" = 0 ]; then
@@ -417,43 +514,100 @@ if [ "$1" = 0 ]; then
      service hadoop-$service stop 1>/dev/null 2>/dev/null || :
   done
   %{alternatives_cmd} --remove %{name}-conf %{etc_hadoop}/conf.empty || :
-  %{alternatives_cmd} --remove %{hadoop_name}-default %{bin_hadoop}/%{name} || :
 fi
 
+%preun httpfs
+if [ $1 = 0 ]; then
+  service %{name}-httpfs stop > /dev/null 2>&1
+  chkconfig --del %{name}-httpfs
+  %{alternatives_cmd} --remove %{name}-httpfs-conf %{etc_httpfs}/conf.empty || :
+fi
+
+%postun httpfs
+if [ $1 -ge 1 ]; then
+  service %{name}-httpfs condrestart >/dev/null 2>&1
+fi
+
+
+%files yarn
+%defattr(-,root,root)
+%config(noreplace) %{etc_hadoop}/conf.empty/yarn-env.sh
+%config(noreplace) %{etc_hadoop}/conf.empty/yarn-site.xml
+%config(noreplace) /etc/security/limits.d/yarn.conf
+%{lib_hadoop}/libexec/yarn-config.sh
+%{lib_yarn}
+%attr(6050,root,yarn) %{lib_yarn}/bin/container-executor
+%{bin_hadoop}/yarn
+%attr(0775,yarn,hadoop) %{run_yarn}
+%attr(0775,yarn,hadoop) %{log_yarn}
+%attr(0775,yarn,hadoop) %{state_yarn}
+%attr(1777,yarn,hadoop) %{state_yarn}/cache
+
+%files hdfs
+%defattr(-,root,root)
+%config(noreplace) %{etc_hadoop}/conf.empty/hdfs-site.xml
+%config(noreplace) /etc/default/hadoop-fuse
+%config(noreplace) /etc/security/limits.d/hdfs.conf
+%{lib_hdfs}
+%{lib_hadoop}/libexec/hdfs-config.sh
+%{bin_hadoop}/hdfs
+%attr(0775,hdfs,hadoop) %{run_hdfs}
+%attr(0775,hdfs,hadoop) %{log_hdfs}
+%attr(0775,hdfs,hadoop) %{state_hdfs}
+%attr(1777,hdfs,hadoop) %{state_hdfs}/cache
+
+%files mapreduce
+%defattr(-,root,root)
+%config(noreplace) /etc/security/limits.d/mapreduce.conf
+%{lib_mapreduce}
+%{lib_hadoop}/libexec/mapred-config.sh
+%{bin_hadoop}/mapred
+%attr(0775,mapred,hadoop) %{run_mapreduce}
+%attr(0775,mapred,hadoop) %{log_mapreduce}
+%attr(0775,mapred,hadoop) %{state_mapreduce}
+%attr(1777,mapred,hadoop) %{state_mapreduce}/cache
+
+
 %files
 %defattr(-,root,root)
-%config(noreplace) %{etc_hadoop}/conf.empty
+%config(noreplace) %{etc_hadoop}/conf.empty/core-site.xml
+%config(noreplace) %{etc_hadoop}/conf.empty/hadoop-metrics.properties
+%config(noreplace) %{etc_hadoop}/conf.empty/hadoop-metrics2.properties
+%config(noreplace) %{etc_hadoop}/conf.empty/log4j.properties
+%config(noreplace) %{etc_hadoop}/conf.empty/slaves
+%config(noreplace) %{etc_hadoop}/conf.empty/ssl-client.xml.example
+%config(noreplace) %{etc_hadoop}/conf.empty/ssl-server.xml.example
 %config(noreplace) /etc/default/hadoop
-%config(noreplace) /etc/security/limits.d/hdfs.conf
-%config(noreplace) /etc/security/limits.d/mapred.conf
-%{lib_hadoop}
-%{bin_hadoop}/%{name}
-%{man_hadoop}/man1/hadoop.1.*z
-%attr(0775,root,hadoop) /var/run/%{name}
-%attr(0775,root,hadoop) %{log_hadoop}
-
-%exclude %{lib_hadoop}/lib/native
-%exclude %{lib_hadoop}/sbin/%{hadoop_arch}
-%exclude %{lib_hadoop}/bin/fuse_dfs
-# FIXME: The following is a workaround for BIGTOP-139
-%exclude %{lib_hadoop}/bin/task-controller
-%exclude %{lib_hadoop}/libexec/jsvc*
+%{lib_hadoop}/*.jar
+%{lib_hadoop}/lib
+%{lib_hadoop}/sbin
+%{lib_hadoop}/bin
+%{lib_hadoop}/etc
+%{lib_hadoop}/libexec/hadoop-config.sh
+%{lib_hadoop}/libexec/hadoop-layout.sh
+%{bin_hadoop}/hadoop
+%{man_hadoop}/man1/hadoop.1.*
 
 %files doc
 %defattr(-,root,root)
 %doc %{doc_hadoop}
 
-%files source
+%files httpfs
 %defattr(-,root,root)
-%{src_hadoop}
-
-
+%config(noreplace) %{etc_httpfs}/conf.empty
+%config(noreplace) /etc/default/%{name}-httpfs
+%{lib_hadoop}/libexec/httpfs-config.sh
+%{initd_dir}/%{name}-httpfs
+%{lib_httpfs}
+%attr(0775,httpfs,httpfs) %{run_httpfs}
+%attr(0775,httpfs,httpfs) %{log_httpfs}
 
 # Service file management RPMs
 %define service_macro() \
 %files %1 \
 %defattr(-,root,root) \
 %{initd_dir}/%{name}-%1 \
+%config(noreplace) /etc/default/%{name}-%1 \
 %post %1 \
 chkconfig --add %{name}-%1 \
 \
@@ -466,62 +620,44 @@ fi \
 if [ $1 -ge 1 ]; then \
   service %{name}-%1 condrestart >/dev/null 2>&1 \
 fi
-%service_macro namenode
-%service_macro secondarynamenode
-%service_macro datanode
-%service_macro jobtracker
-%service_macro tasktracker
+
+%service_macro hdfs-namenode
+%service_macro hdfs-secondarynamenode
+%service_macro hdfs-datanode
+%service_macro yarn-resourcemanager
+%service_macro yarn-nodemanager
+%service_macro yarn-proxyserver
+%service_macro mapreduce-historyserver
 
 # Pseudo-distributed Hadoop installation
 %post conf-pseudo
 %{alternatives_cmd} --install %{config_hadoop} %{name}-conf %{etc_hadoop}/conf.pseudo 30
 
-
-%files conf-pseudo
-%defattr(-,root,root)
-%config(noreplace) %attr(755,root,root) %{etc_hadoop}/conf.pseudo
-%dir %attr(0755,root,hadoop) /var/lib/%{name}
-%dir %attr(1777,root,hadoop) /var/lib/%{name}/cache
-
 %preun conf-pseudo
 if [ "$1" = 0 ]; then
         %{alternatives_cmd} --remove %{name}-conf %{etc_hadoop}/conf.pseudo
         rm -f %{etc_hadoop}/conf
 fi
 
-%files native
-%defattr(-,root,root)
-%{lib_hadoop}/lib/native
-
-%files fuse
+%files conf-pseudo
 %defattr(-,root,root)
-%config(noreplace) /etc/default/hadoop-fuse
-%attr(0755,root,root) %{lib_hadoop}/bin/fuse_dfs
-%attr(0755,root,root) %{lib_hadoop}/bin/fuse_dfs_wrapper.sh
-%attr(0755,root,root) %{bin_hadoop}/hadoop-fuse-dfs
-%attr(0644,root,root) %{man_hadoop}/man1/hadoop-fuse-dfs.1.*
-%config(noreplace) /etc/default/hadoop-fuse
+%config(noreplace) %attr(755,root,root) %{etc_hadoop}/conf.pseudo
 
-%files pipes
+%files client
 %defattr(-,root,root)
-%{_libdir}/libhadooppipes*
-%{_libdir}/libhadooputil*
-%{_includedir}/hadoop/*
+%{lib_hadoop}/client
 
 %files libhdfs
 %defattr(-,root,root)
 %{_libdir}/libhdfs*
 %{_includedir}/hdfs.h
 # -devel should be its own package
-%doc %{_docdir}/libhdfs-%{hadoop_version}
+#%doc %{_docdir}/libhdfs-%{hadoop_version}
 
-%files sbin
+%files fuse
 %defattr(-,root,root)
-%dir %{lib_hadoop}/sbin
-%dir %{lib_hadoop}/sbin/%{hadoop_arch}
-%attr(4750,root,mapred) %{lib_hadoop}/sbin/%{hadoop_arch}/task-controller
-%attr(0755,root,root) %{lib_hadoop}/sbin/%{hadoop_arch}/jsvc
-
-# FIXME: The following is a workaround for BIGTOP-139
-%attr(4750,root,mapred) %{lib_hadoop}/bin/task-controller
-%attr(0755,root,root) %{lib_hadoop}/libexec/jsvc*
+%attr(0755,root,root) %{lib_hadoop}/bin/fuse_dfs
+%attr(0755,root,root) %{lib_hadoop}/bin/fuse_dfs_wrapper.sh
+%attr(0755,root,root) %{bin_hadoop}/hadoop-fuse-dfs
+
+
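
A note on the init-script generation loop in the hadoop.spec hunk above: each service in
%{hadoop_services} gets its init script rendered from the shared init.d.tmpl plus a
per-service .svc descriptor (SOURCE11-SOURCE18), and its /etc/default file is picked by the
bash substitution ${service/-*/}, which strips everything from the first dash onward. A
sketch of one iteration, expanded for hdfs-namenode under the RHEL initd_dir:

    service=hdfs-namenode
    bash $RPM_SOURCE_DIR/init.d.tmpl $RPM_SOURCE_DIR/hadoop-hdfs-namenode.svc \
        > $RPM_BUILD_ROOT/etc/rc.d/init.d/hadoop-hdfs-namenode
    echo "${service/-*/}"    # prints "hdfs", so hdfs.default is copied to
                             # $RPM_BUILD_ROOT/etc/default/hadoop-hdfs-namenode

(httpfs has no dash, so it keeps httpfs.default; the yarn-* and mapreduce-* services resolve
to yarn.default and mapreduce.default the same way.)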

Modified: incubator/bigtop/trunk/bigtop-packages/src/rpm/hbase/SPECS/hbase.spec
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-packages/src/rpm/hbase/SPECS/hbase.spec?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-packages/src/rpm/hbase/SPECS/hbase.spec (original)
+++ incubator/bigtop/trunk/bigtop-packages/src/rpm/hbase/SPECS/hbase.spec Mon Apr 16 16:10:22 2012
@@ -26,7 +26,7 @@
 %define hbase_services master regionserver thrift rest
 %define hadoop_home /usr/lib/hadoop
 %define zookeeper_home /usr/lib/zookeeper
-%define hbase_jar_deps %{hadoop_home}/hadoop-core.jar %{zookeeper_home}/zookeeper.jar
+%define hbase_jar_deps_hadoop hadoop-annotations,hadoop-auth,hadoop-common,hadoop-hdfs,hadoop-mapreduce-client-common,hadoop-mapreduce-client-core,hadoop-yarn-api,hadoop-yarn-common 
 
 %if  %{?suse_version:1}0
 
@@ -51,6 +51,24 @@
 
 %else
 
+# CentOS 5 does not have any dist macro
+# So I will suppose anything that is not Mageia or a SUSE will be a RHEL/CentOS/Fedora
+%if %{!?mgaversion:1}0
+
+# FIXME: brp-repack-jars uses unzip to expand jar files
+# Unfortunately guice-2.0.jar pulled by ivy contains some files and directories without any read permission
+# and make whole process to fail.
+# So for now brp-repack-jars is being deactivated until this is fixed.
+# See BIGTOP-294
+%define __os_install_post \
+    /usr/lib/rpm/redhat/brp-compress ; \
+    /usr/lib/rpm/redhat/brp-strip-static-archive %{__strip} ; \
+    /usr/lib/rpm/redhat/brp-strip-comment-note %{__strip} %{__objdump} ; \
+    /usr/lib/rpm/brp-python-bytecompile ; \
+    %{nil}
+%endif
+
+
 %define doc_hbase %{_docdir}/%{name}-%{hbase_version}
 %global initd_dir %{_sysconfdir}/rc.d/init.d
 %define alternatives_cmd alternatives
@@ -73,9 +91,10 @@ Source3: hbase.sh
 Source4: hbase.sh.suse
 Source5: hbase.default
 Source6: hbase.nofiles.conf
+Patch0: HBASE-5212.patch
 BuildArch: noarch
 Requires: coreutils, /usr/sbin/useradd, /sbin/chkconfig, /sbin/service
-Requires: hadoop >= 0.20.2, zookeeper >= 3.3.1, bigtop-utils
+Requires: hadoop-hdfs, zookeeper >= 3.3.1, bigtop-utils
 
 %if  0%{?mgaversion}
 Requires: bsh-utils
@@ -217,6 +236,7 @@ The Apache HBase REST gateway
 
 %prep
 %setup -n %{name}-%{hbase_base_version}
+%patch0 -p0
 
 %build
 env HBASE_VERSION=%{version} bash %{SOURCE1}
@@ -260,9 +280,8 @@ done
 %__install -d -m 0755 $RPM_BUILD_ROOT/usr/bin
 
 # Pull zookeeper and hadoop from their packages
-rm -f $RPM_BUILD_ROOT/%{lib_hbase}/hadoop-*
-rm -f $RPM_BUILD_ROOT/%{lib_hbase}/zookeeper-*
-ln -f -s %{hbase_jar_deps} $RPM_BUILD_ROOT/%{lib_hbase}
+rm -f $RPM_BUILD_ROOT/%{lib_hbase}/{%{hbase_jar_deps_hadoop},zookeeper}*.jar
+ln -f -s %{hadoop_home}/{%{hbase_jar_deps_hadoop}}.jar %{zookeeper_home}/zookeeper.jar $RPM_BUILD_ROOT/%{lib_hbase}
 
 %pre
 getent group hbase 2>/dev/null >/dev/null || /usr/sbin/groupadd -r hbase
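
For clarity on the rm/ln pair above: %{hbase_jar_deps_hadoop} is a comma-separated list, so
after RPM macro expansion the shell's brace expansion fans the single command out to one
path per jar. An abbreviated sketch (two jars instead of eight; LIB_HBASE stands in for the
spec's $RPM_BUILD_ROOT/%{lib_hbase}):

    ln -f -s /usr/lib/hadoop/{hadoop-common,hadoop-hdfs}.jar /usr/lib/zookeeper/zookeeper.jar "$LIB_HBASE"
    # is equivalent to:
    ln -f -s /usr/lib/hadoop/hadoop-common.jar /usr/lib/hadoop/hadoop-hdfs.jar \
             /usr/lib/zookeeper/zookeeper.jar "$LIB_HBASE"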

Modified: incubator/bigtop/trunk/bigtop-packages/src/rpm/hive/SPECS/hive.spec
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-packages/src/rpm/hive/SPECS/hive.spec?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-packages/src/rpm/hive/SPECS/hive.spec (original)
+++ incubator/bigtop/trunk/bigtop-packages/src/rpm/hive/SPECS/hive.spec Mon Apr 16 16:10:22 2012
@@ -69,7 +69,8 @@ Source4: hive-site.xml
 Source5: hive-server.default
 Source6: hive-metastore.default
 Source7: hive.1
-Requires: hadoop >= 0.20.2, bigtop-utils
+Source8: hive-site.xml
+Requires: hadoop-client, bigtop-utils
 Obsoletes: %{name}-webinterface
 
 %description 

Modified: incubator/bigtop/trunk/bigtop-packages/src/rpm/mahout/SPECS/mahout.spec
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-packages/src/rpm/mahout/SPECS/mahout.spec?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-packages/src/rpm/mahout/SPECS/mahout.spec (original)
+++ incubator/bigtop/trunk/bigtop-packages/src/rpm/mahout/SPECS/mahout.spec Mon Apr 16 16:10:22 2012
@@ -44,7 +44,8 @@ License: ASL 2.0 
 Source0: %{name}-distribution-%{mahout_base_version}-src.tar.gz
 Source1: do-component-build 
 Source2: install_%{name}.sh
-Requires: hadoop >= 0.20.2, bigtop-utils
+Patch0: MAHOUT-822.patch
+Requires: hadoop-client, bigtop-utils
 
 
 %description 
@@ -66,6 +67,7 @@ also on potential use cases. Come to the
     
 %prep
 %setup -n %{name}-distribution-%{mahout_base_version}
+%patch0 -p0
 
 %build
 bash $RPM_SOURCE_DIR/do-component-build
@@ -73,7 +75,7 @@ bash $RPM_SOURCE_DIR/do-component-build
 %install
 %__rm -rf $RPM_BUILD_ROOT
 sh $RPM_SOURCE_DIR/install_mahout.sh \
-          --build-dir=distribution/target/mahout-distribution-%{mahout_base_version}/mahout-distribution-%{mahout_base_version} \
+          --build-dir=build \
           --prefix=$RPM_BUILD_ROOT \
           --doc-dir=%{doc_mahout} 
 rm -f $RPM_BUILD_ROOT/usr/lib/mahout/lib/hadoop*.jar

Modified: incubator/bigtop/trunk/bigtop-packages/src/rpm/oozie/SPECS/oozie.spec
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-packages/src/rpm/oozie/SPECS/oozie.spec?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-packages/src/rpm/oozie/SPECS/oozie.spec (original)
+++ incubator/bigtop/trunk/bigtop-packages/src/rpm/oozie/SPECS/oozie.spec Mon Apr 16 16:10:22 2012
@@ -59,10 +59,14 @@ Source2: install_oozie.sh
 Source3: oozie.1
 Source4: oozie-env.sh
 Source5: oozie.init
+Source6: catalina.properties
+Source7: context.xml
+Source8: hive.xml
+Patch0: patch
 Requires(pre): /usr/sbin/groupadd, /usr/sbin/useradd
-Requires(post): /sbin/chkconfig, hadoop
+Requires(post): /sbin/chkconfig
 Requires(preun): /sbin/chkconfig, /sbin/service
-Requires: zip, unzip, oozie-client = %{version}
+Requires: oozie-client = %{version}, hadoop-client, bigtop-tomcat
 BuildArch: noarch
 
 %description 
@@ -127,6 +131,7 @@ Requires: bigtop-utils
 
 %prep
 %setup -n oozie-%{oozie_base_version}
+%patch0 -p0
 
 %build
     mkdir -p distro/downloads
@@ -136,6 +141,11 @@ Requires: bigtop-utils
 %__rm -rf $RPM_BUILD_ROOT
     sh %{SOURCE2} --extra-dir=$RPM_SOURCE_DIR --build-dir=. --server-dir=$RPM_BUILD_ROOT --client-dir=$RPM_BUILD_ROOT --docs-dir=$RPM_BUILD_ROOT%{doc_oozie} --initd-dir=$RPM_BUILD_ROOT%{initd_dir} --conf-dir=$RPM_BUILD_ROOT%{conf_oozie_dist}
 
+%__ln_s -f %{data_oozie}/ext-2.2 $RPM_BUILD_ROOT/%{lib_oozie}/webapps/oozie/ext-2.2
+%__rm  -rf              $RPM_BUILD_ROOT/%{lib_oozie}/webapps/oozie/docs
+%__ln_s -f %{doc_oozie} $RPM_BUILD_ROOT/%{lib_oozie}/webapps/oozie/docs
+
+
 %__install -d -m 0755 $RPM_BUILD_ROOT/usr/bin
 
 %__install -d  -m 0755  %{buildroot}/%{_localstatedir}/log/oozie
@@ -169,17 +179,16 @@ fi
 
 %files 
 %defattr(-,root,root)
-%{lib_oozie}/bin/addtowar.sh
-%{lib_oozie}/bin/oozie-run.sh
-%{lib_oozie}/bin/oozie-setup.sh
-%{lib_oozie}/bin/oozie-start.sh
-%{lib_oozie}/bin/oozie-stop.sh
 %{lib_oozie}/bin/oozie-sys.sh
 %{lib_oozie}/bin/oozie-env.sh
 %{lib_oozie}/bin/oozied.sh
-%{lib_oozie}/oozie.war
+%{lib_oozie}/bin/ooziedb.sh
+%{lib_oozie}/webapps
+%{lib_oozie}/libtools
+%{lib_oozie}/libserver
 %{lib_oozie}/oozie-sharelib.tar.gz
 %{lib_oozie}/oozie-server
+%{lib_oozie}/libext
 %{initd_dir}/oozie
 %defattr(-, oozie, oozie)
 %dir %{_localstatedir}/log/oozie

Modified: incubator/bigtop/trunk/bigtop-packages/src/rpm/pig/SPECS/pig.spec
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-packages/src/rpm/pig/SPECS/pig.spec?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-packages/src/rpm/pig/SPECS/pig.spec (original)
+++ incubator/bigtop/trunk/bigtop-packages/src/rpm/pig/SPECS/pig.spec Mon Apr 16 16:10:22 2012
@@ -25,11 +25,11 @@
 # So I will suppose anything that is not Mageia or a SUSE will be a RHEL/CentOS/Fedora
 %if %{!?suse_version:1}0 && %{!?mgaversion:1}0
 
-# brp-repack-jars uses unzip to expand jar files
+# FIXME: brp-repack-jars uses unzip to expand jar files
 # Unfortunately aspectjtools-1.6.5.jar pulled by ivy contains some files and directories without any read permission
 # and make whole process to fail.
 # So for now brp-repack-jars is being deactivated until this is fixed.
-# See CDH-2151
+# See BIGTOP-294
 %define __os_install_post \
     /usr/lib/rpm/redhat/brp-compress ; \
     /usr/lib/rpm/redhat/brp-strip-static-archive %{__strip} ; \
@@ -81,7 +81,7 @@ Source2: install_pig.sh
 Source3: log4j.properties
 Source4: pig.1
 Source5: pig.properties
-Requires: hadoop, bigtop-utils
+Requires: hadoop-client, bigtop-utils
 
 %description 
 Pig is a platform for analyzing large data sets that consists of a high-level language 
@@ -107,7 +107,7 @@ language called Pig Latin, which has the
 
 
 %prep
-%setup -n pig-%{pig_base_version}
+%setup -n %{name}-%{pig_base_version}
 
 %build
 env PIG_BASE_VERSION=%{pig_base_version} bash %{SOURCE1}

Modified: incubator/bigtop/trunk/bigtop-packages/src/rpm/sqoop/SPECS/sqoop.spec
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-packages/src/rpm/sqoop/SPECS/sqoop.spec?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-packages/src/rpm/sqoop/SPECS/sqoop.spec (original)
+++ incubator/bigtop/trunk/bigtop-packages/src/rpm/sqoop/SPECS/sqoop.spec Mon Apr 16 16:10:22 2012
@@ -62,7 +62,7 @@ Source3: sqoop-metastore.sh
 Source4: sqoop-metastore.sh.suse
 Buildarch: noarch
 BuildRequires: asciidoc, xmlto
-Requires: hadoop, bigtop-utils
+Requires: hadoop-client, bigtop-utils
 
 %description 
 Sqoop allows easy imports and exports of data sets between databases and the Hadoop Distributed File System (HDFS).

Modified: incubator/bigtop/trunk/bigtop-test-framework/pom.xml
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-test-framework/pom.xml?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-test-framework/pom.xml (original)
+++ incubator/bigtop/trunk/bigtop-test-framework/pom.xml Mon Apr 16 16:10:22 2012
@@ -21,14 +21,14 @@
   <parent>
     <groupId>org.apache.bigtop</groupId>
     <artifactId>bigtop</artifactId>
-    <version>0.3.0-incubating-SNAPSHOT</version>
+    <version>0.4.0-incubating-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
     
   <groupId>org.apache.bigtop.itest</groupId>
   <artifactId>itest-common</artifactId>
   <name>iTest: system and integration testing in the cloud</name>
-  <version>0.3.0-incubating-SNAPSHOT</version>
+  <version>0.4.0-incubating-SNAPSHOT</version>
 
 
   <dependencies>

Modified: incubator/bigtop/trunk/bigtop-tests/test-artifacts/flume/pom.xml
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-tests/test-artifacts/flume/pom.xml?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-tests/test-artifacts/flume/pom.xml (original)
+++ incubator/bigtop/trunk/bigtop-tests/test-artifacts/flume/pom.xml Mon Apr 16 16:10:22 2012
@@ -21,19 +21,19 @@
   <parent>
     <groupId>org.apache.bigtop.itest</groupId>
     <artifactId>bigtop-smokes</artifactId>
-    <version>0.3.0-incubating-SNAPSHOT</version>
+    <version>0.4.0-incubating-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
   <groupId>org.apache.bigtop.itest</groupId>
   <artifactId>flume-smoke</artifactId>
-  <version>0.3.0-incubating-SNAPSHOT</version>
+  <version>0.4.0-incubating-SNAPSHOT</version>
 
   <name>flumesmoke</name>
 
   <dependencies>
      <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-core</artifactId>
+      <artifactId>hadoop-common</artifactId>
     </dependency>
   </dependencies>
 </project>

Modified: incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/pom.xml
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/pom.xml?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/pom.xml (original)
+++ incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/pom.xml Mon Apr 16 16:10:22 2012
@@ -20,24 +20,32 @@
   <parent>
     <groupId>org.apache.bigtop.itest</groupId>
     <artifactId>bigtop-smokes</artifactId>
-    <version>0.3.0-incubating-SNAPSHOT</version>
+    <version>0.4.0-incubating-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.bigtop.itest</groupId>
   <artifactId>hadoop-smoke</artifactId>
-  <version>0.3.0-incubating-SNAPSHOT</version>
-    <name>hadoopsmoke</name> 
+  <version>0.4.0-incubating-SNAPSHOT</version>
+  <name>hadoopsmoke</name>
 
   <dependencies>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-core</artifactId>
+      <artifactId>hadoop-mapreduce-client-core</artifactId>
+      <version>${hadoop.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-test</artifactId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <type>test-jar</type>
     </dependency>
   </dependencies>
 

Modified: incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopexamples/TestHadoopExamples.groovy
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopexamples/TestHadoopExamples.groovy?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopexamples/TestHadoopExamples.groovy (original)
+++ incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopexamples/TestHadoopExamples.groovy Mon Apr 16 16:10:22 2012
@@ -49,53 +49,52 @@ class TestHadoopExamples {
   }
   static final String HADOOP_EXAMPLES_JAR =
     HADOOP_HOME + "/" + hadoopExamplesJar;
-  private static final String hadoop = "$HADOOP_HOME/bin/hadoop";
 
   static Shell sh = new Shell("/bin/bash -s");
   private static final String EXAMPLES = "examples";
   private static final String EXAMPLES_OUT = "examples-output";
   private static Configuration conf;
-  private static String HADOOP_OPTIONS;
+
+  private static String mr_version = System.getProperty("mr.version", "mr2");
+  static final String RANDOMTEXTWRITER_TOTALBYTES = (mr_version == "mr1") ?
+      "test.randomtextwrite.total_bytes" : "mapreduce.randomtextwriter.totalbytes";
 
   @BeforeClass
   static void setUp() {
     conf = new Configuration();
-    conf.addResource('mapred-site.xml');
-    HADOOP_OPTIONS =
-      "-fs ${conf.get('fs.default.name')} -jt ${conf.get('mapred.job.tracker')}";
     // Unpack resource
     JarContent.unpackJarContainer(TestHadoopExamples.class, '.' , null)
 
-    sh.exec("$hadoop fs $HADOOP_OPTIONS -test -e $EXAMPLES");
+    sh.exec("hadoop fs -test -e $EXAMPLES");
     if (sh.getRet() == 0) {
-      sh.exec("$hadoop fs $HADOOP_OPTIONS -rmr -skipTrash $EXAMPLES");
+      sh.exec("hadoop fs -rmr -skipTrash $EXAMPLES");
       assertTrue("Deletion of previous $EXAMPLES from HDFS failed",
           sh.getRet() == 0);
     }
-    sh.exec("$hadoop fs $HADOOP_OPTIONS -test -e $EXAMPLES_OUT");
+    sh.exec("hadoop fs -test -e $EXAMPLES_OUT");
     if (sh.getRet() == 0) {
-      sh.exec("$hadoop fs $HADOOP_OPTIONS -rmr -skipTrash $EXAMPLES_OUT");
+      sh.exec("hadoop fs -rmr -skipTrash $EXAMPLES_OUT");
       assertTrue("Deletion of previous examples output from HDFS failed",
           sh.getRet() == 0);
     }
 
-// copy test files to HDFS
-    sh.exec("hadoop fs $HADOOP_OPTIONS -put $EXAMPLES $EXAMPLES",
-        "hadoop fs $HADOOP_OPTIONS -mkdir $EXAMPLES_OUT");
+    // copy test files to HDFS
+    sh.exec("hadoop fs -put $EXAMPLES $EXAMPLES",
+        "hadoop fs -mkdir $EXAMPLES_OUT");
     assertTrue("Could not create output directory", sh.getRet() == 0);
   }
 
   static Map examples =
     [
-        pi                :'20 10',
+        pi                :'2 1000',
         wordcount         :"$EXAMPLES/text $EXAMPLES_OUT/wordcount",
         multifilewc       :"$EXAMPLES/text $EXAMPLES_OUT/multifilewc",
-//        aggregatewordcount:"$EXAMPLES/text $EXAMPLES_OUT/aggregatewordcount 5 textinputformat",
-//        aggregatewordhist :"$EXAMPLES/text $EXAMPLES_OUT/aggregatewordhist 5 textinputformat",
+        aggregatewordcount:"$EXAMPLES/text $EXAMPLES_OUT/aggregatewordcount 2 textinputformat",
+        aggregatewordhist :"$EXAMPLES/text $EXAMPLES_OUT/aggregatewordhist 2 textinputformat",
         grep              :"$EXAMPLES/text $EXAMPLES_OUT/grep '[Cc]uriouser'",
-        sleep             :"-m 10 -r 10",
+//        sleep             :"-m 10 -r 10",
         secondarysort     :"$EXAMPLES/ints $EXAMPLES_OUT/secondarysort",
-        randomtextwriter  :"-Dtest.randomtextwrite.total_bytes=1073741824 $EXAMPLES_OUT/randomtextwriter"
+        randomtextwriter  :"-D $RANDOMTEXTWRITER_TOTALBYTES=1073741824 $EXAMPLES_OUT/randomtextwriter"
     ];
 
   private String testName;
@@ -117,7 +116,7 @@ class TestHadoopExamples {
 
   @Test
   void testMRExample() {
-    sh.exec("$hadoop jar $testJar $testName $HADOOP_OPTIONS $testArgs");
+    sh.exec("hadoop jar $testJar $testName $testArgs");
 
     assertTrue("Example $testName failed", 
                sh.getRet() == 0);
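
The test above now chooses the randomtextwriter byte-count property name from the
mr.version system property (defaulting to mr2). A sketch of selecting the MapReduce version
when running the suite; the Maven invocation is an assumption about how the smokes are
launched, only -Dmr.version is read by the test itself:

    mvn test -Dmr.version=mr2   # uses mapreduce.randomtextwriter.totalbytes (default)
    mvn test -Dmr.version=mr1   # falls back to test.randomtextwrite.total_bytes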

Modified: incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopsmoke/TestHadoopSmoke.groovy
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopsmoke/TestHadoopSmoke.groovy?rev=1326670&r1=1326669&r2=1326670&view=diff
==============================================================================
--- incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopsmoke/TestHadoopSmoke.groovy (original)
+++ incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopsmoke/TestHadoopSmoke.groovy Mon Apr 16 16:10:22 2012
@@ -24,26 +24,31 @@ import org.junit.AfterClass
 import org.junit.BeforeClass
 import org.junit.Test
 import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hdfs.DFSConfigKeys
 import static org.junit.Assert.assertEquals
-
-// TODO: we have to stub it for 0.20.2 release, once we move to 0.21+ this can go
-// import org.apache.hadoop.hdfs.DFSConfigKeys
-class DFSConfigKeys {
-  static public final FS_DEFAULT_NAME_KEY = "fs.default.name";
-}
+import static org.junit.Assert.assertNotNull
 
 class TestHadoopSmoke {
   static Shell sh = new Shell("/bin/bash -s")
 
-  static String hadoopHome   = System.getProperty('HADOOP_HOME', '/thisfileneverwillexist')
-  static String testDir      = "test.hadoopsmoke." + (new Date().getTime())
-  static String nn           = (new Configuration()).get(DFSConfigKeys.FS_DEFAULT_NAME_KEY)
-
-  String cmd = "hadoop  jar ${hadoopHome}/contrib/streaming/hadoop*streaming*.jar" +
-                 " -D mapred.map.tasks=1 -D mapred.reduce.tasks=1 -D mapred.job.name=Experiment "
-  String cmd2 =" -input  ${testDir}/cachefile/input.txt -mapper map.sh -file map.sh -reducer cat" +
-                 " -output ${testDir}/cachefile/out -verbose "
-  String arg = "${nn}/user/${System.properties['user.name']}/${testDir}/cachefile/cachedir.jar#testlink "
+  static String hadoopHome = System.getProperty('HADOOP_HOME', '/usr/lib/hadoop')
+  static String streamingHome = System.getenv('STREAMING_HOME')
+  static final String STREAMING_HOME =
+    (streamingHome == null) ? hadoopHome + "/contrib/streaming" : streamingHome;
+  static String streaming_jar =
+    JarContent.getJarName(STREAMING_HOME, 'hadoop.*streaming.*.jar');
+  static {
+    assertNotNull("Can't find hadoop-streaming.jar", streaming_jar);
+  }
+  static final String STREAMING_JAR = STREAMING_HOME + "/" + streaming_jar;
+  static String testDir = "test.hadoopsmoke." + (new Date().getTime())
+  static String nn = (new Configuration()).get(DFSConfigKeys.FS_DEFAULT_NAME_KEY)
+
+  String cmd = "hadoop jar ${STREAMING_JAR}" +
+      " -D mapred.map.tasks=1 -D mapred.reduce.tasks=1 -D mapred.job.name=Experiment"
+  String cmd2 = " -input ${testDir}/cachefile/input.txt -mapper map.sh -file map.sh -reducer cat" +
+      " -output ${testDir}/cachefile/out -verbose"
+  String arg = "${nn}/user/${System.properties['user.name']}/${testDir}/cachefile/cachedir.jar#testlink"
 
   @BeforeClass
   static void  setUp() throws IOException {
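
TestHadoopSmoke now locates the streaming jar via the STREAMING_HOME environment variable,
falling back to <HADOOP_HOME>/contrib/streaming (HADOOP_HOME itself is a JVM system property
defaulting to /usr/lib/hadoop). A sketch of pointing the test at a relocated jar; the
MRv2-style path is an assumption about the install layout:

    export STREAMING_HOME=/usr/lib/hadoop-mapreduce   # directory containing hadoop*streaming*.jar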

Added: incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/FSCmdExecutor.java
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/FSCmdExecutor.java?rev=1326670&view=auto
==============================================================================
--- incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/FSCmdExecutor.java (added)
+++ incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/FSCmdExecutor.java Mon Apr 16 16:10:22 2012
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.bigtop.itest.hadooptests;
+
+import java.io.File;
+import java.util.StringTokenizer;
+
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.cli.CLITestHelper;
+import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.util.ToolRunner;
+
+public class FSCmdExecutor extends CommandExecutor {
+  protected String namenode = null;
+  protected FsShell shell = null;
+
+  public FSCmdExecutor(String namenode, FsShell shell) {
+    this.namenode = namenode;
+    this.shell = shell;
+  }
+
+  protected void execute(final String cmd) throws Exception{
+    String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
+    ToolRunner.run(shell, args);
+  }
+
+  @Override
+  protected String[] getCommandAsArgs(final String cmd, final String masterKey,
+                                      final String master) {
+    StringTokenizer tokenizer = new StringTokenizer(cmd, " ");
+    String[] args = new String[tokenizer.countTokens()];
+    int i = 0;
+    while (tokenizer.hasMoreTokens()) {
+      args[i] = tokenizer.nextToken();
+      args[i] = args[i].replaceAll(masterKey, master);
+      args[i] = args[i].replaceAll("CLITEST_DATA", 
+        new File(CLITestHelper.TEST_CACHE_DATA_DIR).toURI().toString().replace(' ', '+'));
+      args[i] = args[i].replaceAll("TEST_DIR_ABSOLUTE", TestCLI.TEST_DIR_ABSOLUTE);
+      args[i] = args[i].replaceAll("USERNAME", System.getProperty("user.name"));
+
+      i++;
+    }
+    return args;
+  }
+}
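
FSCmdExecutor above tokenizes each command from testConf.xml and rewrites the NAMENODE,
CLITEST_DATA, TEST_DIR_ABSOLUTE and USERNAME placeholders before handing the arguments to
FsShell. An illustrative substitution (the namenode URI is only an example; the real value
comes from the client configuration):

    # testConf.xml command:         -fs NAMENODE -ls TEST_DIR_ABSOLUTE
    # after substitution, roughly:  -fs hdfs://localhost:8020 -ls /tmp/testcli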

Added: incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/TestCLI.java
URL: http://svn.apache.org/viewvc/incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/TestCLI.java?rev=1326670&view=auto
==============================================================================
--- incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/TestCLI.java (added)
+++ incubator/bigtop/trunk/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/TestCLI.java Mon Apr 16 16:10:22 2012
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.bigtop.itest.hadooptests;
+
+import java.io.File;
+
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.cli.CLITestHelper;
+import org.apache.hadoop.cli.util.CLICommand;
+import org.apache.hadoop.cli.util.CLICommandFS;
+import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests for the Command Line Interface (CLI)
+ */
+public class TestCLI extends CLITestHelper {
+  public static final String TEST_DIR_ABSOLUTE = "/tmp/testcli";
+  private String nn;
+  private String sug;
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    readTestConfigFile();
+    conf = new HdfsConfiguration();
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, 
+                    true);
+    clitestDataDir =
+      new File(TEST_CACHE_DATA_DIR).toURI().toString().replace(' ', '+');
+    nn = conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
+    sug = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY);
+    // Many of the tests expect a replication value of 1 in the output
+    conf.setInt("dfs.replication", 1);
+  }
+
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+
+  @Override
+  protected String getTestFile() {
+    return "testConf.xml";
+  }
+
+  @Test
+  @Override
+  public void testAll() {
+    super.testAll();
+  }
+
+  @Override
+  protected String expandCommand(final String cmd) {
+    String expCmd = super.expandCommand(cmd);
+    String testcliDir = TEST_DIR_ABSOLUTE;
+    expCmd = expCmd.replaceAll("TEST_DIR_ABSOLUTE", testcliDir);
+    expCmd = expCmd.replaceAll("SUPERGROUP", sug);
+    return expCmd;
+  }
+
+  @Override
+  protected CommandExecutor.Result execute(CLICommand cmd) throws Exception {
+    if (cmd.getType() instanceof CLICommandFS) {
+      CommandExecutor cmdExecutor = new FSCmdExecutor(nn, new FsShell(conf));
+      return cmdExecutor.executeCommand(cmd.getCmd());
+    } else {
+      throw new IllegalArgumentException("Unknown type of test command: " + cmd.getType());
+    }
+  }
+}
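
TestCLI extends Hadoop's CLITestHelper but, unlike the upstream unit test, runs the commands
against the cluster named in the client configuration rather than a MiniDFSCluster. A sketch
of running just this test from the smoke module; the Maven selector is an assumption about
how the module is driven:

    mvn test -Dtest=TestCLI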


