hadoop-common-commits mailing list archives

From c..@apache.org
Subject svn commit: r944521 [3/3] - in /hadoop/common/trunk: ./ ivy/ src/test/ src/test/aop/build/ src/test/system/ src/test/system/aop/ src/test/system/aop/org/ src/test/system/aop/org/apache/ src/test/system/aop/org/apache/hadoop/ src/test/system/aop/org/apa...
Date Fri, 14 May 2010 23:56:35 GMT
Added: hadoop/common/trunk/src/test/system/c++/runAs/configure.ac
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/c%2B%2B/runAs/configure.ac?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/c++/runAs/configure.ac (added)
+++ hadoop/common/trunk/src/test/system/c++/runAs/configure.ac Fri May 14 23:56:34 2010
@@ -0,0 +1,65 @@
+#                                               -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+AC_PREREQ(2.59)
+AC_INIT([runAs],[0.1])
+
+# Change the default prefix to the current directory so that the binary
+# does not get installed system-wide.
+AC_PREFIX_DEFAULT(.)
+
+# Add a new argument: --with-home
+AC_ARG_WITH(home,[AS_HELP_STRING([--with-home],[path to the Hadoop home dir])])
+AC_CONFIG_SRCDIR([main.c])
+AC_CONFIG_HEADER([runAs.h])
+
+# Checks for programs.
+AC_PROG_CC
+
+# Checks for libraries.
+
+# Checks for header files.
+AC_HEADER_STDC
+AC_CHECK_HEADERS([stdlib.h string.h unistd.h fcntl.h])
+
+# Define HADOOP_HOME if --with-home was given
+if test "$with_home" != ""
+then
+  AC_DEFINE_UNQUOTED(HADOOP_HOME,"$with_home")
+fi
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_TYPE_PID_T
+AC_TYPE_MODE_T
+AC_TYPE_SIZE_T
+
+# Checks for library functions.
+AC_FUNC_MALLOC
+AC_FUNC_REALLOC
+AC_FUNC_CHOWN
+AC_CHECK_FUNCS([strerror memset mkdir rmdir strdup])
+
+AC_HEADER_STDBOOL
+AC_PROG_MAKE_SET
+
+AC_CONFIG_FILES([Makefile])
+AC_OUTPUT

Added: hadoop/common/trunk/src/test/system/c++/runAs/main.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/c%2B%2B/runAs/main.c?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/c++/runAs/main.c (added)
+++ hadoop/common/trunk/src/test/system/c++/runAs/main.c Fri May 14 23:56:34 2010
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runAs.h"
+
+/**
+ * The binary accepts a command line of the following format:
+ * cluster-controller user hostname hadoop-daemon.sh-command
+ */
+int main(int argc, char **argv) {
+  char *user;
+  char *hostname;
+  char *command;
+  struct passwd user_detail;
+  /*
+   * The binary requires at least three arguments: user, hostname and
+   * the daemon command to run.
+   */
+  if (argc < 4) {
+    fprintf(stderr, "Invalid number of arguments passed to the binary\n");
+    return INVALID_ARGUMENT_NUMBER;
+  }
+
+  user = argv[1];
+  if (user == NULL) {
+    fprintf(stderr, "Invalid user name\n");
+    return INVALID_USER_NAME;
+  }
+
+  if (getuserdetail(user, &user_detail) != 0) {
+    fprintf(stderr, "Invalid user name\n");
+    return INVALID_USER_NAME;
+  }
+
+  if (user_detail.pw_gid == 0 || user_detail.pw_uid == 0) {
+      fprintf(stderr, "Cannot run tasks as super user\n");
+      return SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS;
+  }
+
+  hostname = argv[2];
+  command = argv[3];
+  return process_controller_command(user, hostname, command);
+}
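
A note on how this binary is driven: the test framework is expected to invoke
runAs with the user, the hostname and the daemon command as positional
arguments, and to interpret the exit code against the errorcodes enum in
runAs.h. Below is a minimal sketch of a Java-side invoker under those
assumptions; the binary path and the RunAsInvoker class are illustrative,
not part of this commit.

    import java.io.IOException;

    public class RunAsInvoker {
      // Hypothetical location of the compiled binary.
      private static final String RUN_AS_BINARY = "runAs";

      // Runs "runAs user hostname command" and returns the exit code,
      // which maps to the errorcodes enum declared in runAs.h.
      public static int invoke(String user, String hostname, String command)
          throws IOException, InterruptedException {
        ProcessBuilder pb =
            new ProcessBuilder(RUN_AS_BINARY, user, hostname, command);
        pb.redirectErrorStream(true);
        return pb.start().waitFor();
      }
    }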

Added: hadoop/common/trunk/src/test/system/c++/runAs/runAs.c
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/c%2B%2B/runAs/runAs.c?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/c++/runAs/runAs.c (added)
+++ hadoop/common/trunk/src/test/system/c++/runAs/runAs.c Fri May 14 23:56:34 2010
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runAs.h"
+
+/*
+ * Function to get the user details populated given a user name. 
+ */
+int getuserdetail(char *user, struct passwd *user_detail) {
+  struct passwd *tempPwdPtr;
+  int size = sysconf(_SC_GETPW_R_SIZE_MAX);
+  char pwdbuffer[size];
+  //getpwnam_r returns 0 with a NULL result when the user does not exist.
+  if (getpwnam_r(user, user_detail, pwdbuffer, size, &tempPwdPtr) != 0
+      || tempPwdPtr == NULL) {
+    fprintf(stderr, "Invalid user provided to getpwnam\n");
+    return -1;
+  }
+  return 0;
+}
+
+/**
+ * Function to switch the user identity and set the appropriate 
+ * group control as the user specified in the argument.
+ */
+int switchuser(char *user) {
+  //populate the user details
+  struct passwd user_detail;
+  if ((getuserdetail(user, &user_detail)) != 0) {
+    return INVALID_USER_NAME;
+  }
+  //set the right supplementary groups for the user.
+  if (initgroups(user_detail.pw_name, user_detail.pw_gid) != 0) {
+    fprintf(stderr, "Init groups call for the user : %s failed\n",
+        user_detail.pw_name);
+    return INITGROUPS_FAILED;
+  }
+  //switch the primary group; check the return value rather than errno.
+  if (setgid(user_detail.pw_gid) != 0) {
+    fprintf(stderr, "Setgid for the user : %s failed\n", user_detail.pw_name);
+    return SETUID_OPER_FAILED;
+  }
+  //switch the real user id.
+  if (setuid(user_detail.pw_uid) != 0) {
+    fprintf(stderr, "Setuid for the user : %s failed\n", user_detail.pw_name);
+    return SETUID_OPER_FAILED;
+  }
+  //set the effective user id.
+  if (seteuid(user_detail.pw_uid) != 0) {
+    fprintf(stderr, "Seteuid for the user : %s failed\n", user_detail.pw_name);
+    return SETUID_OPER_FAILED;
+  }
+  return 0;
+}
+
+/*
+ * Top level method which processes a cluster management
+ * command.
+ */
+int process_cluster_command(char * user,  char * node , char *command) {
+  char *finalcommandstr;
+  int len;
+  int errorcode = 0;
+  if (command == NULL || strlen(command) == 0) {
+    fprintf(stderr, "Invalid command passed\n");
+    return INVALID_COMMAND_PASSED;
+  }
+  len = STRLEN + strlen(command);
+  finalcommandstr = (char *) malloc((len + 1) * sizeof(char));
+  //snprintf needs room for the terminator and null-terminates the buffer
+  //itself; writing at index len + 1 would be one byte past the allocation.
+  snprintf(finalcommandstr, len + 1, SCRIPT_DIR_PATTERN, HADOOP_HOME,
+      command);
+  errorcode = switchuser(user);
+  if (errorcode != 0) {
+    fprintf(stderr, "switch user failed\n");
+    return errorcode;
+  }
+  execlp(SSH_COMMAND, SSH_COMMAND, node, finalcommandstr, (char *) NULL);
+  //execlp only returns when it fails.
+  fprintf(stderr, "execlp failed due to : %s\n", strerror(errno));
+  return 0;
+}
+
+/*
+ * Processes a cluster controller command; this is the API exposed to
+ * main() for executing cluster commands.
+ */
+int process_controller_command(char *user, char * node, char *command) {
+  return process_cluster_command(user, node, command);
+}

Added: hadoop/common/trunk/src/test/system/c++/runAs/runAs.h.in
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/c%2B%2B/runAs/runAs.h.in?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/c++/runAs/runAs.h.in (added)
+++ hadoop/common/trunk/src/test/system/c++/runAs/runAs.h.in Fri May 14 23:56:34 2010
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <pwd.h>
+#include <assert.h>
+#include <getopt.h>
+#include <grp.h>
+
+/*
+ * List of possible error codes.
+ */
+enum errorcodes {
+  INVALID_ARGUMENT_NUMBER = 1,
+  INVALID_USER_NAME, //2
+  SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS, //3
+  INITGROUPS_FAILED, //4
+  SETUID_OPER_FAILED, //5
+  INVALID_COMMAND_PASSED, //6
+};
+
+#undef HADOOP_HOME
+
+#define SSH_COMMAND "ssh"
+
+#define SCRIPT_DIR_PATTERN "%s/bin/hadoop-daemon.sh %s" //%s to be substituted
+
+#define STRLEN (strlen(SCRIPT_DIR_PATTERN) + strlen(HADOOP_HOME))
+
+/*
+ * Function to get the user details populated given a user name.
+ */
+int getuserdetail(char *user, struct passwd *user_detail);
+
+/*
+ * Processes a cluster controller command; this is the API exposed to
+ * main() for executing cluster commands.
+ */
+int process_controller_command(char *user, char *node, char *command);

Added: hadoop/common/trunk/src/test/system/conf/hadoop-policy-system-test.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/conf/hadoop-policy-system-test.xml?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/conf/hadoop-policy-system-test.xml (added)
+++ hadoop/common/trunk/src/test/system/conf/hadoop-policy-system-test.xml Fri May 14 23:56:34 2010
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+<!--
+  These are Herriot-specific protocols. This section should not be present
+  in a production cluster configuration. This file needs to be linked to
+  the main conf/hadoop-policy.xml during the deployment process.
+-->
+  <property>
+    <name>security.daemon.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DaemonProtocol, extended by all other
+    Herriot RPC protocols.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.nn.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NNProtocol, used by the
+    Herriot AbstractDaemonCluster's implementations to connect to a remote
+    NameNode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.dn.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DNProtocol, used by the
+    Herriot AbstractDaemonCluster's implementations to connect to a remote
+    DataNode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.tt.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TTProtocol, used by the
+    Herriot AbstractDaemonCluster's implementations to connect to a remote
+    TaskTracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>
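
The ACL value format described above ("alice,bob users,wheel", or "*" for
everyone) is simple enough to parse with plain string splitting. A hedged
sketch, independent of Hadoop's own ACL classes; the SimpleAcl name is an
assumption of this example:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class SimpleAcl {
      private final boolean allowAll;
      private final Set<String> users = new HashSet<String>();
      private final Set<String> groups = new HashSet<String>();

      // Parses "user1,user2 group1,group2" or "*" (all users allowed).
      public SimpleAcl(String aclString) {
        String acl = aclString.trim();
        allowAll = acl.equals("*");
        if (!allowAll) {
          // The user list and the group list are separated by a blank.
          String[] parts = acl.split(" ", 2);
          if (parts.length > 0 && parts[0].length() > 0) {
            users.addAll(Arrays.asList(parts[0].split(",")));
          }
          if (parts.length > 1) {
            groups.addAll(Arrays.asList(parts[1].split(",")));
          }
        }
      }

      public boolean isAllowed(String user, Set<String> userGroups) {
        if (allowAll || users.contains(user)) {
          return true;
        }
        for (String group : userGroups) {
          if (groups.contains(group)) {
            return true;
          }
        }
        return false;
      }
    }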

Added: hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java (added)
+++ hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java Fri May 14 23:56:34 2010
@@ -0,0 +1,343 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.test.system;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.ConcurrentModificationException;
+import java.util.List;
+import org.junit.Assert;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.test.system.process.RemoteProcess;
+/**
+ * Abstract class which encapsulates the daemon client used in the
+ * system tests.<br/>
+ * 
+ * @param <PROXY> the proxy implementation of a specific Daemon 
+ */
+public abstract class AbstractDaemonClient<PROXY extends DaemonProtocol> {
+  private Configuration conf;
+  private RemoteProcess process;
+  private boolean connected;
+
+  private static final Log LOG = LogFactory.getLog(AbstractDaemonClient.class);
+  
+  /**
+   * Create a Daemon client.<br/>
+   * 
+   * @param conf configuration used by the proxy to connect to the Daemon.
+   * @param process the Daemon process to manage the particular daemon.
+   * 
+   * @throws IOException
+   */
+  public AbstractDaemonClient(Configuration conf, RemoteProcess process) 
+      throws IOException {
+    this.conf = conf;
+    this.process = process;
+  }
+
+  /**
+   * Returns whether the client is connected to the Daemon. <br/>
+   * 
+   * @return true if connected.
+   */
+  public boolean isConnected() {
+    return connected;
+  }
+
+  protected void setConnected(boolean connected) {
+    this.connected = connected;
+  }
+
+  /**
+   * Create an RPC proxy to the daemon <br/>
+   * 
+   * @throws IOException
+   */
+  public abstract void connect() throws IOException;
+
+  /**
+   * Disconnect the underlying RPC proxy to the daemon.<br/>
+   * @throws IOException
+   */
+  public abstract void disconnect() throws IOException;
+
+  /**
+   * Get the proxy to connect to a particular service Daemon.<br/>
+   * 
+   * @return proxy to connect to a particular service Daemon.
+   */
+  protected abstract PROXY getProxy();
+
+  /**
+   * Gets the daemon level configuration.<br/>
+   * 
+   * @return configuration using which daemon is running
+   */
+  public Configuration getConf() {
+    return conf;
+  }
+
+  /**
+   * Gets the host on which Daemon is currently running. <br/>
+   * 
+   * @return hostname
+   */
+  public String getHostName() {
+    return process.getHostName();
+  }
+
+  /**
+   * Returns whether the Daemon is ready to accept RPC connections. <br/>
+   * 
+   * @return true if daemon is ready.
+   * @throws IOException
+   */
+  public boolean isReady() throws IOException {
+    return getProxy().isReady();
+  }
+
+  /**
+   * Kills the Daemon process <br/>
+   * @throws IOException
+   */
+  public void kill() throws IOException {
+    process.kill();
+  }
+
+  /**
+   * Checks if the Daemon process is alive or not <br/>
+   * 
+   * @throws IOException
+   */
+  public void ping() throws IOException {
+    getProxy().ping();
+  }
+
+  /**
+   * Start up the Daemon process. <br/>
+   * @throws IOException
+   */
+  public void start() throws IOException {
+    process.start();
+  }
+
+  /**
+   * Get system level view of the Daemon process.
+   * 
+   * @return returns system level view of the Daemon process.
+   * 
+   * @throws IOException
+   */
+  public ProcessInfo getProcessInfo() throws IOException {
+    return getProxy().getProcessInfo();
+  }
+
+  /**
+   * Return a file status object that represents the path.
+   * @param path
+   *          given path
+   * @param local
+   *          whether the path is local or not
+   * @return a FileStatus object
+   * @throws java.io.FileNotFoundException when the path does not exist;
+   *         IOException see specific implementation
+   */
+  public FileStatus getFileStatus(String path, boolean local) throws IOException {
+    return getProxy().getFileStatus(path, local);
+  }
+
+  /**
+   * List the statuses of the files/directories in the given path if the path is
+   * a directory.
+   * 
+   * @param path
+   *          given path
+   * @param local
+   *          whether the path is local or not
+   * @return the statuses of the files/directories in the given path
+   * @throws IOException
+   */
+  public FileStatus[] listStatus(String path, boolean local) 
+    throws IOException {
+    return getProxy().listStatus(path, local);
+  }
+
+  /**
+   * List the statuses of the files/directories in the given path if the path
+   * is a directory, recursively or non-recursively depending on the parameters.
+   * 
+   * @param path
+   *          given path
+   * @param local
+   *          whether the path is local or not
+   * @param recursive 
+   *          whether to recursively get the status
+   * @return the statuses of the files/directories in the given path
+   * @throws IOException
+   */
+  public FileStatus[] listStatus(String path, boolean local, boolean recursive)
+    throws IOException {
+    List<FileStatus> status = new ArrayList<FileStatus>();
+    addStatus(status, path, local, recursive);
+    return status.toArray(new FileStatus[0]);
+  }
+
+  private void addStatus(List<FileStatus> status, String f, 
+      boolean local, boolean recursive) 
+    throws IOException {
+    FileStatus[] fs = listStatus(f, local);
+    if (fs != null) {
+      for (FileStatus fileStatus : fs) {
+        if (!f.equals(fileStatus.getPath().toString())) {
+          status.add(fileStatus);
+          if (recursive) {
+            addStatus(status, fileStatus.getPath().toString(), local, recursive);
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Gets the number of times FATAL log messages were logged in the Daemon 
+   * logs. <br/>
+   * Pattern used for searching is FATAL. <br/>
+   * @param excludeExpList list of exceptions to exclude 
+   * @return number of occurrences of fatal messages.
+   * @throws IOException
+   */
+  public int getNumberOfFatalStatementsInLog(String [] excludeExpList)
+      throws IOException {
+    DaemonProtocol proxy = getProxy();
+    String pattern = "FATAL";
+    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
+  }
+
+  /**
+   * Gets the number of times ERROR log messages were logged in the Daemon 
+   * logs. <br/>
+   * Pattern used for searching is ERROR. <br/>
+   * @param excludeExpList list of exceptions to exclude 
+   * @return number of occurrences of error messages.
+   * @throws IOException
+   */
+  public int getNumberOfErrorStatementsInLog(String[] excludeExpList) 
+      throws IOException {
+    DaemonProtocol proxy = getProxy();
+    String pattern = "ERROR";    
+    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
+  }
+
+  /**
+   * Gets the number of times WARN log messages were logged in the Daemon 
+   * logs. <br/>
+   * Pattern used for searching is WARN. <br/>
+   * @param excludeExpList list of exceptions to exclude 
+   * @return number of occurrences of warning messages.
+   * @throws IOException
+   */
+  public int getNumberOfWarnStatementsInLog(String[] excludeExpList) 
+      throws IOException {
+    DaemonProtocol proxy = getProxy();
+    String pattern = "WARN";
+    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
+  }
+
+  /**
+   * Gets the number of times a given exception is present in the log file. <br/>
+   * 
+   * @param e exception class.
+   * @param excludeExpList list of exceptions to exclude. 
+   * @return number of exceptions in log
+   * @throws IOException
+   */
+  public int getNumberOfExceptionsInLog(Exception e,
+      String[] excludeExpList) throws IOException {
+    DaemonProtocol proxy = getProxy();
+    String pattern = e.getClass().getSimpleName();    
+    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
+  }
+
+  /**
+   * Gets the number of times ConcurrentModificationException is present in 
+   * the log file. <br/>
+   * @param excludeExpList list of exceptions to exclude.
+   * @return number of occurrences of the exception in the log file.
+   * @throws IOException
+   */
+  public int getNumberOfConcurrentModificationExceptionsInLog(
+      String[] excludeExpList) throws IOException {
+    return getNumberOfExceptionsInLog(new ConcurrentModificationException(),
+        excludeExpList);
+  }
+
+  private int errorCount;
+  private int fatalCount;
+  private int concurrentExceptionCount;
+
+  /**
+   * Populate the initial exception counts, used to assert after a testcase
+   * has finished that no new exceptions were logged by the daemon while the
+   * testcase was running.
+   * @param excludeExpList list of exceptions to exclude
+   * @throws IOException
+   */
+  protected void populateExceptionCount(String [] excludeExpList) 
+      throws IOException {
+    errorCount = getNumberOfErrorStatementsInLog(excludeExpList);
+    LOG.info("Number of error messages in logs : " + errorCount);
+    fatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
+    LOG.info("Number of fatal statement in logs : " + fatalCount);
+    concurrentExceptionCount =
+        getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
+    LOG.info("Number of concurrent modification in logs : "
+        + concurrentExceptionCount);
+  }
+
+  /**
+   * Assert that no new exceptions were logged into the log file.
+   * <br/>
+   * <b><i>
+   * Prerequisite: populateExceptionCount() has to be called before
+   * calling this method.</i></b>
+   * @param excludeExpList list of exceptions to exclude
+   * @throws IOException
+   */
+  protected void assertNoExceptionsOccurred(String [] excludeExpList) 
+      throws IOException {
+    int newerrorCount = getNumberOfErrorStatementsInLog(excludeExpList);
+    LOG.info("Number of error messages while asserting :" + newerrorCount);
+    int newfatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
+    LOG.info("Number of fatal messages while asserting : " + newfatalCount);
+    int newconcurrentExceptionCount =
+        getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
+    LOG.info("Number of concurrentmodification exception while asserting :"
+        + newconcurrentExceptionCount);
+    Assert.assertEquals(
+        "New Error Messages logged in the log file", errorCount, newerrorCount);
+    Assert.assertEquals(
+        "New Fatal messages logged in the log file", fatalCount, newfatalCount);
+    Assert.assertEquals(
+        "New ConcurrentModificationException in log file",
+        concurrentExceptionCount, newconcurrentExceptionCount);
+  }
+}
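
For orientation, a concrete subclass mainly has to wire connect(),
disconnect() and getProxy() to Hadoop's RPC layer. A minimal sketch under
stated assumptions: the SimpleDaemonClient name and the fixed RPC port are
placeholders, and only AbstractDaemonClient and DaemonProtocol come from
this commit.

    package org.apache.hadoop.test.system;

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.test.system.process.RemoteProcess;

    public class SimpleDaemonClient
        extends AbstractDaemonClient<DaemonProtocol> {
      private DaemonProtocol proxy;

      public SimpleDaemonClient(Configuration conf, RemoteProcess process)
          throws IOException {
        super(conf, process);
      }

      @Override
      public void connect() throws IOException {
        // 1234 is a placeholder port; a real client would read it from conf.
        InetSocketAddress addr = new InetSocketAddress(getHostName(), 1234);
        proxy = (DaemonProtocol) RPC.getProxy(
            DaemonProtocol.class, DaemonProtocol.versionID, addr, getConf());
        setConnected(true);
      }

      @Override
      public void disconnect() throws IOException {
        RPC.stopProxy(proxy);
        setConnected(false);
      }

      @Override
      protected DaemonProtocol getProxy() {
        return proxy;
      }
    }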

Added: hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java (added)
+++ hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java Fri May 14 23:56:34 2010
@@ -0,0 +1,293 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.test.system;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.test.system.process.ClusterProcessManager;
+import org.apache.hadoop.test.system.process.RemoteProcess;
+
+/**
+ * Abstract class which represents a cluster having multiple daemons.
+ */
+@SuppressWarnings("unchecked")
+public abstract class AbstractDaemonCluster {
+
+  private static final Log LOG = LogFactory.getLog(AbstractDaemonCluster.class);
+  private String[] excludeExpList;
+  private Configuration conf;
+  protected ClusterProcessManager clusterManager;
+  private Map<Enum<?>, List<AbstractDaemonClient>> daemons = 
+    new LinkedHashMap<Enum<?>, List<AbstractDaemonClient>>();
+  
+  /**
+   * Constructor to create a cluster client.<br/>
+   * 
+   * @param conf
+   *          Configuration to be used while constructing the cluster.
+   * @param rcluster
+   *          process manager instance to be used for managing the daemons.
+   * 
+   * @throws IOException
+   */
+  public AbstractDaemonCluster(Configuration conf,
+      ClusterProcessManager rcluster) throws IOException {
+    this.conf = conf;
+    this.clusterManager = rcluster;
+    createAllClients();
+  }
+
+  /**
+   * The method returns the cluster manager. The system test cases require an
+   * instance of HadoopDaemonRemoteCluster to invoke certain operations on
+   * the daemons.
+   * 
+   * @return instance of clusterManager
+   */
+  public ClusterProcessManager getClusterManager() {
+    return clusterManager;
+  }
+
+  protected void createAllClients() throws IOException {
+    for (RemoteProcess p : clusterManager.getAllProcesses()) {
+      List<AbstractDaemonClient> dms = daemons.get(p.getRole());
+      if (dms == null) {
+        dms = new ArrayList<AbstractDaemonClient>();
+        daemons.put(p.getRole(), dms);
+      }
+      dms.add(createClient(p));
+    }
+  }
+  
+  /**
+   * Method to create the daemon client.<br/>
+   * 
+   * @param process
+   *          to manage the daemon.
+   * @return instance of the daemon client
+   * 
+   * @throws IOException
+   */
+  protected abstract AbstractDaemonClient<DaemonProtocol> 
+    createClient(RemoteProcess process) throws IOException;
+
+  /**
+   * Get the global cluster configuration which was used to create the 
+   * cluster. <br/>
+   * 
+   * @return global configuration of the cluster.
+   */
+  public Configuration getConf() {
+    return conf;
+  }
+
+  /**
+   * Return the client handle of all the Daemons.<br/>
+   * 
+   * @return map of role to daemon clients' list.
+   */
+  public Map<Enum<?>, List<AbstractDaemonClient>> getDaemons() {
+    return daemons;
+  }
+
+  /**
+   * Checks if the cluster is ready for testing. <br/>
+   * Algorithm for checking is as follows : <br/>
+   * <ul>
+   * <li> Wait for Daemon to come up </li>
+   * <li> Check if daemon is ready </li>
+   * <li> If one of the daemons is not ready, return false </li>
+   * </ul> 
+   * 
+   * @return true if whole cluster is ready.
+   * 
+   * @throws IOException
+   */
+  public boolean isReady() throws IOException {
+    for (List<AbstractDaemonClient> set : daemons.values()) {
+      for (AbstractDaemonClient daemon : set) {
+        waitForDaemon(daemon);
+        if (!daemon.isReady()) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+  protected void waitForDaemon(AbstractDaemonClient d) {
+    final int TEN_SEC = 10000;
+    while(true) {
+      try {
+        LOG.info("Waiting for daemon at " + d.getHostName() + " to come up.");
+        LOG.info("Daemon might not be " +
+            "ready or the call to setReady() method hasn't been " +
+            "injected to " + d.getClass() + " ");
+        d.connect();
+        break;
+      } catch (IOException e) {
+        try {
+          Thread.sleep(TEN_SEC);
+        } catch (InterruptedException ie) {
+        }
+      }
+    }
+  }
+
+  /**
+   * Starts the cluster daemons.
+   * @throws IOException
+   */
+  public void start() throws IOException {
+    clusterManager.start();
+  }
+
+  /**
+   * Stops the cluster daemons.
+   * @throws IOException
+   */
+  public void stop() throws IOException {
+    clusterManager.stop();
+  }
+
+  /**
+   * Connect to daemon RPC ports.
+   * @throws IOException
+   */
+  public void connect() throws IOException {
+    for (List<AbstractDaemonClient> set : daemons.values()) {
+      for (AbstractDaemonClient daemon : set) {
+        daemon.connect();
+      }
+    }
+  }
+
+  /**
+   * Disconnect from daemon RPC ports.
+   * @throws IOException
+   */
+  public void disconnect() throws IOException {
+    for (List<AbstractDaemonClient> set : daemons.values()) {
+      for (AbstractDaemonClient daemon : set) {
+        daemon.disconnect();
+      }
+    }
+  }
+
+  /**
+   * Ping all the daemons of the cluster.
+   * @throws IOException
+   */
+  public void ping() throws IOException {
+    for (List<AbstractDaemonClient> set : daemons.values()) {
+      for (AbstractDaemonClient daemon : set) {
+        LOG.info("Daemon is : " + daemon.getHostName() + " pinging....");
+        daemon.ping();
+      }
+    }
+  }
+
+  /**
+   * Connect to the cluster and ensure that it is clean to run tests.
+   * @throws Exception
+   */
+  public void setUp() throws Exception {
+    while (!isReady()) {
+      Thread.sleep(1000);
+    }
+    connect();
+    ping();
+    clearAllControlActions();
+    ensureClean();
+    populateExceptionCounts();
+  }
+  
+  /**
+   * This is mainly used for the test cases to set the list of exceptions
+   * that will be excluded.
+   * @param excludeExpList list of exceptions to exclude
+   */
+  public void setExcludeExpList(String[] excludeExpList) {
+    this.excludeExpList = excludeExpList;
+  }
+  
+  public void clearAllControlActions() throws IOException {
+    for (List<AbstractDaemonClient> set : daemons.values()) {
+      for (AbstractDaemonClient daemon : set) {
+        LOG.info("Daemon is : " + daemon.getHostName() + " pinging....");
+        daemon.getProxy().clearActions();
+      }
+    }
+  }
+
+  /**
+   * Ensure that the cluster is clean to run tests.
+   * @throws IOException
+   */
+  public void ensureClean() throws IOException {
+  }
+
+  /**
+   * Ensure that cluster is clean. Disconnect from the RPC ports of the daemons.
+   * @throws IOException
+   */
+  public void tearDown() throws IOException {
+    ensureClean();
+    clearAllControlActions();
+    assertNoExceptionMessages();
+    disconnect();
+  }
+
+  /**
+   * Populate the exception counts in all the daemons so that they can be checked when 
+   * the testcase has finished running.<br/>
+   * @throws IOException
+   */
+  protected void populateExceptionCounts() throws IOException {
+    for(List<AbstractDaemonClient> lst : daemons.values()) {
+      for(AbstractDaemonClient d : lst) {
+        d.populateExceptionCount(excludeExpList);
+      }
+    }
+  }
+
+  /**
+   * Assert no exception has been thrown during the sequence of the actions.
+   * <br/>
+   * @throws IOException
+   */
+  protected void assertNoExceptionMessages() throws IOException {
+    for(List<AbstractDaemonClient> lst : daemons.values()) {
+      for(AbstractDaemonClient d : lst) {
+        d.assertNoExceptionsOccurred(excludeExpList);
+      }
+    }
+  }
+}
+
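
To see how the lifecycle above is meant to be used, here is a sketch of a
JUnit 4 test driving a cluster through setUp() and tearDown(). MyCluster and
its createCluster() factory stand in for a concrete AbstractDaemonCluster
subclass; both are assumptions of this sketch.

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class TestClusterLifecycle {
      // Hypothetical concrete AbstractDaemonCluster subclass.
      private MyCluster cluster;

      @Before
      public void before() throws Exception {
        // createCluster() is a hypothetical factory on the subclass.
        cluster = MyCluster.createCluster();
        // Exclude known benign exceptions before the counts are recorded.
        cluster.setExcludeExpList(new String[] {"java.net.ConnectException"});
        cluster.setUp(); // waits for readiness, connects, pings, records counts
      }

      @Test
      public void testDaemonsArePingable() throws Exception {
        cluster.ping(); // fails if any daemon is unreachable
      }

      @After
      public void after() throws Exception {
        cluster.tearDown(); // asserts no new ERROR/FATAL entries, disconnects
      }
    }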

Added: hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java (added)
+++ hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java Fri May 14 23:56:34 2010
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.test.system;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Writable;
+
+/**
+ * Class to represent a control action which can be performed on Daemon.<br/>
+ * 
+ */
+
+public abstract class ControlAction<T extends Writable> implements Writable {
+
+  private T target;
+
+  /**
+   * Default constructor of the control action; leaves the target unset, so
+   * subclasses must initialize it before readFields() is called. <br/>
+   */
+  public ControlAction() {
+  }
+
+  /**
+   * Constructor which sets the target of the control action. <br/>
+   * 
+   * @param target
+   *          of the control action.
+   */
+  public ControlAction(T target) {
+    this.target = target;
+  }
+
+  /**
+   * Gets the target of the control action <br/>
+   * 
+   * @return target of action
+   */
+  public T getTarget() {
+    return target;
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    target.readFields(in);
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    target.write(out);
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public boolean equals(Object obj) {
+    if (obj instanceof ControlAction) {
+      ControlAction<T> other = (ControlAction<T>) obj;
+      return (this.target.equals(other.getTarget()));
+    } else {
+      return false;
+    }
+  }
+
+  //equals() is overridden, so hashCode() must be kept consistent with it.
+  @Override
+  public int hashCode() {
+    return target == null ? 0 : target.hashCode();
+  }
+
+  @Override
+  public String toString() {
+    return "Action Target : " + this.target;
+  }
+}
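
A concrete action has to supply the target instance itself, in particular
from the no-argument constructor, since readFields() deserializes directly
into the target. A hypothetical sketch using org.apache.hadoop.io.Text as
the target type (RestartAction is not part of this commit):

    package org.apache.hadoop.test.system;

    import org.apache.hadoop.io.Text;

    public class RestartAction extends ControlAction<Text> {

      // Required for deserialization: readFields() delegates to the target,
      // so the target must be non-null before readFields() is called.
      public RestartAction() {
        super(new Text());
      }

      public RestartAction(String daemonHost) {
        super(new Text(daemonHost));
      }
    }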

Added: hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java (added)
+++ hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java Fri May 14 23:56:34 2010
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.test.system;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.ipc.VersionedProtocol;
+
+/**
+ * RPC interface of a given Daemon.
+ */
+public interface DaemonProtocol extends VersionedProtocol {
+  long versionID = 1L;
+
+  /**
+   * Returns the Daemon configuration.
+   * @return Configuration
+   * @throws IOException in case of errors
+   */
+  Configuration getDaemonConf() throws IOException;
+
+  /**
+   * Check if the Daemon is alive.
+   * 
+   * @throws IOException
+   *           if Daemon is unreachable.
+   */
+  void ping() throws IOException;
+
+  /**
+   * Check if the Daemon is ready to accept RPC connections.
+   * 
+   * @return true if Daemon is ready to accept RPC connection.
+   * @throws IOException in case of errors
+   */
+  boolean isReady() throws IOException;
+
+  /**
+   * Get system level view of the Daemon process.
+   * 
+   * @return returns system level view of the Daemon process.
+   * 
+   * @throws IOException in case of errors
+   */
+  ProcessInfo getProcessInfo() throws IOException;
+  
+  /**
+   * Return a file status object that represents the path.
+   * @param path
+   *          given path
+   * @param local
+   *          whether the path is local or not
+   * @return a FileStatus object
+   * @throws FileNotFoundException when the path does not exist;
+   *         IOException see specific implementation
+   */
+  FileStatus getFileStatus(String path, boolean local) throws IOException;
+
+  /**
+   * List the statuses of the files/directories in the given path if the path is
+   * a directory.
+   * 
+   * @param path
+   *          given path
+   * @param local
+   *          whether the path is local or not
+   * @return the statuses of the files/directories in the given patch
+   * @throws IOException in case of errors
+   */
+  FileStatus[] listStatus(String path, boolean local) throws IOException;
+  
+  /**
+   * Enables a particular control action to be performed on the Daemon <br/>
+   * 
+   * @param action is a control action  to be enabled.
+   * 
+   * @throws IOException in case of errors
+   */
+  @SuppressWarnings("unchecked")
+  void sendAction(ControlAction action) throws IOException;
+  
+  /**
+   * Checks if the particular control action has been delivered to the Daemon 
+   * component <br/>
+   * 
+   * @param action to be checked.
+   * 
+   * @return true if action is still in waiting queue of 
+   *          actions to be delivered.
+   * @throws IOException in case of errors
+   */
+  @SuppressWarnings("unchecked")
+  boolean isActionPending(ControlAction action) throws IOException;
+  
+  /**
+   * Removes a particular control action from the list of the actions which the
+   * daemon maintains. <br/>
+   * <i><b>Not to be directly called by Test Case or clients.</b></i>
+   * @param action to be removed
+   * @throws IOException in case of errors
+   */
+  
+  @SuppressWarnings("unchecked")
+  void removeAction(ControlAction action) throws IOException;
+  
+  /**
+   * Clears out the list of control actions on the particular daemon.
+   * <br/>
+   * @throws IOException in case of errors
+   */
+  void clearActions() throws IOException;
+  
+  /**
+   * Gets a list of pending actions which are targeted on the specified key. 
+   * <br/>
+   * <i><b>Not to be directly used by clients</b></i>
+   * @param key target
+   * @return list of actions.
+   * @throws IOException in case of errors
+   */
+  @SuppressWarnings("unchecked")
+  ControlAction[] getActions(Writable key) throws IOException;
+
+  /**
+   * Gets the number of times a particular pattern has been found in the 
+   * daemons log file.<br/>
+   * <b><i>Please note that search spans across all previous messages of
+   * Daemon, so better practice is to get previous counts before an operation
+   * and then re-check if the sequence of action has caused any problems</i></b>
+   * @param pattern to look for in the daemon's log file
+   * @param list of exceptions to ignore
+   * @return number of times the pattern is found in the log file.
+   * @throws IOException in case of errors
+   */
+  int getNumberOfMatchesInLogFile(String pattern, String[] list) 
+      throws IOException;
+
+  /**
+   * Gets the user who started the particular daemon initially. <br/>
+   * 
+   * @return user who started the particular daemon.
+   * @throws IOException in case of errors
+   */
+  String getDaemonUser() throws IOException;
+}
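
The sendAction()/isActionPending() pair implies a polling pattern on the
test side: submit an action, then wait until the daemon component has
consumed it. A hedged sketch of that pattern (the ActionHelper class is an
assumption, and RestartAction is the hypothetical action sketched earlier):

    package org.apache.hadoop.test.system;

    import java.io.IOException;

    public class ActionHelper {
      // Sends an action and polls until the daemon has consumed it.
      public static void sendAndAwait(DaemonProtocol proxy,
          ControlAction<?> action) throws IOException, InterruptedException {
        proxy.sendAction(action);
        while (proxy.isActionPending(action)) {
          Thread.sleep(1000);
        }
      }
    }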

Added: hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java (added)
+++ hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java Fri May 14 23:56:34 2010
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.test.system;
+
+import java.util.Map;
+
+import org.apache.hadoop.io.Writable;
+
+/**
+ * Daemon system level process information.
+ */
+public interface ProcessInfo extends Writable {
+  /**
+   * Get the current time in milliseconds.<br/>
+   * 
+   * @return current time on the daemon clock in milliseconds.
+   */
+  public long currentTimeMillis();
+
+  /**
+   * Get the environment that was used to start the Daemon process.<br/>
+   * 
+   * @return the environment variable list.
+   */
+  public Map<String,String> getEnv();
+
+  /**
+   * Get the System properties of the Daemon process.<br/>
+   * 
+   * @return the properties list.
+   */
+  public Map<String,String> getSystemProperties();
+
+  /**
+   * Get the number of active threads in Daemon VM.<br/>
+   * 
+   * @return number of active threads in Daemon VM.
+   */
+  public int activeThreadCount();
+
+  /**
+   * Get the maximum heap size that is configured for the Daemon VM. <br/>
+   * 
+   * @return maximum heap size.
+   */
+  public long maxMemory();
+
+  /**
+   * Get the free memory in Daemon VM.<br/>
+   * 
+   * @return free memory.
+   */
+  public long freeMemory();
+
+  /**
+   * Get the total used memory in the Daemon VM. <br/>
+   * 
+   * @return total used memory.
+   */
+  public long totalMemory();
+}
\ No newline at end of file

Added: hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java (added)
+++ hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java Fri May 14 23:56:34 2010
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.test.system;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+
+public class ProcessInfoImpl implements ProcessInfo {
+
+  private int threadCount;
+  private long currentTime;
+  private long freemem;
+  private long maxmem;
+  private long totmem;
+  private Map<String, String> env;
+  private Map<String, String> props;
+
+  public ProcessInfoImpl() {
+    env = new HashMap<String, String>();
+    props = new HashMap<String, String>();
+  }
+
+  /**
+   * Construct a concrete process information object. <br/>
+   * 
+   * @param threadCount
+   *          count of active threads.
+   * @param currentTime current time on the daemon clock, in milliseconds.
+   * @param freemem free memory in the daemon VM.
+   * @param maxmem maximum heap size configured for the daemon VM.
+   * @param totmem total memory of the daemon VM.
+   * @param env environment variable list.
+   * @param props system properties list.
+   */
+  public ProcessInfoImpl(int threadCount, long currentTime, long freemem,
+      long maxmem, long totmem, Map<String, String> env, 
+      Map<String, String> props) {
+    this.threadCount = threadCount;
+    this.currentTime = currentTime;
+    this.freemem = freemem;
+    this.maxmem = maxmem;
+    this.totmem = totmem;
+    this.env = env;
+    this.props = props;
+  }
+
+  @Override
+  public int activeThreadCount() {
+    return threadCount;
+  }
+
+  @Override
+  public long currentTimeMillis() {
+    return currentTime;
+  }
+
+  @Override
+  public long freeMemory() {
+    return freemem;
+  }
+
+  @Override
+  public Map<String, String> getEnv() {
+    return env;
+  }
+
+  @Override
+  public Map<String,String> getSystemProperties() {
+    return props;
+  }
+
+  @Override
+  public long maxMemory() {
+    return maxmem;
+  }
+
+  @Override
+  public long totalMemory() {
+    return totmem;
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    this.threadCount = in.readInt();
+    this.currentTime = in.readLong();
+    this.freemem = in.readLong();
+    this.maxmem = in.readLong();
+    this.totmem = in.readLong();
+    read(in, env);
+    read(in, props);
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeInt(threadCount);
+    out.writeLong(currentTime);
+    out.writeLong(freemem);
+    out.writeLong(maxmem);
+    out.writeLong(totmem);
+    write(out, env);
+    write(out, props);
+  }
+
+  private void read(DataInput in, Map<String, String> map) throws IOException {
+    int size = in.readInt();
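+    // size counts keys and values together, so advance by 2 per entry pair.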
+    for (int i = 0; i < size; i = i + 2) {
+      String key = in.readUTF();
+      String value = in.readUTF();
+      map.put(key, value);
+    }
+  }
+
+  private void write(DataOutput out, Map<String, String> map) 
+  throws IOException {
+    int size = (map.size() * 2);
+    out.writeInt(size);
+    for (Map.Entry<String, String> entry : map.entrySet()) {
+      out.writeUTF(entry.getKey());
+      out.writeUTF(entry.getValue());
+    }
+  }
+
+  @Override
+  public String toString() {
+    StringBuffer strBuf = new StringBuffer();
+    strBuf.append(String.format("active threads : %d\n", threadCount));
+    strBuf.append(String.format("current time  : %d\n", currentTime));
+    strBuf.append(String.format("free memory  : %d\n", freemem));
+    strBuf.append(String.format("total memory  : %d\n", totmem));
+    strBuf.append(String.format("max memory  : %d\n", maxmem));
+    strBuf.append("Environment Variables : \n");
+    for (Map.Entry<String, String> entry : env.entrySet()) {
+      strBuf.append(String.format("key : %s value : %s \n", entry.getKey(),
+          entry.getValue()));
+    }
+    return strBuf.toString();
+  }
+
+}
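
Since ProcessInfoImpl is a Writable, its wire format can be sanity-checked
with a round trip through Hadoop's in-memory data buffers. A small sketch;
the driver class and the sample values are illustrative only:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.test.system.ProcessInfoImpl;

    public class ProcessInfoRoundTrip {
      public static void main(String[] args) throws IOException {
        Map<String, String> env = new HashMap<String, String>();
        env.put("HADOOP_HOME", "/opt/hadoop");
        ProcessInfoImpl info = new ProcessInfoImpl(
            8, System.currentTimeMillis(), 1024L, 4096L, 2048L,
            env, new HashMap<String, String>());

        // Serialize, then deserialize into a fresh instance.
        DataOutputBuffer out = new DataOutputBuffer();
        info.write(out);
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        ProcessInfoImpl copy = new ProcessInfoImpl();
        copy.readFields(in);

        System.out.println("threads survive round trip : "
            + (copy.activeThreadCount() == info.activeThreadCount()));
        System.out.println("env survives round trip : "
            + "/opt/hadoop".equals(copy.getEnv().get("HADOOP_HOME")));
      }
    }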

Added: hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java (added)
+++ hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java Fri May 14 23:56:34 2010
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.test.system.process;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Interface to manage the remote processes in the cluster.
+ */
+public interface ClusterProcessManager {
+
+  /**
+   * Initialization method to pass the configuration object which is required 
+   * by the ClusterProcessManager to manage the cluster.<br/>
+   * Configuration object should typically contain all the parameters which are 
+   * required by the implementations.<br/>
+   *  
+   * @param conf configuration containing values of the specific keys which 
+   * are required by the implementation of the cluster process manager.
+   * 
+   * @throws IOException when initialization fails.
+   */
+  void init(Configuration conf) throws IOException;
+
+  /**
+   * Get the list of RemoteProcess handles of all the remote processes.
+   */
+  List<RemoteProcess> getAllProcesses();
+
+  /**
+   * Get all the roles this cluster's daemon processes have.
+   */
+  Set<Enum<?>> getRoles();
+
+  /**
+   * Method to start all the remote daemons.<br/>
+   * 
+   * @throws IOException if startup procedure fails.
+   */
+  void start() throws IOException;
+
+  /**
+   * Starts the daemon from the user specified conf dir.
+   * @param newConfLocation the dir where the new conf files reside.
+   * @throws IOException
+   */
+  void start(String newConfLocation) throws IOException;
+
+  /**
+   * Stops the daemon running from user specified conf dir.
+   * 
+   * @param newConfLocation
+   *          the dir where the new conf files reside.
+   * @throws IOException
+   */
+  void stop(String newConfLocation) throws IOException;
+
+  /**
+   * Method to shutdown all the remote daemons.<br/>
+   * 
+   * @throws IOException if shutdown procedure fails.
+   */
+  void stop() throws IOException;
+  
+  /**
+   * Returns whether multi-user support is enabled for this cluster. 
+   * <br/>
+   * @return true if multi-user support is enabled.
+   * @throws IOException
+   */
+  boolean isMultiUserSupported() throws IOException;
+
+  /**
+   * pushConfig is used to push a new config to the daemons.
+   * @param localDir the local directory containing the new config files.
+   * @return the remote directory location where the config was pushed.
+   * @throws IOException
+   */
+  String pushConfig(String localDir) throws IOException;
+}
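
The interface above implies a simple call sequence for restarting a cluster
from a fresh configuration. A sketch of that sequence against any
implementation; the ManagerLifecycle helper and the localConfDir value are
assumptions of this example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.test.system.process.ClusterProcessManager;

    public class ManagerLifecycle {
      public static void restartWithNewConfig(ClusterProcessManager manager,
          String localConfDir) throws Exception {
        manager.init(new Configuration());
        manager.start();              // start with the deployed config
        manager.stop();
        String remoteDir = manager.pushConfig(localConfDir);
        manager.start(remoteDir);     // restart from the pushed config
        manager.stop(remoteDir);      // stop daemons running from it
      }
    }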

Added: hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java (added)
+++ hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java Fri May 14 23:56:34 2010
@@ -0,0 +1,404 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.test.system.process;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+
+/**
+ * The class which implements the start-up and shut-down routines on top of
+ * hadoop-daemon.sh. <br/>
+ * 
+ * The class requires two keys to be present in the Configuration object
+ * passed to it. Look at <code>CONF_HADOOPHOME</code> and
+ * <code>CONF_HADOOPCONFDIR</code> for the names of the
+ * configuration keys.
+ * 
+ * The final command executed will have the following format: 
+ * <br/>
+ * <code>
+ *  ssh host 'hadoop-home/bin/hadoop-daemon.sh --script scriptName 
+ *  --config HADOOP_CONF_DIR (start|stop) command'
+ * </code>
+ */
+public abstract class HadoopDaemonRemoteCluster 
+    implements ClusterProcessManager {
+
+  private static final Log LOG = LogFactory
+      .getLog(HadoopDaemonRemoteCluster.class.getName());
+
+  public static final String CONF_HADOOPNEWCONFDIR =
+    "test.system.hdrc.hadoopnewconfdir";
+  /**
+   * Key used to configure the HADOOP_HOME to be used by the
+   * HadoopDaemonRemoteCluster.
+   */
+  public final static String CONF_HADOOPHOME =
+    "test.system.hdrc.hadoophome";
+
+  public final static String CONF_SCRIPTDIR =
+    "test.system.hdrc.deployed.scripts.dir";
+  /**
+   * Key used to configure the HADOOP_CONF_DIR to be used by the
+   * HadoopDaemonRemoteCluster.
+   */
+  public final static String CONF_HADOOPCONFDIR = 
+    "test.system.hdrc.hadoopconfdir";
+
+  public final static String CONF_DEPLOYED_HADOOPCONFDIR =
+    "test.system.hdrc.deployed.hadoopconfdir";
+
+  private String hadoopHome;
+  protected String hadoopConfDir;
+  protected String scriptsDir;
+  protected String hadoopNewConfDir;
+  private final Set<Enum<?>> roles;
+  private final List<HadoopDaemonInfo> daemonInfos;
+  private List<RemoteProcess> processes;
+  protected Configuration conf;
+  
+  public static class HadoopDaemonInfo {
+    public final String cmd;
+    public final Enum<?> role;
+    public final List<String> hostNames;
+    public HadoopDaemonInfo(String cmd, Enum<?> role, List<String> hostNames) {
+      this.cmd = cmd;
+      this.role = role;
+      this.hostNames = hostNames;
+    }
+
+    public HadoopDaemonInfo(String cmd, Enum<?> role, String hostFile) 
+        throws IOException {
+      this.cmd = cmd;
+      this.role = role;
+      File file = new File(getDeployedHadoopConfDir(), hostFile);
+      BufferedReader reader = null;
+      hostNames = new ArrayList<String>();
+      try {
+        reader = new BufferedReader(new FileReader(file));
+        String host = null;
+        while ((host = reader.readLine()) != null) {
+          if (host.trim().isEmpty() || host.startsWith("#")) {
+            // Skip empty lines and comment lines
+            continue;
+          }
+          hostNames.add(host.trim());
+        }
+        if (hostNames.size() < 1) {
+          throw new IllegalArgumentException("At least one hostname "
+              + "is required to be present in file - " + hostFile);
+        }
+      } finally {
+        try {
+          if (reader != null) {
+            reader.close();
+          }
+        } catch (IOException e) {
+          LOG.warn("Could not close reader for " + hostFile, e);
+        }
+      }
+      LOG.info("Created HadoopDaemonInfo for " + cmd + " " + role + " from " 
+          + hostFile);
+    }
+  }
+
+  @Override
+  public String pushConfig(String localDir) throws IOException {
+    for (RemoteProcess process : processes){
+      process.pushConfig(localDir);
+    }
+    return hadoopNewConfDir;
+  }
+
+  public HadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
+    this.daemonInfos = daemonInfos;
+    this.roles = new HashSet<Enum<?>>();
+    for (HadoopDaemonInfo info : daemonInfos) {
+      this.roles.add(info.role);
+    }
+  }
+
+  @Override
+  public void init(Configuration conf) throws IOException {
+    this.conf = conf;
+    populateDirectories(conf);
+    this.processes = new ArrayList<RemoteProcess>();
+    populateDaemons();
+  }
+
+  @Override
+  public List<RemoteProcess> getAllProcesses() {
+    return processes;
+  }
+
+  @Override
+  public Set<Enum<?>> getRoles() {
+    return roles;
+  }
+
+  /**
+   * Method to populate the hadoop home and hadoop configuration directories.
+   * 
+   * @param conf
+   *          Configuration object containing values for
+   *          CONF_HADOOPHOME and
+   *          CONF_HADOOPCONFDIR
+   * 
+   * @throws IllegalArgumentException
+   *           if the configuration or system property set does not contain
+   *           values for the required keys.
+   */
+  protected void populateDirectories(Configuration conf) {
+    hadoopHome = conf.get(CONF_HADOOPHOME);
+    hadoopConfDir = conf.get(CONF_HADOOPCONFDIR);
+    scriptsDir = conf.get(CONF_SCRIPTDIR);
+    hadoopNewConfDir = conf.get(CONF_HADOOPNEWCONFDIR);
+    if (hadoopHome == null || hadoopConfDir == null || hadoopHome.isEmpty()
+        || hadoopConfDir.isEmpty()) {
+      LOG.error("No configuration "
+          + "for the HADOOP_HOME and HADOOP_CONF_DIR passed");
+      throw new IllegalArgumentException(
+          "No Configuration passed for hadoop home " +
+          "and hadoop conf directories");
+    }
+  }
+
+  public static String getDeployedHadoopConfDir() {
+    String dir = System.getProperty(CONF_DEPLOYED_HADOOPCONFDIR);
+    if (dir == null || dir.isEmpty()) {
+      LOG.error("No system property set "
+          + "for the CONF_DEPLOYED_HADOOPCONFDIR");
+      throw new IllegalArgumentException(
+          "No system property set for the deployed hadoop conf directory");
+    }
+    return dir;
+  }
+
+  @Override
+  public void start() throws IOException {
+    for (RemoteProcess process : processes) {
+      process.start();
+    }
+  }
+
+  @Override
+  public void start(String newConfLocation) throws IOException {
+    for (RemoteProcess process : processes) {
+      process.start(newConfLocation);
+    }
+  }
+
+  @Override
+  public void stop() throws IOException {
+    for (RemoteProcess process : processes) {
+      process.kill();
+    }
+  }
+
+  @Override
+  public void stop(String newConfLocation) throws IOException {
+    for (RemoteProcess process : processes) {
+      process.kill(newConfLocation);
+    }
+  }
+
+  protected void populateDaemon(HadoopDaemonInfo info) throws IOException {
+    for (String host : info.hostNames) {
+      InetAddress addr = InetAddress.getByName(host);
+      RemoteProcess process = getProcessManager(info, 
+          addr.getCanonicalHostName());
+      processes.add(process);
+    }
+  }
+
+  protected void populateDaemons() throws IOException {
+    for (HadoopDaemonInfo info : daemonInfos) {
+      populateDaemon(info);
+    }
+  }
+
+  @Override
+  public boolean isMultiUserSupported() throws IOException {
+    return false;
+  }
+
+  protected RemoteProcess getProcessManager(
+      HadoopDaemonInfo info, String hostName) {
+    RemoteProcess process = new ScriptDaemon(info.cmd, hostName, info.role);
+    return process;
+  }
+
+  /**
+   * The core daemon class which implements the remote management of the
+   * actual daemon processes in the cluster.
+   */
+  class ScriptDaemon implements RemoteProcess {
+
+    private static final String STOP_COMMAND = "stop";
+    private static final String START_COMMAND = "start";
+    private static final String SCRIPT_NAME = "hadoop-daemon.sh";
+    private static final String PUSH_CONFIG = "pushConfig.sh";
+    protected final String daemonName;
+    protected final String hostName;
+    private final Enum<?> role;
+
+    public ScriptDaemon(String daemonName, String hostName, Enum<?> role) {
+      this.daemonName = daemonName;
+      this.hostName = hostName;
+      this.role = role;
+    }
+
+    @Override
+    public String getHostName() {
+      return hostName;
+    }
+
+    private String[] getPushConfigCommand(String localDir, String remoteDir,
+        File scriptDir) throws IOException{
+      ArrayList<String> cmdArgs = new ArrayList<String>();
+      cmdArgs.add(scriptDir.getAbsolutePath() + File.separator + PUSH_CONFIG);
+      cmdArgs.add(localDir);
+      cmdArgs.add(hostName);
+      cmdArgs.add(remoteDir);
+      cmdArgs.add(hadoopConfDir);
+      return cmdArgs.toArray(new String[cmdArgs.size()]);
+    }
+
+    private ShellCommandExecutor buildPushConfig(String local, String remote)
+        throws IOException {
+      File scriptDir = new File(scriptsDir);
+      String[] commandArgs = getPushConfigCommand(local, remote, scriptDir);
+      HashMap<String, String> env = new HashMap<String, String>();
+      ShellCommandExecutor executor = new ShellCommandExecutor(commandArgs,
+          scriptDir, env);
+      LOG.info(executor.toString());
+      return executor;
+    }
+
+    private ShellCommandExecutor createNewConfDir() throws IOException {
+      ArrayList<String> cmdArgs = new ArrayList<String>();
+      cmdArgs.add("ssh");
+      cmdArgs.add(hostName);
+      cmdArgs.add("if [ -d " + hadoopNewConfDir +
+          " ];\n then echo Will remove existing directory; rm -rf " +
+          hadoopNewConfDir + ";\nmkdir " + hadoopNewConfDir + "; else \n" +
+          "echo " + hadoopNewConfDir + " does not exist, creating it" +
+          "; mkdir " + hadoopNewConfDir + ";\n fi");
+      String[] cmd = cmdArgs.toArray(new String[cmdArgs.size()]);
+      ShellCommandExecutor executor = new ShellCommandExecutor(cmd);
+      LOG.info(executor.toString());
+      return executor;
+    }
+
+    @Override
+    public void pushConfig(String localDir) throws IOException {
+      createNewConfDir().execute();
+      buildPushConfig(localDir, hadoopNewConfDir).execute();
+    }
+
+    private ShellCommandExecutor buildCommandExecutor(String command,
+        String confDir) {
+      String[] commandArgs = getCommand(command, confDir);
+      File cwd = new File(".");
+      HashMap<String, String> env = new HashMap<String, String>();
+      env.put("HADOOP_CONF_DIR", confDir);
+      ShellCommandExecutor executor
+        = new ShellCommandExecutor(commandArgs, cwd, env);
+      LOG.info(executor.toString());
+      return executor;
+    }
+
+    private File getBinDir() {
+      File binDir = new File(hadoopHome, "bin");
+      return binDir;
+    }
+
+    protected String[] getCommand(String command, String confDir) {
+      ArrayList<String> cmdArgs = new ArrayList<String>();
+      File binDir = getBinDir();
+      cmdArgs.add("ssh");
+      cmdArgs.add(hostName);
+      cmdArgs.add(binDir.getAbsolutePath() + File.separator + SCRIPT_NAME);
+      cmdArgs.add("--config");
+      cmdArgs.add(confDir);
+      // XXX Twenty internal version does not support --script option.
+      cmdArgs.add(command);
+      cmdArgs.add(daemonName);
+      return cmdArgs.toArray(new String[cmdArgs.size()]);
+    }
+
+    @Override
+    public void kill() throws IOException {
+      kill(hadoopConfDir);
+    }
+
+    @Override
+    public void start() throws IOException {
+      start(hadoopConfDir);
+    }
+
+    public void start(String newConfLocation) throws IOException {
+      ShellCommandExecutor cme = buildCommandExecutor(START_COMMAND,
+          newConfLocation);
+      cme.execute();
+      String output = cme.getOutput();
+      if (!output.isEmpty()) { // getOutput() never returns null
+        if (output.toLowerCase().contains("error")) {
+          LOG.warn("Error detected in the start command output.");
+          throw new IOException("Start error\n" + output);
+        }
+      }
+    }
+
+    public void kill(String newConfLocation) throws IOException {
+      ShellCommandExecutor cme
+        = buildCommandExecutor(STOP_COMMAND, newConfLocation);
+      cme.execute();
+      String output = cme.getOutput();
+      if (!output.isEmpty()) { // getOutput() never returns null
+        if (output.toLowerCase().contains("error")) {
+          LOG.warn("Error detected in the stop command output.");
+          throw new IOException("Kill error\n" + output);
+        }
+      }
+    }
+
+    @Override
+    public Enum<?> getRole() {
+      return role;
+    }
+  }
+}
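
Since HadoopDaemonRemoteCluster is abstract, a concrete cluster only has to
describe its daemons. A hedged sketch follows; the role enum, the class name,
and the masters/slaves host file names are illustrative assumptions, not part
of this commit:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;

    // Illustrative subclass; host lists are read from files resolved
    // against the deployed conf dir (see getDeployedHadoopConfDir()).
    public class ExampleHDFSCluster extends HadoopDaemonRemoteCluster {
      enum Role { NAMENODE, DATANODE }  // hypothetical roles

      private static List<HadoopDaemonInfo> describeDaemons()
          throws IOException {
        List<HadoopDaemonInfo> infos = new ArrayList<HadoopDaemonInfo>();
        infos.add(new HadoopDaemonInfo("namenode", Role.NAMENODE, "masters"));
        infos.add(new HadoopDaemonInfo("datanode", Role.DATANODE, "slaves"));
        return infos;
      }

      public ExampleHDFSCluster() throws IOException {
        super(describeDaemons());
      }
    }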

Added: hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java (added)
+++ hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java Fri May 14 23:56:34 2010
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package org.apache.hadoop.test.system.process;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
+
+public abstract class MultiUserHadoopDaemonRemoteCluster
+    extends HadoopDaemonRemoteCluster {
+
+  public MultiUserHadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
+    super(daemonInfos);
+  }
+
+  @Override
+  protected RemoteProcess getProcessManager(
+      HadoopDaemonInfo info, String hostName) {
+    return new MultiUserScriptDaemon(info.cmd, hostName, info.role);
+  }
+
+  @Override
+  public boolean isMultiUserSupported() throws IOException {
+    return true;
+  }
+
+  class MultiUserScriptDaemon extends ScriptDaemon {
+
+    private static final String MULTI_USER_BINARY_PATH_KEY =
+        "test.system.hdrc.multi-user.binary.path";
+    private static final String MULTI_USER_MANAGING_USER =
+        "test.system.hdrc.multi-user.managinguser.";
+    private String binaryPath;
+    /**
+     * The managing user for a particular daemon is looked up under the key
+     * MULTI_USER_MANAGING_USER + daemonName.
+     */
+    private String managingUser;
+
+    public MultiUserScriptDaemon(
+        String daemonName, String hostName, Enum<?> role) {
+      super(daemonName, hostName, role);
+      initialize(daemonName);
+    }
+
+    private void initialize(String daemonName) {
+      binaryPath = conf.get(MULTI_USER_BINARY_PATH_KEY);
+      if (binaryPath == null || binaryPath.trim().isEmpty()) {
+        throw new IllegalArgumentException(
+            "Binary path for the multi-user binary is not present. Please set "
+                + MULTI_USER_BINARY_PATH_KEY + " correctly");
+      }
+      File binaryFile = new File(binaryPath);
+      if (!binaryFile.exists() || !binaryFile.canExecute()) {
+        throw new IllegalArgumentException(
+            "Binary file path is not configured correctly. Please set "
+                + MULTI_USER_BINARY_PATH_KEY
+                + " to a properly configured binary file.");
+      }
+      managingUser = conf.get(MULTI_USER_MANAGING_USER + daemonName);
+      if (managingUser == null || managingUser.trim().isEmpty()) {
+        throw new IllegalArgumentException(
+            "Managing user for the daemon is not present. Please set "
+                + MULTI_USER_MANAGING_USER + daemonName
+                + " to the correct value.");
+      }
+    }
+
+    @Override
+    protected String[] getCommand(String command, String confDir) {
+      ArrayList<String> commandList = new ArrayList<String>();
+      commandList.add(binaryPath);
+      commandList.add(managingUser);
+      commandList.add(hostName);
+      commandList.add("--config "
+          + confDir + " " + command + " " + daemonName);
+      return commandList.toArray(new String[commandList.size()]);
+    }
+  }
+}
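
For clarity, getCommand above hands the whole hadoop-daemon.sh argument
string to the multi-user binary as a single trailing argument. Assuming an
illustrative configuration such as

    test.system.hdrc.multi-user.binary.path=/opt/hadoop/bin/runAs
    test.system.hdrc.multi-user.managinguser.datanode=hdfsadmin

the command executed to start a datanode on host01 would take the shape

    /opt/hadoop/bin/runAs hdfsadmin host01 '--config /tmp/new-conf start datanode'

where the binary path, managing user, host, and conf dir are all assumptions
made for the sake of the example.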

Added: hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java (added)
+++ hadoop/common/trunk/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java Fri May 14 23:56:34 2010
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.test.system.process;
+
+import java.io.IOException;
+
+/**
+ * Interface to manage the remote process.
+ */
+public interface RemoteProcess {
+  /**
+   * Get the host on which the daemon process is running/stopped.<br/>
+   * 
+   * @return hostname on which process is running/stopped.
+   */
+  String getHostName();
+
+  /**
+   * Start a given daemon process.<br/>
+   * 
+   * @throws IOException if startup fails.
+   */
+  void start() throws IOException;
+  /**
+   * Starts the daemon from a user-specified conf dir. 
+   * @param newConfLocation the dir where the new conf resides. 
+   * @throws IOException if startup fails.
+   */
+  void start(String newConfLocation) throws IOException;
+  /**
+   * Stop a given daemon process.<br/>
+   * 
+   * @throws IOException if shutdown fails.
+   */
+  void kill() throws IOException;
+  
+  /**
+   * Stops a given daemon running from a user-specified 
+   * conf dir. <br/>
+   * @param newConfLocation the dir where the new conf resides. 
+   * @throws IOException if shutdown fails.
+   */
+  void kill(String newConfLocation) throws IOException;
+  /**
+   * Get the role of the daemon in the cluster.
+   * 
+   * @return the role enum of the daemon.
+   */
+  Enum<?> getRole();
+  
+  /**
+   * Pushes the configuration to a new configuration directory.
+   * @param localDir the local dir where the new config files reside.
+   * @throws IOException if the push fails.
+   */
+  void pushConfig(String localDir) throws IOException;
+}
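
As a usage illustration, the RemoteProcess handles obtained from
ClusterProcessManager.getAllProcesses() can be filtered by role and restarted
one at a time. A minimal hedged sketch, where the manager instance and the
DATANODE role name are assumptions:

    // Inside a test method; 'manager' is an already-initialized
    // ClusterProcessManager, and the DATANODE role name is illustrative.
    String newConf = manager.pushConfig("/tmp/new-conf");
    for (RemoteProcess process : manager.getAllProcesses()) {
      if ("DATANODE".equals(process.getRole().toString())) {
        process.kill();           // stop using the current conf dir
        process.start(newConf);   // start from the freshly pushed conf
      }
    }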

Added: hadoop/common/trunk/src/test/system/scripts/pushConfig.sh
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/system/scripts/pushConfig.sh?rev=944521&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/system/scripts/pushConfig.sh (added)
+++ hadoop/common/trunk/src/test/system/scripts/pushConfig.sh Fri May 14 23:56:34 2010
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# local folder with new configuration file
+LOCAL_DIR=$1
+# remote daemon host
+HOST=$2
+#remote dir points to the location of new config files
+REMOTE_DIR=$3
+# remote daemon HADOOP_CONF_DIR location
+DAEMON_HADOOP_CONF_DIR=$4
+
+if [ $# -ne 4 ]; then
+  echo "Wrong number of parameters" >&2
+  exit 2
+fi
+
+ret_value=0
+
+echo The script makes a remote copy of existing ${DAEMON_HADOOP_CONF_DIR} to ${REMOTE_DIR}
+echo and populates it with new configs prepared in $LOCAL_DIR
+
+ssh ${HOST} cp -r ${DAEMON_HADOOP_CONF_DIR}/* ${REMOTE_DIR}
+ret_value=$?
+
+# make sure files are writable
+ssh ${HOST} chmod u+w ${REMOTE_DIR}/*
+
+# copy new files over
+scp -r ${LOCAL_DIR}/* ${HOST}:${REMOTE_DIR}
+
+err_code=`echo $? + $ret_value | bc`
+echo Copying of files from local to remote returned ${err_code}
+
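A sample manual invocation of the script, with all four arguments illustrative
(local dir with the new conf, remote host, remote staging dir, and the
daemon's HADOOP_CONF_DIR):

    ./pushConfig.sh /tmp/new-conf host01.example.com /tmp/remote-conf /opt/hadoop/conf

Note the script only echoes the combined return code of the remote copy and
the scp; a reported value of 0 means both steps succeeded.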


