trafodion-commits mailing list archives

From sure...@apache.org
Subject [2/3] incubator-trafodion git commit: TRAFODION-1451: Vanilla hadoop support for installer
Date Tue, 22 Sep 2015 03:20:52 GMT
TRAFODION-1451: Vanilla hadoop support for installer


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/fad0c54e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/fad0c54e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/fad0c54e

Branch: refs/heads/master
Commit: fad0c54ea5defde1d45b8b8fff303f354542c782
Parents: eeb0189
Author: Eason <hfutmkby@gmail.com>
Authored: Tue Sep 15 00:09:07 2015 +0800
Committer: Eason <hfutmkby@gmail.com>
Committed: Tue Sep 15 00:09:07 2015 +0800

----------------------------------------------------------------------
 core/sqf/sql/scripts/install_apache_hadoop      | 1807 ++++++++++++++++++
 .../installer/traf_apache_hadoop_config_setup   |  842 ++++++++
 .../installer/trafodion_apache_hadoop_install   |  771 ++++++++
 3 files changed, 3420 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/fad0c54e/core/sqf/sql/scripts/install_apache_hadoop
----------------------------------------------------------------------
diff --git a/core/sqf/sql/scripts/install_apache_hadoop b/core/sqf/sql/scripts/install_apache_hadoop
new file mode 100755
index 0000000..8d766ea
--- /dev/null
+++ b/core/sqf/sql/scripts/install_apache_hadoop
@@ -0,0 +1,1807 @@
+#!/bin/sh
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+#
+##############################################################################
+##
+## This sets up a Hadoop/Hive/HBase environment to be used with a Trafodion
+## workstation development environment. This script is meant for developers
+## and has the following characteristics:
+##
+## - Creates a pseudo-distributed single node installation
+## - Sandboxes the installation into a single directory,
+##   $MY_SQROOT/sql/local_hadoop
+## - Creates some convenience shell scripts in $MY_SQROOT/sql/scripts
+##   to start and stop Hadoop and for some interactive shells
+## - Does not require sudo privileges for installing or running Hadoop
+## - Can run on non-standard ports, if needed, to be able to run multiple
+##   Hadoop/Hive/HBase instances on the same machine
+## - Uses a file system directory to store HBase data, not HDFS
+## - Uses MySQL as the Hive metastore
+## - Creates a TPC-DS sample database in Hive
+##
+##############################################################################
+# Environment variables - optional
+#
+# MY_LOCAL_SW_DIST - shared location on local network for tarballs for Hadoop, etc.
+# - Hadoop
+# - HBase
+# - Hive
+# - MySQL (used as Hive metastore DB)
+# - MySQL connector for Java (used by Hive to access metastore)
+# - TPC-DS from tpc.org (for Hive sample DB)
+# also set http_proxy and ftp_proxy if necessary, to download
+# files from repositories on the Internet
+#
+# DCS to use. This list is in order of precedence.
+# DCS_TAR - Optionally specify a local tar file to use
+# DCS_URL - Optionally specify a URL to download
+# DCS_SRC - Optionally specify a local source tree to use
+# If none is specified, the latest code is downloaded from GitHub
+#
+# Trafodion REST to use. This list is in order of precedence.
+# REST_TAR - Optionally specify a local tar file to use
+# REST_URL - Optionally specify a URL to download
+# REST_SRC - Optionally specify a local source tree to use
+# If none is specified, the latest code is downloaded from GitHub
+#
+# phoenix_test
+# PHX_SRC - Optionally specify a local source tree to use
+#  otherwise, the latest code is downloaded from GitHub
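+#
+# As an illustration (the paths below are hypothetical), a run that pulls
+# tarballs from a shared cache, uses a pre-built DCS tar file, picks random
+# non-standard ports, and answers all questions with yes might look like:
+#
+#   export MY_LOCAL_SW_DIST=/shared/sw_dist
+#   export DCS_TAR=$HOME/dcs-1.0.0-incubating.tar.gz
+#   ./install_apache_hadoop -p rand -y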
+##############################################################################
+
+function usage {
+
+cat <<EOF
+
+Usage:
+
+$MY_CMD [ -p {<start port num> | rand | fromDisplay} ]
+                     [ -y ]
+                     [ -n ]
+                     [ -v ]
+
+  -p configures non-standard ports, and is one of:
+    -p <start port num>     use a custom starting port number
+    -p rand                 for shared systems, use a random start port number
+                            between 9000 and 49000 that is divisible by 200
+    -p fromDisplay          if you are running on a VNC session
+
+  -y answers interactive questions implicitly with yes
+
+  -n takes no action, useful with -v
+
+  -v lists the port values used
+
+  See script header for use of optional environment variables.
+
+EOF
+}
+
+function check_ssh {
+  SSH_FAILED=no
+  echo
+  echo "Check ssh ${MY_HOST_1} access without a password..."
+  # disable any options that would query the user terminal, ask for strict host key checking
+  # to force it to fail if something is not quite right with the host key
+  ssh -o PasswordAuthentication=no -o KbdInteractiveDevices=none -o StrictHostKeyChecking=yes ${MY_HOST_1} echo "testing ssh ${MY_HOST_1}"
+  if [ $? -ne 0 ]; then
+    # if we deal with multiple systems, don't wipe out .ssh directory
+    # try some simpler measures and give up if those don't work
+    echo "Problems encountered with ssh, trying to fix some common issues..."
+    if [ -d ~/.ssh ]; then
+      grep -q NoHostAuthenticationForLocalhost ~/.ssh/config
+      if [ $? -ne 0 ]; then
+        # configure ssh to omit host check for localhost
+        grep -qi ^host ~/.ssh/config
+        if [ $? -eq 0 ]; then
+          # make sure this applies to all hosts, even if
+          # there are host directives in the file
+          echo "host *"  >>~/.ssh/config
+        fi
+        echo "NoHostAuthenticationForLocalhost=yes" >>~/.ssh/config
+        chmod go-w ~/.ssh/config
+      fi
+
+      if [ ${MY_HOST_1} != "localhost" ]; then
+        # remove existing entries for the host from known_hosts
+        if [ -f ~/.ssh/known_hosts ]; then
+          ed ~/.ssh/known_hosts <<EOF
+g/^${MY_HOST_1}/d
+w
+EOF
+        fi
+        # add an entry for our host to known_hosts
+        echo "${MY_HOST_1}" `cat /etc/ssh/ssh_host_rsa_key.pub` >>~/.ssh/known_hosts
+        chmod 644 ~/.ssh/known_hosts
+      fi
+
+      # check whether doing ssh-add will fix the problem
+      ps -aef | grep ${USER} | grep ssh-agent | grep -v grep >/dev/null
+      if [ $? -eq 0 ]; then
+        # ssh-agent is running, tell it to use the new key
+        ssh-add
+      fi
+
+      # now try once more (with regular host key checking)
+      ssh -o PasswordAuthentication=no -o KbdInteractiveDevices=none ${MY_HOST_1} echo "testing ssh ${MY_HOST_1}"
+      if [ $? -ne 0 ]; then
+        SSH_FAILED=yes
+      fi
+    else
+      SSH_FAILED=yes
+    fi
+  fi
+
+  if [ $SSH_FAILED = yes ]; then
+    # A few initial steps that are required:
+    cat <<EOF
+Please make sure you can do ssh ${MY_HOST_1} without having to enter a password
+(this is a one-time setup):
+
+cd
+rm -rf .ssh
+ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
+chmod 600 ~/.ssh/id_rsa
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
+chmod 644 ~/.ssh/authorized_keys
+ssh-add
+
+EOF
+
+    if [ -z "$MY_IMPLICIT_Y" ]; then
+      echo "Would you like to configure ssh to ${MY_HOST_1}? This will wipe out your existing"
+      echo "~/.ssh directory and you will lose existing private key files."
+      echo " "
+      echo -n "Enter y/n (n): "
+
+      read YN
+    else
+      # user already enabled on command line
+      YN=$MY_IMPLICIT_Y
+    fi
+
+    if [ "$YN" = "y" -o "$YN" = "Y" ]; then
+      echo "Setting up public/private key pair for connection to ${MY_HOST_1}..."
+      echo "Saving the old ~/.ssh directory to ~/.ssh-renamed, in case this is not what you wanted"
+      cd
+      rm -rf .ssh-renamed
+      mv -f .ssh .ssh-renamed
+      # generate an RSA public/private key pair without a passphrase
+      ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
+
+      # copy public key to authorized_keys and set permissions correctly
+      chmod 600 ~/.ssh/id_rsa
+      cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
+      chmod 644 ~/.ssh/authorized_keys
+
+      # avoid the interactive question to authenticate localhost
+      # as a valid host (this might cause problems when running
+      # this script in batch mode)
+      echo "${MY_HOST_1}" `cat /etc/ssh/ssh_host_rsa_key.pub` >~/.ssh/known_hosts
+      chmod 644 ~/.ssh/known_hosts
+
+      # disable checks for "localhost", so we can use this on multiple machines
+      echo "NoHostAuthenticationForLocalhost=yes" >>~/.ssh/config
+      chmod 644 ~/.ssh/config
+
+      ps -aef | grep ${USER} | grep ssh-agent | grep -v grep >/dev/null
+      if [ $? -eq 0 ]; then
+        # ssh-agent is running, tell it to use the new key
+        ssh-add
+      fi
+
+      # now try once more (with regular host key checking)
+      ssh -o PasswordAuthentication=no -o KbdInteractiveDevices=none ${MY_HOST_1} echo "testing ssh ${MY_HOST_1}"
+      if [ $? -ne 0 ]; then
+        SSH_FAILED=yes
+      fi
+    else
+      echo "================================================="
+      echo "exiting, set up ssh access on your own,"
+      echo "remove ${MY_SW_ROOT} and rerun this"
+      echo "installation script, $0"
+      echo "================================================="
+      exit 1
+    fi
+  else
+    echo "Check ssh ${MY_HOST_1} access without a password succeeded."
+  fi
+  # end of ssh checking
+}
+
+function listports {
+  # Report values selected for ports
+  VARS_FOR_PORTS="
+	MY_DCS_MASTER_INFO_PORT
+	MY_DCS_MASTER_PORT
+	MY_DCS_SERVER_INFO_PORT
+	MY_HADOOP_DN_HTTP_PORT_NUM
+	MY_HADOOP_DN_IPC_PORT_NUM
+	MY_HADOOP_DN_PORT_NUM
+	MY_HADOOP_HDFS_PORT_NUM
+	MY_HADOOP_JOB_TRACKER_HTTP_PORT_NUM
+	MY_HADOOP_NN_HTTP_PORT_NUM
+	MY_HADOOP_SECONDARY_NN_PORT_NUM
+	MY_HADOOP_SHUFFLE_PORT_NUM
+	MY_HADOOP_TASK_TRACKER_PORT_NUM
+	MY_HBASE_MASTER_INFO_PORT_NUM
+	MY_HBASE_MASTER_PORT_NUM
+	MY_HBASE_REGIONSERVER_INFO_PORT_NUM
+	MY_HBASE_REGIONSERVER_PORT_NUM
+	MY_HBASE_REST_PORT_NUM
+	MY_HBASE_ZOOKEEPER_LEADERPORT_NUM
+	MY_HBASE_ZOOKEEPER_PEERPORT_NUM
+	MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM
+	MY_REST_SERVER_PORT
+	MY_REST_SERVER_SECURE_PORT
+	MY_SQL_PORT_NUM
+	MY_YARN_ADMIN_PORT_NUM
+	MY_YARN_HTTP_PORT_NUM
+	MY_YARN_LOCALIZER_PORT_NUM
+	MY_YARN_NM_PORT_NUM
+	MY_YARN_RESMAN_PORT_NUM
+	MY_YARN_SCHED_PORT_NUM
+	MY_YARN_TRACKER_PORT_NUM"
+
+  for AVAR in $VARS_FOR_PORTS; do
+    AVALUE="$(eval "echo \$$AVAR")"
+    printf '%s=%s\n' $AVAR $AVALUE
+  done
+  echo
+}
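+# Example output line from listports (with the default ports, i.e. no -p option):
+#   MY_HADOOP_HDFS_PORT_NUM=9000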
+
+# global config
+
+# directory on local disk that belongs to me
+MY_SW_PARENT=$MY_SQROOT/sql
+
+# name of subdirectory to install Hadoop/Hive/MySQL in
+MY_SW_ROOT=$MY_SW_PARENT/local_hadoop
+
+# directory where convenience scripts will be located
+MY_SW_SCRIPTS_DIR=$MY_SW_PARENT/scripts
+
+MYSQL_HOME=${MY_SW_ROOT}/mysql
+YARN_HOME=${MY_SW_ROOT}/hadoop
+HIVE_HOME=${MY_SW_ROOT}/hive
+HBASE_HOME=${MY_SW_ROOT}/hbase
+
+  # shared location on local network for tarballs for Hadoop, etc.
+  # - Hadoop
+  # - HBase
+  # - Hive
+  # - MySQL (used as Hive metastore DB)
+  # - MySQL connector for Java (used by Hive to access metastore)
+  # - TPC-DS from tpc.org (for Hive sample DB)
+  # ------------- please customize this line ----------------------
+  if [ -z "$MY_LOCAL_SW_DIST" ]; then
+    MY_LOCAL_SW_DIST=/add_your_local_shared_folder_here
+  fi
+
+  # also set http_proxy and ftp_proxy if necessary, to download
+  # files from repositories on the Internet
+
+
+# hosts: only "localhost" or the actual DNS name of the local host is supported for now
+############################################
+MY_HOST_1=localhost
+# this may get changed below to a DNS name, with the -d command line option
+
+MY_LOG_FILE=${MY_SW_ROOT}/log/install_local_hadoop_$(date +%F_%T).log
+
+# multiple hosts not yet supported
+
+# locations for storing data and metadata
+##########################################
+MY_DATA_DIR=${MY_SW_ROOT}/data
+MY_SQL_DATA_DIR=${MY_DATA_DIR}/mysql
+MY_DERBY_DATA_DIR=${MY_DATA_DIR}/derby
+MY_HADOOP_DATA_DIR=${MY_DATA_DIR}/hadoop
+MY_HIVE_DATA_DIR=${MY_DATA_DIR}/hive
+MY_HBASE_DATA_DIR=${MY_DATA_DIR}/hbase
+
+# Administrator and Hive user name for MySQL installation
+MY_SQL_ADMIN=root
+MY_SQL_USER=$MY_SQL_ADMIN
+# generate a random password to use for MySQL (no spaces or special characters)
+MY_SQL_ADMIN_PASSWD=p${RANDOM}${RANDOM}
+MY_SQL_USER_PASSWD=$MY_SQL_ADMIN_PASSWD
+# database name for Hive metastore
+MY_SQL_METASTORE_DB=metastore
+
+# MySQL configuration file
+MY_SQL_CONFIG_FILE=${MYSQL_HOME}/my.cnf
+
+MY_CMD=$0
+MY_IMPLICIT_Y=
+NOACTION=
+VERBOSE=
+MISSING_P_VAL="ERROR: -p option should be followed by <start port num>, rand or fromDisplay"
+
+# process command line arguments
+################################
+
+while [ $# -gt 0 ];
+do
+  case $1 in
+-p) shift
+    if [[ -z "$1" ]]; then
+      echo "$MISSING_P_VAL"
+      exit 1
+    fi
+    MY_START_PORT=$1
+    if [ $MY_START_PORT != "rand" -a $MY_START_PORT != "fromDisplay" ]; then
+      test $MY_START_PORT -gt 0 >/dev/null 2>&1
+      if [ $? -ne 0 ]; then
+        echo "$MISSING_P_VAL"
+        exit 1
+      fi
+    fi
+    ;;
+-y) MY_IMPLICIT_Y="y"
+    ;;
+-d) # use DNS name instead of "localhost" (option is not currently recommended)
+    MY_HOST_1=`uname -a | cut -f 2 -d ' '`
+    ;;
+-h|-help)
+    usage
+    exit 0
+    ;;
+-n) NOACTION="y"
+    ;;
+-v) VERBOSE="y"
+    ;;
+*)  echo "ERROR: Unexpected argument $1"
+    echo
+    cat <<EOF
+Syntax: $0 [ -p {<start port num> | rand | fromDisplay} ]  [-y]  [-n] [-v]
+EOF
+    exit 1
+    ;;
+  esac
+  shift
+done
+
+# port numbers used
+####################
+
+# From http://blog.cloudera.com/blog/2009/08/hadoop-default-ports-quick-reference/
+# and http://hbase.apache.org/book/config.files.html
+
+USE_DEFAULT_PORTS=no
+# To be done later, use separate ports to allow multiple
+# Hadoop instances on the same Linux node
+if [ -z "$MY_START_PORT" ]; then
+  USE_DEFAULT_PORTS=yes
+  MY_START_PORT=50000
+else
+  if [ $MY_START_PORT == "fromDisplay" ]; then
+    # display :34.0 would result in starting port 53400 (instead of default 50000 range)
+    MY_START_PORT=${DISPLAY/*:/}
+    MY_START_PORT=${MY_START_PORT/.*/}
+    MY_START_PORT=`expr 50000 + $MY_START_PORT '*' 100`
+  elif [ $MY_START_PORT == "rand" ]; then
+    # pick a random number between 9000 and 49000 that is divisible by 200
+    MY_START_PORT=`expr $RANDOM '%' 200 '*' 200 + 9000`
+  fi
+  echo "# Using non-standard port range from MY_START_PORT env var: $MY_START_PORT..."
+fi
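+# Worked examples for the computation above: DISPLAY=:34.0 gives
+# 50000 + 34 * 100 = 53400; "rand" gives a multiple of 200 in [9000, 48800],
+# e.g. RANDOM=417 gives 417 % 200 * 200 + 9000 = 12400.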
+
+# assign ports with defaults outside the range of 50000-50199
+if [ $USE_DEFAULT_PORTS == 'yes' ]; then
+  # fs.default.name
+  MY_HADOOP_HDFS_PORT_NUM=9000
+  # mapreduce.shuffle.port
+  MY_HADOOP_SHUFFLE_PORT_NUM=8080
+  # yarn.resourcemanager.address
+  MY_YARN_RESMAN_PORT_NUM=8032
+  # yarn.resourcemanager.scheduler.address
+  MY_YARN_SCHED_PORT_NUM=8030
+  # yarn.resourcemanager.webapp.address
+  MY_YARN_HTTP_PORT_NUM=8088
+  # yarn.resourcemanager.resource-tracker.address
+  MY_YARN_TRACKER_PORT_NUM=8031
+  # yarn.resourcemanager.admin.address
+  MY_YARN_ADMIN_PORT_NUM=8033
+  # yarn.nodemanager.localizer.address
+  MY_YARN_LOCALIZER_PORT_NUM=8040
+  # yarn.nodemanager.webapp.address
+  MY_YARN_NM_PORT_NUM=8041
+  # unique port # for MySQL (don't use the default of 3306, which is often already in use)
+  MY_SQL_PORT_NUM=3346
+  # hbase.master.port
+  MY_HBASE_MASTER_PORT_NUM=60000
+  # hbase.master.info.port
+  MY_HBASE_MASTER_INFO_PORT_NUM=60010
+  # hbase.regionserver.port
+  MY_HBASE_REGIONSERVER_PORT_NUM=60020
+  # hbase.regionserver.info.port
+  MY_HBASE_REGIONSERVER_INFO_PORT_NUM=60030
+  # hbase.zookeeper.peerport
+  MY_HBASE_ZOOKEEPER_PEERPORT_NUM=2888
+  # hbase.zookeeper.leaderport
+  MY_HBASE_ZOOKEEPER_LEADERPORT_NUM=3888
+  # hbase.zookeeper.property.clientPort
+  MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM=2181
+  # hbase.rest.port
+  MY_HBASE_REST_PORT_NUM=8080
+  # dcs.master.port (range of port numbers, one per server)
+  MY_DCS_MASTER_PORT=23400
+  # dcs.master.info.port
+  MY_DCS_MASTER_INFO_PORT=24400
+  # dcs.server.info.port
+  MY_DCS_SERVER_INFO_PORT=24410
+  # trafodion.rest.port
+  MY_REST_SERVER_PORT=4200
+  # trafodion.rest.https.port
+  MY_REST_SERVER_SECURE_PORT=4201
+else
+  # fs.default.name
+  MY_HADOOP_HDFS_PORT_NUM=$MY_START_PORT
+  MY_HADOOP_SHUFFLE_PORT_NUM=`expr $MY_START_PORT + 62`
+  MY_YARN_RESMAN_PORT_NUM=`expr $MY_START_PORT + 132`
+  MY_YARN_SCHED_PORT_NUM=`expr $MY_START_PORT + 130`
+  MY_YARN_HTTP_PORT_NUM=`expr $MY_START_PORT + 188`
+  MY_YARN_TRACKER_PORT_NUM=`expr $MY_START_PORT + 131`
+  MY_YARN_ADMIN_PORT_NUM=`expr $MY_START_PORT + 133`
+  MY_YARN_LOCALIZER_PORT_NUM=`expr $MY_START_PORT + 140`
+  MY_YARN_NM_PORT_NUM=`expr $MY_START_PORT + 141`
+  MY_HBASE_MASTER_PORT_NUM=`expr $MY_START_PORT + 160`
+  MY_HBASE_MASTER_INFO_PORT_NUM=`expr $MY_START_PORT + 161`
+  MY_HBASE_REGIONSERVER_PORT_NUM=`expr $MY_START_PORT + 162`
+  MY_HBASE_REGIONSERVER_INFO_PORT_NUM=`expr $MY_START_PORT + 163`
+  MY_HBASE_ZOOKEEPER_PEERPORT_NUM=`expr $MY_START_PORT + 167`
+  MY_HBASE_ZOOKEEPER_LEADERPORT_NUM=`expr $MY_START_PORT + 168`
+  MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM=`expr $MY_START_PORT + 170`
+  MY_HBASE_REST_PORT_NUM=`expr $MY_START_PORT + 171`
+  # unique port # for MySQL (default is 3306)
+  MY_SQL_PORT_NUM=`expr $MY_START_PORT + 46`
+  # MY_DCS_MASTER_PORT is a range of ports, one per server
+  # (see "server" file written below for how many are configured)
+  MY_DCS_MASTER_PORT=`expr $MY_START_PORT + 172`
+  MY_DCS_MASTER_INFO_PORT=`expr $MY_START_PORT + 181`
+  MY_DCS_SERVER_INFO_PORT=`expr $MY_START_PORT + 182`
+  MY_REST_SERVER_PORT=`expr $MY_START_PORT + 183`
+  MY_REST_SERVER_SECURE_PORT=`expr $MY_START_PORT + 184`
+fi
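+# For example, a start port of 53400 puts HDFS (fs.default.name) on 53400,
+# MySQL on 53446 (+46), and the HBase master on 53560 (+160).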
+
+# handle ports in the range of 50000 to 50199
+
+# in hdfs-site.xml (setting any of these to 0 means start on a free port):
+
+# dfs.http.address	        50070	 dfs namenode web ui
+MY_HADOOP_NN_HTTP_PORT_NUM=`expr $MY_START_PORT + 70`
+
+# dfs.secondary.http.address	50090	 The secondary namenode http server
+MY_HADOOP_SECONDARY_NN_PORT_NUM=`expr $MY_START_PORT + 90`
+
+# dfs.datanode.address	        50010	 datanode server
+MY_HADOOP_DN_PORT_NUM=`expr $MY_START_PORT + 10`
+
+# dfs.datanode.http.address	50075	 datanode http server
+MY_HADOOP_DN_HTTP_PORT_NUM=`expr $MY_START_PORT + 75`
+
+# dfs.datanode.ipc.address	50020	 datanode ipc server
+MY_HADOOP_DN_IPC_PORT_NUM=`expr $MY_START_PORT + 20`
+
+# in mapred-site.xml:
+
+# mapred.job.tracker.http.address (default 50030)
+MY_HADOOP_JOB_TRACKER_HTTP_PORT_NUM=`expr $MY_START_PORT + 30`
+
+# mapred.task.tracker.http.address (default 50060)
+MY_HADOOP_TASK_TRACKER_PORT_NUM=`expr $MY_START_PORT + 60`
+
+# others, left as default for now:
+
+# dfs.backup.address (50100)
+# dfs.backup.http.address (50105)
+# mapred.job.tracker (???)
+
+if [[ -n "$VERBOSE" ]]; then
+  listports
+fi
+if [[ -n "$NOACTION" ]]; then
+  exit 0
+fi
+
+
+# Specify mirrors and versions of needed components
+#####################################################
+
+#HADOOP_MIRROR_URL=http://archive.cloudera.com/cdh5/cdh/5
+#HADOOP_TAR=hadoop-2.5.0-cdh5.3.0.tar.gz
+
+HADOOP_MIRROR_URL=http://archive.apache.org/dist/hadoop/core/hadoop-2.4.0
+HADOOP_TAR=hadoop-2.4.0.tar.gz
+
+if [[ "$SQ_HBASE_DISTRO" = "HDP" ]]; then
+    HADOOP_TAR=hadoop-2.6.0.2.2.0.0-2041.tar.gz
+fi
+
+# Alternative: Use MariaDB (not validated)
+# MARIADB_MIRROR_URL=https://downloads.mariadb.org/f/mariadb-5.5.29/kvm-bintar-hardy-amd64/mariadb-5.5.29-linux-x86_64.tar.gz/from/http:/ftp.osuosl.org/pub/mariadb
+# MARIADB_TAR=mariadb-5.5.29-linux-x86_64.tar.gz
+
+MYSQL_MIRROR_URL=http://cdn.mysql.com/archives/mysql-5.6
+MYSQL_TAR=mysql-5.6.10-linux-glibc2.5-x86_64.tar.gz
+
+MYSQL_JDBC_URL=http://cdn.mysql.com/archives/mysql-connector-java-5.1
+MYSQL_JDBC_TAR=mysql-connector-java-5.1.23.tar.gz
+
+HIVE_MIRROR_URL=https://archive.apache.org/dist/hive/hive-0.13.1
+HIVE_PREFIX=apache-hive-0.13.1-bin
+HIVE_TAR=${HIVE_PREFIX}.tar.gz
+
+#HBASE_MIRROR_URL=http://psg.mtu.edu/pub/apache/hbase/hbase-0.98.3
+#HBASE_MIRROR_URL=http://archive.cloudera.com/cdh5/cdh/5
+HBASE_MIRROR_URL=http://archive.apache.org/dist/hbase/hbase-0.98.6/
+
+#HBASE_TAR=hbase-0.98.6-cdh5.3.0.tar.gz
+HBASE_TAR=hbase-0.98.6-hadoop2-bin.tar.gz
+#HBASE_TAR=hbase-0.98.4-hadoop2-bin.tar.gz
+if [[ "$SQ_HBASE_DISTRO" = "HDP" ]]; then
+    HBASE_TAR=hbase-0.98.4.2.2.0.0-2041-hadoop2.tar.gz
+fi
+
+echo "Checking for existing Hadoop processes..."
+if [ `netstat -anl | grep ${MY_HADOOP_JOB_TRACKER_HTTP_PORT_NUM} | grep LISTEN | wc -l` -gt 0 -o \
+     `netstat -anl | grep ${MY_HADOOP_NN_HTTP_PORT_NUM} | grep LISTEN | wc -l` -gt 0 ]; then
+  echo '**** ERROR:'
+  echo "A process is already listening to port ${MY_HADOOP_JOB_TRACKER_HTTP_PORT_NUM} or ${MY_HADOOP_NN_HTTP_PORT_NUM}."
+  echo "This could be your own HDFS web interface or that of someone else."
+  echo "Please shut Hadoop down first or switch to another machine."
+  if [ -n "$DISPLAY" ]; then
+    echo "Alternatively, use non-standard ports with this option:"
+    echo "$MY_CMD -p fromDisplay"
+  fi
+  exit 1
+fi
+
+# check for missing tpcds_kit.zip file
+install_hadoop_regr_test_env --check
+if [ $? -ne 0 ]; then
+  exit 1
+fi
+
+if [ -d "$MY_SW_ROOT" ]; then
+  echo "$MY_SW_ROOT already exists, skipping initial steps..."
+else
+  echo
+  echo "Creating common directory $MY_SW_ROOT..."
+  #####################################################
+
+  mkdir $MY_SW_ROOT
+  cd $MY_SW_ROOT
+  mkdir log
+
+  echo
+  echo "Checking Java version..."
+
+  # check JAVA_HOME and Java version (1.6 or 1.7)
+  if [ -n "$JAVA_HOME" ]; then
+    if [ `expr "${JAVA_HOME}" : ".*_32"` -gt 0 ]; then
+      echo "Using a 32-bit Java environment, JAVA_HOME=${JAVA_HOME}"
+      echo -n "This might not be optimal. Ok to switch to a 64-bit Java environment? Enter y/n (n): "
+
+      read YN
+
+      if [ "$YN" == "y" -o "$YN" == "Y" ]; then
+        unset JAVA_HOME
+      else
+        echo "Ok, continuing with 32 bit Java..."
+      fi
+    fi
+    if [ -n "$JAVA_HOME" ]; then
+      echo "Picked up JAVA_HOME=${JAVA_HOME} from the environment..."
+    fi
+  fi
+
+  if [ -z "$JAVA_HOME" ]; then
+    echo "Trying to determine JAVA_HOME..."
+
+    JAVA_HOME_CANDIDATES="\
+         /opt/home/tools/jdk1.6.*_64 \
+         /opt/home/tools/jdk1.7.*_64"
+
+    # Add the directory of the java executable in the path to the candidates
+    JAVA_EXE=`which java`
+    if [ $? -eq 0 ]; then
+      # follow symbolic links until we reach the actual file
+      while [ -L $JAVA_EXE ]; do
+        JAVA_EXE=`readlink $JAVA_EXE`
+      done
+      JAVA_DIR=`dirname $JAVA_EXE`
+      JAVA_HOME_CANDIDATES="$JAVA_HOME_CANDIDATES $JAVA_DIR"
+    fi
+    cd $MY_SW_ROOT
+
+    # loop through candidates, use the last one that has a
+    # java executable in it (preference for later versions and path)
+    for c in $JAVA_HOME_CANDIDATES
+    do
+      if [ -x $c/bin/java ]; then
+        JAVA_HOME=$c
+      fi
+    done
+
+    echo "Setting JAVA_HOME=${JAVA_HOME}"
+  fi
+
+  JAVA_EXE=$JAVA_HOME/bin/java
+  if [ -z "$JAVA_HOME" -o \( ! -x $JAVA_EXE \) ]; then
+    echo '**** ERROR:'
+    echo "JAVA_HOME environment variable $JAVA_HOME doesn't point to a java executable, exiting..."
+    exit 1
+  fi
+
+  $JAVA_EXE -version
+  (${JAVA_EXE} -version 2>&1) | grep '1.6' >/dev/null
+  if [ $? -ne 0 ]; then
+    (${JAVA_EXE} -version 2>&1) | grep '1.7' >/dev/null
+    if [ $? -ne 0 ]; then
+      echo '**** ERROR:'
+      cat <<EOF
+      Please make sure you are using the Java 1.6 or 1.7 SDK.
+      Otherwise, download it into ${MY_SW_ROOT}, extract it,
+      make a symbolic link ${MY_SW_ROOT}/java that points to it, and
+      export JAVA_HOME=${MY_SW_ROOT}/java
+EOF
+      echo exiting...
+      exit 1
+    else
+      echo "Java version 1.7 is ok"
+    fi
+  else
+    echo "Java version 1.6 is ok"
+  fi
+  # end of check Java version
+
+  echo
+  echo "Creating some convenience shell scripts in $MY_SW_SCRIPTS_DIR..."
+  if [ ! -d $MY_SW_SCRIPTS_DIR ]; then
+    mkdir $MY_SW_SCRIPTS_DIR
+  fi
+
+  # shell script to set up common environment variables
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/sw_env.sh
+# Basic environment variables for Trafodion/Hadoop/Hive/HBase/MySQL setup
+export JAVA_HOME=${JAVA_HOME}
+export MYSQL_HOME=${MYSQL_HOME}
+export YARN_HOME=${YARN_HOME}
+export HIVE_HOME=${HIVE_HOME}
+export HBASE_HOME=${HBASE_HOME}
+export MY_HADOOP_HDFS_PORT_NUM=${MY_HADOOP_HDFS_PORT_NUM}
+EOF
+
+  # now source this script
+  . $MY_SW_SCRIPTS_DIR/sw_env.sh
+
+  ####################################
+  # scripts to start/stop environment
+  ####################################
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swstartall
+#!/bin/sh
+echo "Starting Hadoop, MySQL, HBase..."
+cd ${MY_SW_SCRIPTS_DIR}
+./swstarthadoop
+./swstartmysql
+./swstarthbase
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swstopall
+#!/bin/sh
+echo "Stopping Hadoop, MySQL, HBase..."
+cd ${MY_SW_SCRIPTS_DIR}
+./swstophbase
+./swstophadoop
+./swstopmysql
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swstarthadoop
+#!/bin/sh
+echo "Starting Hadoop..."
+cd ${MY_SW_ROOT}
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+./hadoop/sbin/start-dfs.sh
+./hadoop/sbin/start-yarn.sh
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swstophadoop
+#!/bin/sh
+echo "Stopping Hadoop..."
+cd ${MY_SW_ROOT}
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+./hadoop/sbin/stop-yarn.sh
+./hadoop/sbin/stop-dfs.sh
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swstartmysql
+#!/bin/sh
+echo "Starting mysqld..."
+cd ${MY_SW_ROOT}/mysql
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+./bin/mysqld_safe --defaults-file=${MY_SQL_CONFIG_FILE} --log-error=${MY_SW_ROOT}/log/mysqld_safe.\$HOSTNAME.log &
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swstopmysql
+#!/bin/sh
+echo "Stopping mysqld..."
+cd ${MY_SW_ROOT}/mysql
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+./bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_ADMIN} --password=${MY_SQL_ADMIN_PASSWD}  shutdown
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swstarthbase
+#!/bin/sh
+echo "Starting HBase..."
+cd ${MY_SW_ROOT}
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+./hbase/bin/start-hbase.sh
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swstophbase
+#!/bin/sh
+cd ${MY_SW_ROOT}
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+./hbase/bin/stop-hbase.sh
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swstatus
+#!/bin/sh
+cd ${MY_SW_ROOT}
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+NUM_JAVA_PROCS=\`ps -aef | grep \$USER | grep java | grep -v grep | wc -l\`
+NUM_MYSQLD_PROCS=\`ps -aef | grep \$USER | grep mysqld | grep -v grep | wc -l\`
+
+if [ "\$1" == "-v" ]; then
+  ps -aef | grep \$USER | grep java | grep -v grep
+  ps -aef | grep \$USER | grep mysqld | grep -v grep
+fi
+
+echo "\$NUM_JAVA_PROCS java servers and \$NUM_MYSQLD_PROCS mysqld processes are running"
+
+jps | grep -v Jps
+
+EOF
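+  # Usage note: "swstatus" prints only the process counts; "swstatus -v"
+  # also lists the matching java and mysqld processes.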
+
+  #######################################################
+  # scripts to start command line interpreters and tools
+  #######################################################
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swhadoop
+#!/bin/sh
+# command to run hadoop
+
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+${MY_SW_ROOT}/hadoop/bin/hadoop \$*
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swyarn
+#!/bin/sh
+# command to run yarn
+
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+${MY_SW_ROOT}/hadoop/bin/yarn \$*
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swhdfs
+#!/bin/sh
+# command to run hdfs
+
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+${MY_SW_ROOT}/hadoop/bin/hdfs \$*
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swmysql
+#!/bin/sh
+# command to run mysql
+
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+${MY_SW_ROOT}/mysql/bin/mysql --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_USER} --password=${MY_SQL_USER_PASSWD} --database=${MY_SQL_METASTORE_DB} \$*
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swmysqladmin
+#!/bin/sh
+# command to run mysqladmin as root user
+
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+${MY_SW_ROOT}/mysql/bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_ADMIN} --password=${MY_SQL_ADMIN_PASSWD} \$*
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swhive
+#!/bin/sh
+# command to run hive command line interpreter
+
+# Pick up MySQL JDBC driver
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+export HADOOP_HOME=${MY_SW_ROOT}/hadoop
+${MY_SW_ROOT}/hive/bin/hive \$*
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swhbase
+#!/bin/sh
+# command to run hbase shell
+
+. $MY_SW_SCRIPTS_DIR/sw_env.sh
+${MY_SW_ROOT}/hbase/bin/hbase shell \$*
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swuninstall_local_hadoop
+#!/bin/sh
+# uninstall local Hadoop instance.
+
+. $MY_SW_SCRIPTS_DIR/swstopall
+echo "Removing directory $MY_SW_ROOT"
+echo "and all of its content."
+echo "All Hadoop, HDFS, Hive, HBase content on this local instance will be lost."
+echo -n "Is this ok? (y, (n)) "
+read YN
+
+if [ "\$YN" = "y" -o "\$YN" = "Y" ]; then
+  rm -rf $MY_SW_ROOT
+  echo "Removed $MY_SW_ROOT"
+else
+  echo "Exiting without removing anything..."
+fi
+EOF
+
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swurls.html
+<HTML>
+<HEAD>
+<title>URLs for local Hadoop Instance</title>
+</HEAD>
+<BODY>
+<pre>
+<a href="http://${MY_HOST_1}:${MY_HADOOP_NN_HTTP_PORT_NUM}">HDFS Admin</a>
+<a href="http://${MY_HOST_1}:${MY_YARN_HTTP_PORT_NUM}">Yarn</a>
+<a href="http://${MY_HOST_1}:${MY_HBASE_MASTER_INFO_PORT_NUM}">HBase Master</a>
+<a href="http://${MY_HOST_1}:${MY_DCS_MASTER_INFO_PORT}">DCS Master</a>
+</pre>
+</BODY>
+</HTML>
+EOF
+
+
+  mkdir -p $MY_SQROOT/etc
+  SYSTEM_DEFAULTS_TEXT_FILE=$MY_SQROOT/etc/SQSystemDefaults.conf
+
+  # alternative method for open source build, system defaults in a text file
+  cat <<EOF >${SYSTEM_DEFAULTS_TEXT_FILE}.temp
+#+-+-+ install_local_hadoop inserted this - please do not edit this section
+# No default settings added by install_local_hadoop at this time
+#-+-+- install_local_hadoop end of system-inserted values, please add your own custom values below
+EOF
+
+  # preserve any other text in the system defaults text file, remove old generated text
+  if [ -r ${SYSTEM_DEFAULTS_TEXT_FILE} ]; then
+    sed '/#+-+-+ install_local_hadoop /,/#-+-+- install_local_hadoop /d' <${SYSTEM_DEFAULTS_TEXT_FILE} >>${SYSTEM_DEFAULTS_TEXT_FILE}.temp
+  fi
+  # now overwrite the system defaults text file
+  cat ${SYSTEM_DEFAULTS_TEXT_FILE}.temp >${SYSTEM_DEFAULTS_TEXT_FILE}
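+  # The sed range above deletes the previously generated block between the
+  # "#+-+-+" and "#-+-+-" markers, so custom values added below the markers
+  # survive a rerun of this script.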
+
+  # make all shell scripts executable
+  chmod +x $MY_SW_SCRIPTS_DIR/sw*
+
+  echo
+  echo "Checking permissions on home directory..."
+  ls -ld ~
+  ls -ld ~ | egrep 'drwx.-..-.' >/dev/null
+  if [ $? -ne 0 ]; then
+    echo '**** ERROR:'
+    cat <<EOF
+    Check permissions on your home directory. Authentication with
+    public/private keys won't work if you allow write access to
+    your home directory. You can fix this by doing something like
+    the following:
+
+    chmod 755 ~
+
+    exiting, please correct and retry this script...
+EOF
+    exit 1
+  fi
+
+  # check password-less login via ssh
+  check_ssh
+
+fi
+# end of general setup of sw directory
+
+cd $MY_SW_ROOT
+echo
+if [ -d hadoop/bin ]; then
+  echo "Hadoop files already exist, skipping Hadoop setup"
+else
+  echo "Setting up Hadoop..."
+  #####################################################
+
+  if [ -f $MY_LOCAL_SW_DIST/${HADOOP_TAR} ]; then
+    cp $MY_LOCAL_SW_DIST/${HADOOP_TAR} .
+  else
+    curl -O ${HADOOP_MIRROR_URL}/${HADOOP_TAR}
+  fi
+
+  echo "Unpacking Hadoop tar file..."
+  tar -xf ${HADOOP_TAR}
+  rm -rf hadoop
+  ln -s `dirname hadoop-*/lib` hadoop
+
+  H_CORE_FILE=hadoop/etc/hadoop/core-site.xml
+  H_HDFS_FILE=hadoop/etc/hadoop/hdfs-site.xml
+  H_MAPRED_FILE=hadoop/etc/hadoop/mapred-site.xml
+  H_Y_SITE_FILE=hadoop/etc/hadoop/yarn-site.xml
+  H_ENV_FILE=hadoop/etc/hadoop/hadoop-env.sh
+  H_Y_ENV_FILE=hadoop/etc/hadoop/yarn-env.sh
+
+  echo "Updating files $H_CORE_FILE, $H_HDFS_FILE and $H_MAPRED_FILE ..."
+cat <<EOF >$H_ENV_FILE
+export HADOOP_COMMON_LIB_NATIVE_DIR=${MY_SW_ROOT}/hadoop/lib/native
+export HADOOP_OPTS="-Djava.library.path=${MY_SW_ROOT}/hadoop/lib"
+EOF
+
+  mv -f $H_CORE_FILE $H_CORE_FILE.orig
+  cat <<EOF >$H_CORE_FILE
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+     <property>
+         <name>fs.default.name</name>
+         <value>hdfs://${MY_HOST_1}:${MY_HADOOP_HDFS_PORT_NUM}</value>
+     </property>
+     <property>
+         <name>hadoop.tmp.dir</name>
+         <value>${MY_HADOOP_DATA_DIR}/tmp</value>
+     </property>
+</configuration>
+EOF
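+  # With the default ports and MY_HOST_1=localhost, fs.default.name above
+  # expands to hdfs://localhost:9000.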
+
+  mv -f $H_HDFS_FILE $H_HDFS_FILE.orig
+  cat <<EOF >$H_HDFS_FILE
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+     <property>
+         <name>dfs.replication</name>
+         <value>1</value>
+     </property>
+     <property>
+         <name>dfs.datanode.data.dir</name>
+         <value>${MY_HADOOP_DATA_DIR}/dfs/data</value>
+     </property>
+     <property>
+         <name>dfs.namenode.name.dir</name>
+         <value>file://${MY_HADOOP_DATA_DIR}/dfs/name</value>
+     </property>
+
+     <property>
+         <name>dfs.http.address</name>
+         <value>${MY_HOST_1}:${MY_HADOOP_NN_HTTP_PORT_NUM}</value>
+     </property>
+     <property>
+         <name>dfs.secondary.http.address</name>
+         <value>${MY_HOST_1}:${MY_HADOOP_SECONDARY_NN_PORT_NUM}</value>
+     </property>
+     <property>
+         <name>dfs.datanode.address</name>
+         <value>${MY_HOST_1}:${MY_HADOOP_DN_PORT_NUM}</value>
+     </property>
+     <property>
+         <name>dfs.datanode.http.address</name>
+         <value>${MY_HOST_1}:${MY_HADOOP_DN_HTTP_PORT_NUM}</value>
+     </property>
+     <property>
+         <name>dfs.datanode.ipc.address</name>
+         <value>${MY_HOST_1}:${MY_HADOOP_DN_IPC_PORT_NUM}</value>
+     </property>
+
+     <property>
+         <name>dfs.namenode.acls.enabled</name>
+         <value>true</value>
+     </property>
+</configuration>
+EOF
+
+  if [ -r $H_MAPRED_FILE ]; then
+    mv -f $H_MAPRED_FILE $H_MAPRED_FILE.orig
+  fi
+  cat <<EOF >$H_MAPRED_FILE
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+     <property>
+         <name>mapreduce.framework.name</name>
+         <value>yarn</value>
+     </property>
+     <property>
+         <name>mapreduce.jobtracker.http.address</name>
+         <value>${MY_HOST_1}:${MY_HADOOP_JOB_TRACKER_HTTP_PORT_NUM}</value>
+     </property>
+     <property>
+         <name>mapreduce.tasktracker.http.address</name>
+         <value>${MY_HOST_1}:${MY_HADOOP_TASK_TRACKER_PORT_NUM}</value>
+     </property>
+     <property>
+         <name>mapreduce.shuffle.port</name>
+         <value>${MY_HADOOP_SHUFFLE_PORT_NUM}</value>
+     </property>
+</configuration>
+EOF
+
+  mv -f $H_Y_SITE_FILE $H_Y_SITE_FILE.orig
+  cat <<EOF >$H_Y_SITE_FILE
+<?xml version="1.0"?>
+<configuration>
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>${MY_HOST_1}:${MY_YARN_RESMAN_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>${MY_HOST_1}:${MY_YARN_SCHED_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.address</name>
+    <value>${MY_HOST_1}:${MY_YARN_HTTP_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value>${MY_HOST_1}:${MY_YARN_TRACKER_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value>${MY_HOST_1}:${MY_YARN_ADMIN_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.localizer.address</name>
+    <value>${MY_HOST_1}:${MY_YARN_LOCALIZER_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.webapp.address</name>
+    <value>${MY_HOST_1}:${MY_YARN_NM_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle</value>
+    <description>Shuffle service that needs to be set for MapReduce to run</description>
+  </property>
+</configuration>
+EOF
+
+  echo "Appending local configuration to $H_ENV_FILE..."
+  cat <<EOF >>$H_ENV_FILE
+
+# Trafodion-local configuration
+# Make sure JAVA_HOME is set
+export JAVA_HOME=${JAVA_HOME}
+# Use a local PID dir to avoid conflicts with other Hadoop instances
+export HADOOP_PID_DIR=${MY_SW_ROOT}/log
+EOF
+
+  echo "Appending local configuration to $H_Y_ENV_FILE..."
+  cat <<EOF >>$H_Y_ENV_FILE
+
+# Trafodion-local configuration
+# Make sure JAVA_HOME is set
+export JAVA_HOME=${JAVA_HOME}
+export HADOOP_CONF_DIR=${YARN_HOME}/etc/hadoop
+export HADOOP_COMMON_HOME=${YARN_HOME}
+export HADOOP_HDFS_HOME=${YARN_HOME}
+# Use a local PID dir to avoid conflicts with other Hadoop instances
+export HADOOP_PID_DIR=${MY_SW_ROOT}/log
+EOF
+
+  #####################################################
+  echo "Initializing and starting Hadoop..." | tee ${MY_LOG_FILE}
+
+  cd $MY_SW_ROOT/hadoop
+  . $MY_SW_SCRIPTS_DIR/sw_env.sh
+
+  bin/hdfs namenode -format         >>${MY_LOG_FILE} 2>&1
+  $MY_SW_SCRIPTS_DIR/swstophadoop   >>${MY_LOG_FILE} 2>&1
+  $MY_SW_SCRIPTS_DIR/swstarthadoop  >>${MY_LOG_FILE} 2>&1
+
+  echo "Creating HDFS directories" 2>&1 | tee -a ${MY_LOG_FILE}
+  bin/hdfs dfs -mkdir /tmp                           >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -mkdir /user                          >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -mkdir /user/$USER                    >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -mkdir /user/hive                     >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -mkdir /user/trafodion                >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -mkdir /bulkload                      >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -mkdir /user/trafodion/bulkload       >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -mkdir /user/hive/warehouse           >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -mkdir /hive                          >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -chmod g+w /tmp                       >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -chmod g+w /user/hive/warehouse       >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -chmod g+w /bulkload                  >>${MY_LOG_FILE} 2>&1
+  bin/hdfs dfs -chmod g+w /user/trafodion/bulkload   >>${MY_LOG_FILE} 2>&1
+  bin/hadoop fs -ls -R /                 2>&1 | tee -a ${MY_LOG_FILE}
+  echo "Done: Creating HDFS directories" 2>&1 | tee -a ${MY_LOG_FILE}
+
+fi
+# end of Hadoop (MapReduce + HDFS) setup
+
+cd $MY_SW_ROOT
+
+if [ -d mysql/bin ]; then
+  echo "MySQL files already exist, skipping MySQL setup"
+else
+  #####################################################
+  echo "Downloading MySQL..."
+
+  if [ -f $MY_LOCAL_SW_DIST/${MYSQL_TAR} ]; then
+    cp $MY_LOCAL_SW_DIST/${MYSQL_TAR} .
+  else
+    curl ${MYSQL_MIRROR_URL}/${MYSQL_TAR} -o ${MYSQL_TAR}
+  fi
+
+  echo "Unpacking MySQL tar file ${MYSQL_TAR} ..."
+  tar -xf ${MYSQL_TAR}
+  echo "Creating symbolic link to latest MySQL distribution..."
+  rm -rf mysql
+  ln -s `dirname mysql-*-linux-*/bin` mysql
+
+  # set up MySQL configuration file
+
+  if [ -f ${MY_SQL_CONFIG_FILE} ]; then
+    echo "Using existing MySQL config file ${MY_SQL_CONFIG_FILE}"
+  else
+    echo "Setting up MySQL configuration file ${MY_SQL_CONFIG_FILE} ..."
+    mkdir ${MY_SQL_DATA_DIR}
+    cat <<EOF >${MY_SQL_CONFIG_FILE}
+[client-server]
+# Uncomment these if you want to use a nonstandard connection to MySQL
+port=${MY_SQL_PORT_NUM}
+socket=/tmp/mysql_${MY_SQL_PORT_NUM}.sock
+
+# This will be passed to all MySQL clients
+[client]
+port=${MY_SQL_PORT_NUM}
+socket=/tmp/mysql_${MY_SQL_PORT_NUM}.sock
+
+# The MySQL server
+[mysqld]
+# port to use (default is 3306)
+port=${MY_SQL_PORT_NUM}
+socket=/tmp/mysql_${MY_SQL_PORT_NUM}.sock
+# Directory where you want to put your data
+datadir=${MY_SQL_DATA_DIR}
+# File that contains the pid of the running mysqld
+pid_file=${MY_SW_ROOT}/log/mysqld.pid
+# Directory for error messages
+lc-messages-dir=${MY_SW_ROOT}/mysql/share
+# Create a file where the InnoDB/XtraDB engine stores its data
+# innodb_data_file_path = ibdata1:20M;ibdata2:40M:autoextend
+# innodb_file_per_table
+
+# Enable logging by default to help find problems
+general-log=1
+general-log-file=${MY_SW_ROOT}/log/mysql-general.${HOSTNAME}.log
+
+EOF
+
+  fi
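+  # The generated swmysql/swmysqladmin wrappers (created above) connect
+  # through the socket /tmp/mysql_${MY_SQL_PORT_NUM}.sock configured here.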
+
+  cd mysql
+  echo "Running MySQL installation script..." | tee -a ${MY_LOG_FILE}
+  ./scripts/mysql_install_db --defaults-file=${MY_SQL_CONFIG_FILE} >>${MY_LOG_FILE} 2>&1
+  if [ $? -ne 0 ]; then
+    echo "Problem installing MySQL, see, file ${MY_LOG_FILE}" | tee -a ${MY_LOG_FILE}
+  fi
+
+  # start mysql
+  echo "Starting mysqld..." | tee -a ${MY_LOG_FILE}
+  $MY_SW_SCRIPTS_DIR/swstartmysql >>${MY_LOG_FILE} 2>&1
+  if [ $? -ne 0 ]; then
+    echo "Problem starting MySQL, see, file ${MY_LOG_FILE}" | tee -a ${MY_LOG_FILE}
+  fi
+fi
+# end of MySQL setup
+
+cd $MY_SW_ROOT
+
+if [ -f mysql-connector-java-*/mysql-connector-java-*.jar ]; then
+  echo "MySQL JDBC driver file already exists, skipping JDBC setup"
+else
+  #####################################################
+  echo "Downloading MySQL JDBC driver..."
+
+  if [ -f $MY_LOCAL_SW_DIST/${MYSQL_JDBC_TAR} ]; then
+    cp $MY_LOCAL_SW_DIST/${MYSQL_JDBC_TAR} .
+  else
+    curl -O ${MYSQL_JDBC_URL}/${MYSQL_JDBC_TAR}
+  fi
+
+  echo "Unpacking MySQL JDBC tar file ${MYSQL_JDBC_TAR} ..."
+  tar -xf ${MYSQL_JDBC_TAR}
+  ln -s `dirname mysql-connector-java-*/mysql-connector-java-*.jar` mysql-connector-java
+
+fi
+# end of MySQL JDBC setup
+
+cd $MY_SW_ROOT
+
+if [ -d hive/bin ]; then
+  echo "Hive files already exist, skipping Hive setup"
+else
+  #####################################################
+  echo "Downloading Hive..."
+
+  if [ -f $MY_LOCAL_SW_DIST/${HIVE_TAR} ]; then
+    cp $MY_LOCAL_SW_DIST/${HIVE_TAR} .
+  else
+    curl -O ${HIVE_MIRROR_URL}/${HIVE_TAR}
+  fi
+
+  echo "Unpacking Hive tar file ${HIVE_TAR} ..."
+  tar -xf ${HIVE_TAR}
+  echo "Creating symbolic link to latest Hive distribution..."
+  rm -rf hive
+#  ln -s `dirname hive-*/bin` hive
+  ln -s ${HIVE_PREFIX} hive
+
+  HIVE_CONFIG_FILE=hive/conf/hive-site.xml
+  HIVE_LOG_CONFIG_FILE=hive/conf/hive-log4j.properties
+
+  echo "Updating file $HIVE_CONFIG_FILE ..."
+
+  if [ -r $HIVE_CONFIG_FILE ]; then
+    mv -f $HIVE_CONFIG_FILE $HIVE_CONFIG_FILE.orig
+  fi
+  cat <<EOF >$HIVE_CONFIG_FILE
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+     <name>hive.exec.scratchdir</name>
+     <value>${MY_HIVE_DATA_DIR}</value>
+  </property>
+
+  <property>
+     <name>hive.metastore.local</name>
+     <value>true</value>
+  </property>
+
+  <!-- Use MySQL as metastore -->
+
+  <property>
+     <name>javax.jdo.option.ConnectionURL</name>
+     <value>jdbc:mysql://${MY_HOST_1}:${MY_SQL_PORT_NUM}/${MY_SQL_METASTORE_DB}?createDatabaseIfNotExist=true</value>
+     <description>JDBC connect string for a JDBC metastore (don't include white space)</description>
+  </property>
+
+  <property>
+     <name>javax.jdo.option.ConnectionDriverName</name>
+     <value>com.mysql.jdbc.Driver</value>
+     <description>Driver class name for a JDBC metastore</description>
+  </property>
+
+  <property>
+     <name>javax.jdo.option.ConnectionUserName</name>
+     <value>${MY_SQL_USER}</value>
+  </property>
+
+  <property>
+     <name>javax.jdo.option.ConnectionPassword</name>
+     <value>${MY_SQL_USER_PASSWD}</value>
+  </property>
+
+  <!-- end of MySQL metastore parameters -->
+
+  <!-- Alternatively, use this to set up Apache Derby as metadata store
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:derby:;databaseName=${MY_DERBY_DATA_DIR}/metastore_db;create=true</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
+
+  end of alternative Derby configuration -->
+
+  <!-- other useful Hive configuration settings -->
+  <property>
+    <name>hive.cli.print.header</name>
+    <value>true</value>
+    <description>Whether to print the names of the columns in query output.</description>
+  </property>
+</configuration>
+EOF
+
+  cat <<EOF >${HIVE_LOG_CONFIG_FILE}
+hive.root.logger=WARN,DRFA
+hive.log.dir=${MY_SW_ROOT}/log/hive
+hive.log.file=hive.log
+
+EOF
+
+  echo "Copying MySQL JDBC driver to ${MY_SW_ROOT}/hive/lib"
+  cp -p ${MY_SW_ROOT}/mysql-connector-java/*.jar ${MY_SW_ROOT}/hive/lib
+
+  echo "Sleeping 10 sec to wait for MySQL to start..."
+  sleep 10
+
+  mysql/bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_ADMIN} password ${MY_SQL_ADMIN_PASSWD} 2>&1 | tee -a ${MY_LOG_FILE}
+  mysql/bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_ADMIN} --password=${MY_SQL_ADMIN_PASSWD} -h ${MY_HOST_1} password ${MY_SQL_ADMIN_PASSWD} 2>&1 | tee -a ${MY_LOG_FILE}
+
+  if [ ${MY_SQL_ADMIN} != ${MY_SQL_USER} ]; then
+    echo "Creating MySQL user ${MY_SQL_USER}"
+    ${MY_SW_ROOT}/mysql/bin/mysql --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_ADMIN} --password=${MY_SQL_ADMIN_PASSWD} <<EOF 2>&1 | tee -a ${MY_LOG_FILE}
+CREATE USER '${MY_SQL_USER}'@'%' IDENTIFIED BY '${MY_SQL_USER_PASSWD}';
+EOF
+    mysql/bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_USER} password ${MY_SQL_USER_PASSWD} 2>&1 | tee -a ${MY_LOG_FILE}
+    mysql/bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_USER} --password=${MY_SQL_USER_PASSWD} -h ${MY_HOST_1} password ${MY_SQL_USER_PASSWD} 2>&1 | tee -a ${MY_LOG_FILE}
+  fi
+
+  echo "Creating Hive database and metastore in MySQL..."
+  ${MY_SW_ROOT}/mysql/bin/mysql --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_USER} --password=${MY_SQL_USER_PASSWD} <<EOF 2>&1 | tee -a ${MY_LOG_FILE}
+create database ${MY_SQL_METASTORE_DB};
+use ${MY_SQL_METASTORE_DB};
+SOURCE ${MY_SW_ROOT}/hive/scripts/metastore/upgrade/mysql/hive-schema-0.13.0.mysql.sql;
+EOF
+
+fi
+# end of Hive setup
+
+cd $MY_SW_ROOT
+
+if [ -d hbase/bin ]; then
+  echo "HBase files already exist, skipping HBase setup"
+else
+  #####################################################
+  echo "Downloading HBase..."
+
+  if [ -f $MY_LOCAL_SW_DIST/${HBASE_TAR} ]; then
+    cp $MY_LOCAL_SW_DIST/${HBASE_TAR} .
+  else
+    curl -O ${HBASE_MIRROR_URL}/${HBASE_TAR}
+  fi
+
+  echo "Unpacking HBase tar file ${HBASE_TAR} ..."
+  tar -xf ${HBASE_TAR}
+  echo "Creating symbolic link to latest HBase distribution..."
+  rm -rf hbase
+  ln -s `dirname hbase-*/bin` hbase
+
+  HBASE_CONFIG_FILE=hbase/conf/hbase-site.xml
+  HBASE_ENV_FILE=hbase/conf/hbase-env.sh
+
+  # For HBase_Trx
+
+  # Do not build if the TRX jar already exists
+  if [ ! -e ${MY_SQROOT}/export/lib/${HBASE_TRX_JAR} ]; then
+     # Build HBase TRX
+     echo "Building HBase TRX"
+     cd $MY_SQROOT ; make genverhdr 2>&1 | tee -a ${MY_LOG_FILE}
+     cd $MY_SQROOT/src/seatrans/hbase-trx
+     make clean 2>&1 | tee -a ${MY_LOG_FILE}
+     make 2>&1 | tee -a ${MY_LOG_FILE}
+  fi
+
+  cd $MY_SW_ROOT
+
+  # Set up the HBase TRX JAR in HBase's CLASSPATH
+  echo "export HBASE_CLASSPATH=${MY_SQROOT}/export/lib/\${HBASE_TRX_JAR}" >> ${HBASE_ENV_FILE}
+
+  echo "Updating file $HBASE_CONFIG_FILE ..."
+
+  if [ -r $HBASE_CONFIG_FILE ]; then
+    mv -f $HBASE_CONFIG_FILE $HBASE_CONFIG_FILE.orig
+  fi
+  cat <<EOF >$HBASE_CONFIG_FILE
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://${MY_HOST_1}:${MY_HADOOP_HDFS_PORT_NUM}/hbase</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.dataDir</name>
+    <value>hdfs://${MY_HOST_1}:${MY_HADOOP_HDFS_PORT_NUM}/zookeeper</value>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>${MY_HBASE_MASTER_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>${MY_HBASE_MASTER_INFO_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>hbase.regionserver.port</name>
+    <value>${MY_HBASE_REGIONSERVER_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>${MY_HBASE_REGIONSERVER_INFO_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.peerport</name>
+    <value>${MY_HBASE_ZOOKEEPER_PEERPORT_NUM}</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.leaderport</name>
+    <value>${MY_HBASE_ZOOKEEPER_LEADERPORT_NUM}</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>${MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM}</value>
+  </property>
+  <property>
+    <name>hbase.rest.port</name>
+    <value>${MY_HBASE_REST_PORT_NUM}</value>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+  </property>
+  <property>
+    <name>hbase.client.scanner.timeout.period</name>
+    <value>60000</value>
+  </property>
+  <property>
+    <name>hbase.bulkload.staging.dir</name>
+    <value>hdfs://${MY_HOST_1}:${MY_HADOOP_HDFS_PORT_NUM}/hbase-staging</value>
+  </property>
+  <property>
+    <name>hbase.snapshot.enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hbase.master.distributed.log.splitting</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hbase.hregion.impl</name>
+    <value>org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion</value>
+  </property>
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value>
+      org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,
+      org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,
+      org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint,
+      org.apache.hadoop.hbase.coprocessor.AggregateImplementation
+    </value>
+  </property>
+</configuration>
+EOF
+
+  echo "Starting HBase..." | tee -a ${MY_LOG_FILE}
+  $MY_SW_SCRIPTS_DIR/swstarthbase >>${MY_LOG_FILE} 2>&1
+  if [ $? -ne 0 ]; then
+    echo "Problem starting HBase, see, file ${MY_LOG_FILE}" | tee -a ${MY_LOG_FILE}
+  fi
+
+fi
+# end of HBase setup
+
+cd $MY_SW_ROOT
+
+if [ -d tpcds/tools ]; then
+  echo "TPC-DS files already exist, skipping TPC-DS setup"
+else
+  install_hadoop_regr_test_env \
+      --unpackDir=$MY_SW_ROOT/tpcds \
+      --dataDir=${MY_DATA_DIR}/tpcds \
+      --logFile=$MY_LOG_FILE \
+      --hdfsCmd=${MY_SW_SCRIPTS_DIR}/swhdfs \
+      --hiveCmd=${MY_SW_SCRIPTS_DIR}/swhive
+  if [ $? -ne 0 ]; then
+    echo "Error installing TPC-DS and ORC files, exiting..."
+    exit 1
+  fi
+fi
+# end of TPC-DS setup
+
+cd $MY_SW_ROOT
+
+echo "Setting up DCS, REST and Phoenix tests..."
+
+#Default GIT location
+GIT_DIR="git@github.com:apache/incubator-trafodion"
+DCS_SRC=$MY_SQROOT/../../dcs
+
+if [ -d $DCS_SRC ]; then
+   TRAF_SRC=$MY_SQROOT/../../
+   # Default location of DCS code
+   DCS_SRC=$MY_SQROOT/../../dcs
+   # Default location of REST code
+   REST_SRC=$MY_SQROOT/../rest
+   # Default location for phoenix_test
+   PHX_SRC=$MY_SQROOT/../../tests/phx
+else
+   TRAF_SRC=$MY_SW_ROOT/src/incubator-trafodion
+   if [ ! -d $TRAF_SRC ]; then
+     mkdir -p $MY_SW_ROOT/src
+     cd $MY_SW_ROOT/src
+     git clone $GIT_DIR
+   fi
+   # Default location of DCS code
+   DCS_SRC=$TRAF_SRC/dcs
+   # Default location of REST code
+   REST_SRC=$TRAF_SRC/core/rest
+   # Default location for phoenix_test
+   PHX_SRC=$TRAF_SRC/tests/phx
+fi
+
+echo "Default Trafodion Source directory..."
+echo "For Core... $TRAF_SRC"
+echo "For DCS... $DCS_SRC"
+echo "For REST... $REST_SRC"
+echo "For PHX... $PHX_SRC"
+
+if [ -d dcs-* ]; then
+  echo "DCS files already exist, skipping DCS setup"
+else
+  #####################################################
+  # four options, depending on user environment variables, as described above
+  if [[ -f $DCS_TAR ]]
+  then
+    echo "Using DCS Tar: $DCS_TAR" | tee -a ${MY_LOG_FILE}
+  elif [[ -n $DCS_URL ]]
+  then
+    echo "Downloading DCS Tar: $DCS_URL" | tee -a ${MY_LOG_FILE}
+    rm -f dcs_download.tar
+    curl ${DCS_URL} -o dcs_download.tar
+    DCS_TAR=./dcs_download.tar
+  elif [[ -d $DCS_SRC ]]
+  then
+    if [[ -f $DCS_SRC/target/dcs*tar.gz ]]
+    then
+      echo "Using DCS tar file in source tree: $DCS_SRC" | tee -a ${MY_LOG_FILE}
+    else
+      echo "No DCS tar file found, building DCS: $DCS_SRC" | tee -a ${MY_LOG_FILE}
+      echo "Building DCS Source in $DCS_SRC" | tee -a ${MY_LOG_FILE}
+      cd $DCS_SRC
+      ${MAVEN:-mvn} site package >>${MY_LOG_FILE} 2>&1
+      cd $MY_SW_ROOT
+    fi
+    DCS_TAR=$(ls $DCS_SRC/target/dcs*tar.gz)
+  fi
+  if [[ ! -f $DCS_TAR ]]
+  then
+    echo '**** ERROR:'  | tee -a ${MY_LOG_FILE}
+    echo "DCS tar file not found: $DCS_TAR"  | tee -a ${MY_LOG_FILE}
+    exit 2
+  fi
+  # install
+  echo "Installing DCS from: $DCS_TAR"  | tee -a ${MY_LOG_FILE}
+  tar xzf $DCS_TAR
+  DCS_HOME=$(ls -d $MY_SW_ROOT/dcs-*)
+
+  # configure DCS
+
+  #   use ~/.trafodion to avoid modifying sqenv*.sh in source tree
+  echo "Adding DCS_INSTALL_DIR=$DCS_INSTALL_DIR to sqenv via ~/.trafodion"  | tee -a ${MY_LOG_FILE}
+  echo "  Update it if switching between multiple local_hadoop environments"  | tee -a ${MY_LOG_FILE}
+  if [[ -f ~/.trafodion ]]
+  then
+    mv -f ~/.trafodion ~/.trafodion.orig
+    grep -v 'DCS_INSTALL_DIR=' ~/.trafodion.orig > ~/.trafodion
+  fi
+  DCSDIR=${DCS_HOME##*/}
+  echo "export DCS_INSTALL_DIR=\${MY_SQROOT}/sql/local_hadoop/$DCSDIR" >> ~/.trafodion
+
+  cd $DCS_HOME/conf/
+  mv dcs-env.sh dcs-env.sh.orig
+   # the MY_SQROOT env var does not propagate through the ssh command
+  echo "MY_SQROOT=$MY_SQROOT" > dcs-env.sh
+  sed -e "s@#[ ]*export DCS_MANAGES_ZK=true@export DCS_MANAGES_ZK=false@" dcs-env.sh.orig >> dcs-env.sh
+  mv -f dcs-site.xml dcs-site.xml.orig
+  sed -e "s@</configuration>@@" dcs-site.xml.orig > dcs-site.xml
+  cat >>dcs-site.xml <<EOF
+    <property>
+     <name>dcs.master.port</name> <value>$MY_DCS_MASTER_PORT</value>
+    </property>
+    <property>
+     <name>dcs.master.info.port</name> <value>$MY_DCS_MASTER_INFO_PORT</value>
+    </property>
+    <property>
+     <name>dcs.server.info.port</name> <value>$MY_DCS_SERVER_INFO_PORT</value>
+    </property>
+    <property>
+     <name>dcs.zookeeper.peerport</name> <value>$MY_HBASE_ZOOKEEPER_PEERPORT_NUM</value>
+    </property>
+    <property>
+     <name>dcs.zookeeper.leaderport</name> <value>$MY_HBASE_ZOOKEEPER_LEADERPORT_NUM</value>
+    </property>
+    <property>
+     <name>dcs.zookeeper.property.clientPort</name> <value>$MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM</value>
+    </property>
+  </configuration>
+EOF
+  echo "localhost 4" > servers
+
+  # Configure DCS test scripts
+  if [[ -n "$DCS_SRC" ]]
+  then
+    echo "Adding swjdbc script...." | tee -a ${MY_LOG_FILE}
+    cat <<EOF >$MY_SW_SCRIPTS_DIR/swjdbc
+#!/bin/sh
+# command to run JDBC tests
+cd $DCS_SRC/src/test/jdbc_test
+./jdbc_test.py --appid=jdbc_test --user=SOMEUSER --pw=SOMEPASSWORD --javahome=\$JAVA_HOME \\
+  --target=localhost:$MY_DCS_MASTER_PORT \\
+  --jdbctype=T4 --jdbccp=\$MY_SQROOT/export/lib/jdbcT4.jar "\$@"
+EOF
+    chmod +x $MY_SW_SCRIPTS_DIR/swjdbc
+  fi
+fi
+# end of DCS setup
+
+# begin of trafci setup
+
+echo "Updating trafci port number" | tee -a ${MY_LOG_FILE}
+TRAFCI_BIN_DIR=$MY_SQROOT/trafci/bin
+if [[ -f $TRAFCI_BIN_DIR/trafci ]]
+then
+  mv $TRAFCI_BIN_DIR/trafci $TRAFCI_BIN_DIR/trafci.orig
+  sed -e "s@localhost:23400@localhost:$MY_DCS_MASTER_PORT@" $TRAFCI_BIN_DIR/trafci.orig > $TRAFCI_BIN_DIR/trafci
+  chmod +x $TRAFCI_BIN_DIR/trafci
+  echo "Modified trafci port number to $MY_DCS_MASTER_PORT" | tee -a ${MY_LOG_FILE}
+else
+  echo "$TRAFCI_BIN_DIR not found" | tee -a ${MY_LOG_FILE}
+fi
+
+# end of trafci set up
+
+if [[ -d $PHX_SRC ]]; then
+  echo "Phoenix files already exist, skipping Phoenix setup" | tee -a ${MY_LOG_FILE}
+
+  #######################################################
+  # scripts to run tests
+  #######################################################
+  # adding this in this section to enable adding it to
+  # existing local_hadoop installations
+  echo "Adding swphoenix script...." | tee -a ${MY_LOG_FILE}
+  cat <<EOF >$MY_SW_SCRIPTS_DIR/swphoenix
+#!/bin/sh
+# command to run phoenix tests
+
+cd $PHX_SRC
+if [[ \$1 == "t4" ]]
+then
+  ./phoenix_test.py --target=localhost:$MY_DCS_MASTER_PORT --user=dontcare --pw=dontcare \\
+     --targettype=TR --javahome=\$JAVA_HOME --jdbccp=\$MY_SQROOT/export/lib/jdbcT4.jar
+elif [[ \$1 == "t2" ]]
+then
+  export LD_PRELOAD=\$JAVA_HOME/jre/lib/amd64/libjsig.so:\$MY_SQROOT/export/lib\$SQ_MBTYPE/libseabasesig.so
+  ./phoenix_test.py --targettype=TR --javahome=\$JAVA_HOME \\
+     --jdbccp=\$MY_SQROOT/export/lib/jdbcT2.jar --jdbctype=T2
+else
+  echo "Usage: swphoenix (t2|t4)"
+  exit 1
+fi
+EOF
+  chmod +x $MY_SW_SCRIPTS_DIR/swphoenix
+
+fi
+# end of Phoenix setup
+
+# Begin of Trafodion REST Server setup
+
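+# note: [ -d rest-* ] relies on the glob matching at most one directory; with
+# several rest-* directories the test errors out and the else branch runs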
+if [ -d rest-* ]; then
+  echo "Trafodion REST files already exist, skipping REST setup"
+else
+  #####################################################
+  # three options, depending on user env variables, as described above
+  if [[ -f $REST_TAR ]]
+  then
+    echo "Using REST Tar: $REST_TAR" | tee -a ${MY_LOG_FILE}
+  elif [[ -n $REST_URL ]]
+  then
+    echo "Downloading REST Tar: $REST_URL" | tee -a ${MY_LOG_FILE}
+    rm -f rest_download.tar
+    curl ${REST_URL} -o rest_download.tar
+    REST_TAR=./rest_download.tar
+  elif [[ -d $REST_SRC ]]
+  then
+    if [[ -f $REST_SRC/target/rest*tar.gz ]]
+    then
+      echo "Using REST tar file in source tree: $REST_SRC" | tee -a ${MY_LOG_FILE}
+    else
+      echo "No REST tar file found, building REST: $REST_SRC" | tee -a ${MY_LOG_FILE}
+      echo "Building REST Source in $REST_SRC" | tee -a ${MY_LOG_FILE}
+      cd $REST_SRC
+      ${MAVEN:-mvn} site package >>${MY_LOG_FILE} 2>&1
+      cd $MY_SW_ROOT
+    fi
+    REST_TAR=$(ls $REST_SRC/target/rest*tar.gz)
+  fi
+  if [[ ! -f $REST_TAR ]]
+  then
+    echo '**** ERROR:'  | tee -a ${MY_LOG_FILE}
+    echo "REST tar file not found: $REST_TAR"  | tee -a ${MY_LOG_FILE}
+    exit 2
+  fi
+  # install
+  echo "Installing REST from: $REST_TAR"  | tee -a ${MY_LOG_FILE}
+  tar xzf $REST_TAR
+  REST_HOME=$(ls -d $MY_SW_ROOT/rest-*)
+
+  # configure REST
+
+  #   use ~/.trafodion to avoid modifying sqenv*.sh in source tree
+  echo "Adding REST_INSTALL_DIR=$REST_INSTALL_DIR to sqenv via ~/.trafodion"  | tee -a ${MY_LOG_FILE}
+  echo "  Update it if switching between multiple local_hadoop environments"  | tee -a ${MY_LOG_FILE}
+  if [[ -f ~/.trafodion ]]
+  then
+    mv -f ~/.trafodion ~/.trafodion.orig
+    grep -v 'REST_INSTALL_DIR=' ~/.trafodion.orig > ~/.trafodion
+  fi
+  RESTDIR=${REST_HOME##*/}
+  echo "export REST_INSTALL_DIR=\${MY_SQROOT}/sql/local_hadoop/$RESTDIR" >> ~/.trafodion
+  cd $REST_HOME/conf/
+  mv rest-env.sh rest-env.sh.orig
+   # MY_SQROOT env var does not propagate through the ssh command
+  echo "MY_SQROOT=$MY_SQROOT" > rest-env.sh
+  sed -e "s@#[ ]*export REST_MANAGES_ZK=true@export REST_MANAGES_ZK=false@" rest-env.sh.orig >> rest-env.sh
+  mv -f rest-site.xml rest-site.xml.orig
+  sed -e "s@</configuration>@@" rest-site.xml.orig > rest-site.xml
+  cat >>rest-site.xml <<EOF
+    <property>
+     <name>rest.port</name> <value>$MY_REST_SERVER_PORT</value>
+    </property>
+    <property>
+     <name>rest.https.port</name> <value>$MY_REST_SERVER_SECURE_PORT</value>
+    </property>
+    <property>
+     <name>rest.zookeeper.peerport</name> <value>$MY_HBASE_ZOOKEEPER_PEERPORT_NUM</value>
+    </property>
+    <property>
+     <name>rest.zookeeper.leaderport</name> <value>$MY_HBASE_ZOOKEEPER_LEADERPORT_NUM</value>
+    </property>
+    <property>
+     <name>rest.zookeeper.property.clientPort</name> <value>$MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM</value>
+    </property>
+  </configuration>
+EOF
+fi
+
+# End of Trafodion REST Server setup
+
+if [ ! -r $YARN_HOME/lib/native/libhdfs.so ]; then
+  echo "------------------------------------------------------------"
+  echo "-- WARNING: libhdfs.so is not present on this system. Please"
+  echo "--          build it, otherwise Trafodion will not compile."
+  echo "------------------------------------------------------------"
+fi
+
+echo
+echo "Installed directory size and name = $(du -sh $MY_SW_ROOT)" | tee -a ${MY_LOG_FILE}
+echo
+echo "Setup is complete. You can use the convenience scripts starting with sw... located in $MY_SW_SCRIPTS_DIR."

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/fad0c54e/install/installer/traf_apache_hadoop_config_setup
----------------------------------------------------------------------
diff --git a/install/installer/traf_apache_hadoop_config_setup b/install/installer/traf_apache_hadoop_config_setup
new file mode 100755
index 0000000..9547db2
--- /dev/null
+++ b/install/installer/traf_apache_hadoop_config_setup
@@ -0,0 +1,842 @@
+#!/bin/bash
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+#  Script that prompts user for all user input needed for the
+#  trafodion_config file
+
+
+#==============================================
+# Defaults
+suseLinux=$(grep -cr SUSE /proc/version)
+LOCAL_WORKDIR="$( cd "$( dirname "$0" )" && pwd )"
+TRAF_WORKDIR="/usr/lib/trafodion"
+TRAF_CONFIG="/etc/trafodion/trafodion_config"
+TRAF_CONFIG_DIR=$(dirname $TRAF_CONFIG)
+LOCAL_TRAF_CONFIG="$LOCAL_WORKDIR/trafodion_config"
+LDAP_AUTH_FILE="traf_authentication_config_${HOSTNAME}"
+TRAF_USER="trafodion"
+TRAF_USER_PASSWORD="traf123"   # must be 6-8 chars
+userForTrafodion=`grep "$TRAF_USER:" /etc/passwd | wc -l`
+NODE_LIST=""
+HOME_DIR="/home"
+
+TRAF_PACKAGE=""
+TRAF_BUILD=""
+DCS_BUILD=""
+
+
+HADOOP_PREFIX=""
+HBASE_HOME=""
+HIVE_HOME=""
+
+
+HADOOP_NODES=""
+HDFS_USER="hdfs"
+HBASE_USER="hbase"
+HBASE_GROUP="hbase"
+
+SQ_ROOT=""
+INIT_TRAFODION="N"
+START="N"
+SQCONFIG=""
+DCS_SERVERS_PARM="2"
+LDAP_SECURITY="N"
+SCANNER_MODE="N"
+
+LDAP_LEVEL="0"
+#==============================================
+
+function print_usage {
+cat << EOF
+
+This script prompts for the user input needed to build the trafodion_config file,
+or uses known defaults. It returns an error on invalid input. Type a response or
+press [Enter] to accept the default.
+
+Inputs Requested:
+
+*Trafodion Password (default is [$TRAF_USER_PASSWORD])
+*List of Nodes
+*Home directory (default is [$HOME_DIR])
+*Location of EPEL RPM (default is [None])
+*Full path to Trafodion Server tar file
+(default is [$TRAF_BUILD])
+*Apache Hadoop install location
+*Apache HBase install location
+*Apache Hive install location
+*HDFS username (default is [$HDFS_USER])
+*HBase username (default is [$HBASE_USER])
+*HBase group (default is [$HBASE_GROUP])
+*Full pathname to the install directory location (default [$SQ_ROOT])
+*Full pathname to the DCS build tar file (default [$DCS_BUILD])
+*Start Trafodion after install (default is [No])
+*Total number of DCS servers to start
+
+
+Options:
+    --help             Print this message and exit.
+
+EOF
+}
+
+#==============================================
+#Parse input parameters
+
+while [[ $# -gt 0 ]]; do
+    case "$1" in
+        --scanner_mode)
+            SCANNER_MODE="Y"
+            ;;
+
+        --help)
+            print_usage
+            exit -1
+            ;;
+        *)
+            echo "***ERROR: unknown parameter '$1'"
+            print_usage
+            exit -1
+    esac
+    shift
+done
+
+#==============================================
+
+
+echo
+echo "*******************************"
+echo " Trafodion Configuration Setup"
+echo "*******************************"
+echo
+echo "***INFO: Please press [Enter] to select defaults."
+echo
+
+sudo mkdir -p $TRAF_WORKDIR
+sudo chmod 777 $TRAF_WORKDIR
+
+# if there was a partial config file then use it
+# to fill in default values so user doesn't have to retype
+# everything again.
+if [ -f $LOCAL_TRAF_CONFIG ]; then
+	source $LOCAL_TRAF_CONFIG
+fi
+
+if [[ "$suseLinux" -ge "1" ]]; then
+   SUSE_LINUX="true"
+else 
+   SUSE_LINUX="false"
+fi
+
+echo "#!/bin/bash" > $LOCAL_TRAF_CONFIG
+echo "export SUSE_LINUX=\"$SUSE_LINUX\"" >> $LOCAL_TRAF_CONFIG
+echo "export TRAF_USER=\"$TRAF_USER\"" >> $LOCAL_TRAF_CONFIG
+echo "export TRAF_WORKDIR=\"$TRAF_WORKDIR\"" >> $LOCAL_TRAF_CONFIG
+echo "export LOCAL_WORKDIR=\"$LOCAL_WORKDIR\"" >> $LOCAL_TRAF_CONFIG
+echo "export SQCONFIG=\"$SQCONFIG\"" >> $LOCAL_TRAF_CONFIG
+
+#==============================================
+# Password
+if [[ "$userForTrafodion" == "0" ]]; then
+   echo -n "Enter $TRAF_USER password, default is [$TRAF_USER_PASSWORD]: "
+   read answer
+   if [ -z $answer ]; then
+      echo "export TRAF_USER_PASSWORD=\"$TRAF_USER_PASSWORD\"" >> $LOCAL_TRAF_CONFIG
+   else
+      length=${#answer}
+      if [[ $length -lt "6" ]]; then
+         echo "***ERROR: Linux requires password to be at least 6 characters"
+         echo "***ERROR: Password length must be from 6-8 characters."
+         exit -1
+      fi
+      if [[ $length -gt "8" ]]; then
+         echo "***ERROR: VNC server requires passwords be 8 or less characters"
+         echo "***ERROR: Password length must be from 6-8 characters."
+         exit -1  
+      fi
+      echo "export TRAF_USER_PASSWORD=\"$answer\"" >> $LOCAL_TRAF_CONFIG
+   fi
+fi
+
+#==============================================
+# List of Nodes
+
+echo -n "Enter list of nodes to install trafodion(must have apache hadoop/hbase installed) (blank separated), default [$NODE_LIST]: "
+read answer
+if [[ -z "$answer" ]]; then
+   if [ -z "$NODE_LIST" ]; then
+      echo "***ERROR: Must enter list of nodes."
+      exit -1
+   fi 
+else
+   NODES="$answer"
+   NODE_LIST=""
+   ERROR_NODES=""
+   for node in $NODES
+   do
+      newNode=$(ssh -q -n $node hostname)
+      
+      if [[ $? -ne "0" ]]; then
+         echo "***ERROR: Could not ssh to $node."
+         echo "***ERROR: Please check node names for typos"
+         exit -1
+      fi
+
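+      # keep only letters, digits, blanks, '.' and '-'; if the sanitized name
+      # differs from the raw one, the hostname contained an illegal character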
+      nodeName=$(echo $newNode | sed 's/[^a-zA-Z0-9\ \.\-]//g')
+
+      if [[ "$nodeName" != "$newNode" ]]; then
+         echo "***ERROR: Nodes are not named properly."
+         echo "***ERROR: Nodes can have no special characters other than '-' and '.'"
+         exit -1
+      fi
+
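+      # reject dotted-quad IPv4 addresses (e.g. 10.0.0.1); node names are required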
+      if [[ "$newNode" =~ ^([0-9]{1,3})[.]([0-9]{1,3})[.]([0-9]{1,3})[.]([0-9]{1,3})$ ]]; then
+         echo "***ERROR: Nodes are not named properly."
+         echo "***ERROR: IP addresses are not supported. Please use node names."
+         exit -1
+      fi
+    
+      #Testing sudo access on all nodes
+      ssh -q -n $newNode sudo echo "***INFO: Testing sudo access on node $newNode"
+      if [ $? -ne "0" ]; then
+         error=1
+         ERROR_NODES="$ERROR_NODES $newNode"
+      fi
+      NODE_LIST="$NODE_LIST $newNode"  
+   done
+
+   if [[ $error == "1" ]]; then
+      echo "***ERROR: $ERROR_NODES does not have sudo access."
+      echo "***ERROR: Must have sudo access on all nodes."
+      exit -1
+   fi
+
+fi
+
+for node in $NODE_LIST
+do
+   ssh -q -n $node echo "***INFO: Testing ssh on $node"
+   if [[ $? -ne "0" ]]; then
+      errorFound=1
+      ERROR_NODES="$ERROR_NODES $node"
+   fi
+done
+
+if [[ $errorFound == "1" ]]; then
+   echo "***ERROR: Could not ssh to $ERROR_NODES."
+   echo "***ERROR: Check permissions and known hosts files."
+   exit -1 
+fi
+
+node_count=$(echo $NODE_LIST | wc -w)
+echo "export NODE_LIST=\"$NODE_LIST\"" >> $LOCAL_TRAF_CONFIG
+echo "export node_count=\"$node_count\"" >> $LOCAL_TRAF_CONFIG
+
+MY_NODES=""
+for node in $NODE_LIST
+do 
+   MY_NODES="$MY_NODES -w $node"
+done 
+echo "export MY_NODES=\"$MY_NODES\"" >> $LOCAL_TRAF_CONFIG
+
+if [ $node_count -eq 1 ]; then
+    TRAF_PDSH=""
+else
+    TRAF_PDSH="pdsh -S $MY_NODES"
+fi
+
+#==============================================
+# Home directory
+if [[ "$userForTrafodion" == "0" ]]; then
+   echo -n "Enter Trafodion userid's home directory prefix, default is [$HOME_DIR]: "
+   read answer
+
+   if [ ! -z $answer ]; then
+      # strip off TRAF_USER if user added it on end of directory
+      HOME_DIR=$(echo "$answer" | sed -e "s@/$TRAF_USER\$@@")
+   fi
+
+   #Test if home directory is on an NFS disk
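+   #   (df -P forces single-line POSIX output; -T adds the filesystem type,
+   #    which the tail/awk pipeline reads from the second column)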
+   diskType="$( df -P -T $HOME_DIR | tail -n +2 | awk '{print $2}')"
+   if [ "$diskType" == "nfs" ]; then
+      echo "***ERROR: Trafodion userid's home directory ($HOME_DIR) cannot be on an NFS disk"
+      echo "***ERROR: Please choose a different directory to install on."
+      exit -1
+  fi
+else
+  HOME_DIR=$(grep -r "trafodion" /etc/passwd | sed 's/.*:://' | sed 's/\:.*$//' | sed 's/\/trafodion.*$//')
+  
+fi
+
+echo "export HOME_DIR=\"$HOME_DIR\"" >> $LOCAL_TRAF_CONFIG
+#==============================================
+#Check location given for EPEL RPM
+if [[ "$userForTrafodion" == "0" ]] &&[[ "$SUSE_LINUX" != "true" ]]; then
+   echo -n "Specify full path to EPEL RPM (including .rpm), default is None: "
+   read answer
+
+   if [ -z $answer ]; then
+      echo "***INFO: Will attempt to download RPM if EPEL is not installed on all nodes."
+      echo "export EPEL_RPM=\"\"" >> $LOCAL_TRAF_CONFIG
+   else
+      if [[ ! -f $answer ]]; then
+         echo "***ERROR: File not found. Please check path for existence and typos."
+         exit -1
+      fi 
+      if [[ "$answer" == *"rpm"* ]]; then
+         echo "export EPEL_RPM=\"$answer\"" >> $LOCAL_TRAF_CONFIG      
+      else
+         echo "***ERROR: File enter is not an RPM. Check file is of type epel***.rpm"
+         exit -1
+      fi
+   fi
+fi
+#==============================================
+#Check location given for Java 1.7.0_65 or greater
+
+echo -n "Specify location of Java 1.7.0_65 or higher (JDK), default is [$JAVA_HOME]: "
+read answer
+
+if [ -z $answer ]; then
+      if [ -z $JAVA_HOME ]; then
+         echo "***ERROR: Must specify location of Java 1.7.0_65 or higher (JDK)."
+         exit -1
+      fi
+else
+      JAVA_HOME=$answer
+fi
+
+REQ_VER="1.7.0_65"
+JAVA7="7"
+JAVA65="65"
+
+for node in $NODE_LIST
+do
+   THIS_JVM_VER="$(ssh -q -n $node $JAVA_HOME/bin/java -version 2>&1 > /dev/null)"
+   if [[ "$THIS_JVM_VER" == *"No such file"* ]]; then
+      echo "***ERROR: Unable to find Java version on node $node"
+      echo "***ERROR: Must enter a specific Java version (example: /usr/java/jdk1.7.0_67)"
+      echo "***ERROR: Please check for typos and that directory exists"
+      exit -1
+   fi
+
+   javaType="$(ssh -q -n $node $JAVA_HOME/bin/javac -version 2>&1 > /dev/null | grep "No such file" | wc -l)"
+   if [[ $javaType -eq "1" ]]; then
+      echo "***ERROR: Java version on node $node is of type JRE"
+      echo "***ERROR: Java version needed on all nodes must be a JDK"
+      exit -1
+   fi
+
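+   # the first line of the "java -version" banner looks like: java version "1.7.0_65"
+   #   ${THIS_JVM_VER:15:2} yields ".7" (the sed keeps the digit after the dot),
+   #   ${THIS_JVM_VER:18:1} the micro digit, and ${THIS_JVM_VER:20:2} the update
+   #   number; the fixed offsets assume exactly that banner layout
+   # (a less position-sensitive parse, illustration only:
+   #   ver=$(ssh -q -n $node $JAVA_HOME/bin/java -version 2>&1 | awk -F'"' '/version/{print $2}') )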
+   temp_JAVA=`echo "${THIS_JVM_VER:15:2}" | sed 's/.*\.//'`
+   
+   #Check if using something less than Java 7
+   if [[ "$temp_JAVA" -lt "$JAVA7" ]]; then
+      error="true"
+   fi
+   
+   #Checking if using 1.7.0_n
+   if [[ "${THIS_JVM_VER:18:1}" -eq "0" ]] && [[ "$temp_JAVA" -eq "$JAVA7" ]]; then
+      if [[ "${THIS_JVM_VER:20:1}" -eq "0" ]] || [[ "${THIS_JVM_VER:20:2}" -lt $JAVA65 ]]; then      
+         error="true"
+      fi
+   fi
+
+   if [[ -n $error ]]; then
+      echo "***ERROR: Your existing JAVA_HOME on $node is less than $REQ_VER"
+      echo "***ERROR: Your Java Version on $node = $THIS_JVM_VER"
+      echo "***ERROR: Required java version on $node should be greater than $REQ_VER"
+      exit -1
+   fi
+done
+
+echo "export JAVA_HOME=\"$JAVA_HOME\"" >> $LOCAL_TRAF_CONFIG
+
+#==============================================
+# Trafodion Build location
+# set the following based on what the user provided as input
+# TRAF_PACKAGE = the trafodion package tar file which contains trafodion core & dcs
+# TRAF_BUILD = the trafodion core tar file extracted from the trafodion package
+# DCS_BUILD = the dcs tar file extracted from the trafodion package
+
+if [ -z "$TRAF_PACKAGE" ]; then
+   TRAF_DEFAULT="$TRAF_BUILD"
+else
+   TRAF_DEFAULT="$TRAF_PACKAGE"
+fi
+
+
+echo -n "Enter full path (including .tar or .tar.gz) of trafodion tar file [$TRAF_DEFAULT]: "
+read answer
+if [ -z $answer ] && [ -z $TRAF_DEFAULT ]; then
+   echo "***ERROR: Must specify Trafodion tar file location."
+   exit -1
+fi
+
+if [ ! -z "$answer" ]; then
+   TRAF_PACKAGE="$answer"
+else
+   TRAF_PACKAGE="$TRAF_DEFAULT"
+fi
+
+# make sure file exists
+if [ ! -f $TRAF_PACKAGE ]; then
+   echo "***ERROR: $TRAF_PACKAGE does not exist"
+   exit -1
+fi
+
+# Determine if we have a package or just the trafodion_server tar file
+# Normally, the package file is what should be specified but because
+# we used to not support the package file and required the trafodion core
+# tar file previous users might still specify the trafodion core tar file instead
+# Also, developers might only have trafodion core tar file and not a package
+# tar file.
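+# a package tar file carries a build-version.txt marker; if the listing lacks it,
+# treat the input as a trafodion core (trafodion_server) tar file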
+package=$(tar -tzf $TRAF_PACKAGE | grep build-version.txt | wc -l )
+if [ $package -eq 0 ]; then
+   
+   # assume user specified a trafodion core tar file
+   TRAF_BUILD=$TRAF_PACKAGE
+   TRAF_PACKAGE=""
+   
+   #TODO: do a quick sanity check on the file to make sure it is a traf build tar file
+
+   echo "***INFO: tar file is not a package tar file which includes Trafodion & DCS"
+   echo "***INFO: assuming it is a Trafodion build only tar file"
+   
+   # since it is not a package tar file, we'll need to prompt for the DCS build file
+   echo -n "Enter location of DCS tar file [DCS_BUILD]: "
+   read answer
+   if [ -z $answer ]; then
+      if [ -z $DCS_BUILD ]; then
+         echo "***ERROR: Must specify DCS tar file location."
+         exit -1
+      fi
+   else
+      DCS_BUILD=$answer
+   fi
+   
+   # make sure file exists
+   if [ ! -f $DCS_BUILD ]; then
+      echo "***ERROR: $DCS_BUILD does not exist"
+      exit -1
+   fi
+
+   #TODO: do a quick sanity check on the file to make sure it is a DCS build tar file
+   
+   # since it is not a package tar file, we'll need to prompt for the REST build file
+   echo -n "Enter location of REST tar file [$REST_BUILD]: "
+   read answer
+   if [ -z $answer ]; then
+      if [ -z $REST_BUILD ]; then
+         echo "***ERROR: Must specify REST tar file location."
+         exit -1
+      fi
+   else
+      REST_BUILD=$answer
+   fi
+   
+   # make sure file exists
+   if [ ! -f $REST_BUILD ]; then
+      echo "***ERROR: $REST_BUILD does not exist"
+      exit -1
+   fi
+
+   #TODO: do a quick sanity check on the file to make sure it is a rest build tar file
+   
+else
+   # user specified a package file
+   TRAF_BUILD=""
+   DCS_BUILD=""
+   REST_BUILD=""
+fi
+
+
+echo "export TRAF_PACKAGE=\"$TRAF_PACKAGE\"" >> $LOCAL_TRAF_CONFIG
+echo "export TRAF_BUILD=\"$TRAF_BUILD\"" >> $LOCAL_TRAF_CONFIG
+echo "export DCS_BUILD=\"$DCS_BUILD\"" >> $LOCAL_TRAF_CONFIG
+echo "export REST_BUILD=\"$REST_BUILD\"" >> $LOCAL_TRAF_CONFIG
+
+#Check if install_features file exists
+if [[ -z $TRAF_BUILD ]]; then
+   TRAF_BUILD=$(tar -tf $TRAF_PACKAGE | grep "trafodion_server")
+   tar -xzf $TRAF_PACKAGE --directory $LOCAL_WORKDIR
+fi
+
+if [[ -z $TRAF_BUILD ]]; then 
+   echo "***ERROR: trafodion_server-n.n.n.tgz not included in $TRAF_PACKAGE"
+   echo "***ERROR: Workaround may be to enter trafodion_server-n.n.n.tgz as trafodion tar package."
+   exit -1
+fi
+
+
+#==============================================
+#Hadoop path 
+
+echo -n "Enter Hadoop installed full path, default is [$HADOOP_PREFIX]: "
+read answer
+
+if [ -z  $answer ]; then
+   if [[ -z $HADOOP_PREFIX ]]; then
+      echo "***ERROR: Must enter apache Hadoop installed path"
+      exit -1
+   fi
+else
+   if [[ -e $answer ]]; then
+      HADOOP_PREFIX=$answer
+   else
+      echo "***ERROR: apache Hadoop installed path doesn't exist"
+      exit -1
+   fi
+fi
+echo "export HADOOP_PREFIX=\"$HADOOP_PREFIX\"" >> $LOCAL_TRAF_CONFIG
+
+#==============================================
+#Hbase path 
+
+echo -n "Enter Hbase installed full path, default is [$HBASE_HOME]: "
+read answer
+
+if [ -z  $answer ]; then
+   if [[ -z $HBASE_HOME ]]; then
+      echo "***ERROR: Must enter apache Hbase installed path"
+      exit -1
+   fi
+else
+   if [[ -e $answer ]]; then
+      HBASE_HOME=$answer
+   else
+      echo "***ERROR: apache Hbase installed path doesn't exist"
+      exit -1
+   fi
+fi
+echo "export HBASE_HOME=\"$HBASE_HOME\"" >> $LOCAL_TRAF_CONFIG
+
+#==============================================
+#Hive path 
+
+echo -n "Enter Hive installed full path, default is [$HIVE_HOME]: "
+read answer
+
+if [ -z  $answer ]; then
+   if [[ -z $HIVE_HOME ]]; then
+      echo "***ERROR: Must enter apache Hive installed path"
+      exit -1
+   fi
+else
+   if [[ -e $answer ]]; then
+      HIVE_HOME=$answer
+   else
+      echo "***ERROR: apache Hive installed path doesn't exist"
+      exit -1
+   fi
+fi
+
+echo "export HIVE_HOME=\"$HIVE_HOME\"" >> $LOCAL_TRAF_CONFIG
+
+#=====================
+#Hadoop checks
+count=0
+
+# assume they are the same
+HADOOP_NODES=$NODE_LIST
+echo "export HADOOP_NODES=\"$HADOOP_NODES\"" >> $LOCAL_TRAF_CONFIG
+
+for node in $NODE_LIST;
+do
+   ssh -q -n $node 'echo "exit" | hbase shell &> $HOME/hbaseVersion.txt'
+   count=$[$count+1]
+   foundFile=$(ssh -q -n $node ls $HOME/hbaseVersion.txt | wc -l)
+   if [[ $foundFile -eq "0" ]]; then
+      echo "***ERROR: HBase shell not found on any node"
+      echo "***ERROR: Check that HBase is installed and working"
+      exit -1
+   fi
+done
+
+
+#==============================================
+#HDFS Username
+
+echo -n "Enter HDFS username, default is [$HDFS_USER]: "
+read answer
+
+if [ -z $answer ]; then
+   echo "export HDFS_USER=\"$HDFS_USER\"" >> $LOCAL_TRAF_CONFIG
+else
+   echo "export HDFS_USER=\"$answer\"" >> $LOCAL_TRAF_CONFIG
+fi
+
+#==============================================
+#HBase user
+
+echo -n "Enter HBase username, default is [$HBASE_USER]: "
+read answer
+
+if [ -z $answer ]; then
+   echo "export HBASE_USER=\"$HBASE_USER\"" >> $LOCAL_TRAF_CONFIG
+else
+   echo "export HBASE_USER=\"$answer\"" >> $LOCAL_TRAF_CONFIG
+fi
+
+#==============================================
+#HBase group
+
+echo -n "Enter HBase group, default is [$HBASE_GROUP]: "
+read answer
+
+if [ -z $answer ]; then
+   echo "export HBASE_GROUP=\"$HBASE_GROUP\"" >> $LOCAL_TRAF_CONFIG
+else
+   echo "export HBASE_GROUP=\"$answer\"" >> $LOCAL_TRAF_CONFIG
+fi
+
+#==============================================
+#Install location
+
+if [ -z $SQ_ROOT ]; then
+
+	# if we have a package file then use its name for the default install dir
+	if [ ! -z "$TRAF_PACKAGE" ]; then
+	   traf_filename=$(basename "$TRAF_PACKAGE")
+	   SQ_ROOT="$HOME_DIR/$TRAF_USER/${traf_filename%.tar.gz}"
+	else
+	   # otherwise, create a default install dir using a timestamp
+	   SQ_ROOT="$HOME_DIR/$TRAF_USER/traf_$(date +%F_%H%M)"
+	fi
+fi
+
+echo -n "Enter directory to install trafodion to, default is [$SQ_ROOT]: "
+read answer
+
+if [ -z $answer ]; then
+   echo "export SQ_ROOT=\"$SQ_ROOT\"" >> $LOCAL_TRAF_CONFIG
+else
+   echo "export SQ_ROOT=\"$answer\"" >> $LOCAL_TRAF_CONFIG
+fi
+
+#==============================================
+#Run sqstart
+if [[ $SCANNER_MODE == "N" ]]; then
+   echo -n "Start Trafodion after install (Y/N), default is Y: "
+   read answer
+
+   if [ -z $answer ]; then
+      echo "export START=\"Y\"" >> $LOCAL_TRAF_CONFIG
+      echo "export INIT_TRAFODION=\"Y\"" >> $LOCAL_TRAF_CONFIG 
+   else
+      if [[ "${answer}" =~ ^[Yy]$ ]]; then
+         echo "export START=\"Y\"" >> $LOCAL_TRAF_CONFIG
+         echo "export INIT_TRAFODION=\"Y\"" >> $LOCAL_TRAF_CONFIG 
+      else
+         echo "export START=\"N\"" >> $LOCAL_TRAF_CONFIG
+         echo "export INIT_TRAFODION=\"N\"" >> $LOCAL_TRAF_CONFIG
+         echo "***INFO: Trafodion will not be started after install is complete."
+         echo "***INFO: User will need follow steps on the trafodion wiki on how to start."
+      fi
+   fi
+fi
+#==============================================
+#DCS Servers
+
+echo -n "Total number of DCS servers to start, default [$DCS_SERVERS_PARM]: "
+read answer
+
+if [ ! -z $answer ]; then
+   DCS_SERVERS_PARM="$answer"
+fi
+
+if [[ $DCS_SERVERS_PARM != *[!0-9]* ]]; then
+   echo "export DCS_SERVERS_PARM=\"$DCS_SERVERS_PARM\"" >> $LOCAL_TRAF_CONFIG
+else
+   echo "***ERROR: DCS servers must be a number."
+   exit -1
+fi
+
+#==============================================
+#Simple security
+if [[ $SCANNER_MODE == "N" ]]; then
+   echo -n "Enable simple LDAP security (Y/N), default is N: "
+   read answer
+
+   if [ ! -z $answer ]; then
+      if [[ "${answer}" =~ ^[Yy]$ ]]; then
+         LDAP_SECURITY="Y"
+      fi
+   else 
+      LDAP_SECURITY="N"
+   fi
+
+   echo "export LDAP_SECURITY=\"$LDAP_SECURITY\"" >> $LOCAL_TRAF_CONFIG
+
+   if [[ "$LDAP_SECURITY" == "Y" ]]; then
+      #Hostnames
+      echo -n "Enter list of LDAP Hostnames (blank separated), default [$LDAP_HOSTS]: "
+      read answer1
+      if [[ -z "$answer1" ]]; then
+         if [ -z "$LDAP_HOSTS" ]; then
+            echo "***ERROR: Must enter list of LDAP Hostnames."
+            exit -1
+         fi
+      else
+         LDAP_HOSTS=$answer1
+      fi
+      echo "export LDAP_HOSTS=\"$LDAP_HOSTS\"" >> $LOCAL_TRAF_CONFIG
+      cp -rf traf_authentication_conf_default $LOCAL_WORKDIR/$LDAP_AUTH_FILE
+      echo "export LDAP_AUTH_FILE=\"$LDAP_AUTH_FILE\"" >> $LOCAL_TRAF_CONFIG
+
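+      # build a newline-separated "LdapHostname: <host>" list in reverse order, so
+      # the sed below expands the single LdapHostname: template line into one line per host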
+      counter=0
+      host_count=$(echo $LDAP_HOSTS | wc -w)
+      for host in $LDAP_HOSTS
+      do
+        counter=$[$counter+1]
+        if [ $counter -eq "1" ]; then
+           list=" LdapHostname: $host"
+        elif [ $counter -eq "$host_count" ]; then
+           list="LdapHostname: $host\n $list"
+        else
+           list=" LdapHostname: $host\n $list"
+        fi
+      done
+      sed -i -e "s/LdapHostname:/$list/g" $LDAP_AUTH_FILE
+
+      #Port numbers
+      echo -n "Enter LDAP Port number (Example: 389 for no encryption or TLS, 636 for SSL), default [$LDAP_PORT]: "
+      read answer2
+      if [[ -z "$answer2" ]]; then
+         if [ -z "$LDAP_PORT" ]; then
+            echo "***ERROR: Must enter LDAP port."
+            exit -1
+         fi
+      else
+         LDAP_PORT=$answer2
+      fi
+
+      echo "export LDAP_PORT=\"$LDAP_PORT\"" >> $LOCAL_TRAF_CONFIG
+      port="LdapPort: $LDAP_PORT"
+      sed -i -e "s/LdapPort:389/$port/g" $LDAP_AUTH_FILE
+
+      #Unique IDs
+      echo -n "Enter all LDAP unique identifiers (blank separated), default [$LDAP_ID]: "
+      read answer3
+      if [[ -z "$answer3" ]]; then
+         if [ -z "$LDAP_ID" ]; then
+            echo "***ERROR: Must enter LDAP unique identifiers."
+            exit -1
+         fi
+      else
+         LDAP_ID=$answer3
+      fi
+      echo "export LDAP_ID=\"$LDAP_ID\"" >> $LOCAL_TRAF_CONFIG
+
+      counter=0
+      id_count=$(echo $LDAP_ID | wc -w)
+      for id in $LDAP_ID
+      do
+        counter=$[$counter+1]
+        if [ $counter -eq "1" ]; then
+           list=" UniqueIdentifier: $id"
+        elif [ $counter -eq "$id_count" ]; then
+           list="UniqueIdentifier: $id\n $list"
+        else
+           list=" UniqueIdentifier: $id\n $list"
+        fi
+      done
+
+      sed -i -e "s/UniqueIdentifier:/$list/g" $LDAP_AUTH_FILE
+
+      #Encryption level
+      echo -n "Enter LDAP Encryption Level (0: Encryption not used, 1: SSL, 2: TLS), default [$LDAP_LEVEL]: "
+      read answer4
+      if [[ -z "$answer4" ]]; then
+         if [ -z "$LDAP_LEVEL" ]; then
+            echo "***ERROR: Much enter LDAP Encryption level."
+            exit -1
+         fi
+      else
+         LDAP_LEVEL=$answer4
+      fi
+      echo "export LDAP_LEVEL=\"$LDAP_LEVEL\"" >> $LOCAL_TRAF_CONFIG
+
+      level="LDAPSSL: $LDAP_LEVEL"
+      sed -i -e "s/LDAPSSL:0/$level/g" $LDAP_AUTH_FILE
+
+
+      if [[ "$LDAP_LEVEL" -eq "1" ]] || [[ "$LDAP_LEVEL" -eq "2" ]]; then
+         echo -n "Enter full path to TLS certificate, default [$LDAP_CERT]: "
+         read answer7
+         if [[ -z "$answer7" ]]; then
+            if [ -z "$LDAP_CERT" ]; then
+               echo "***ERROR: Encryption level 2(TLS) requires a certificate file (*.pem)"
+               exit -1
+            fi
+         else
+            LDAP_CERT=$answer7
+            LDAP_CERT_BASE=$(basename $LDAP_CERT)
+
+            if [[ ! -f $LDAP_CERT ]]; then
+               echo "***ERROR: File does not exist."
+               echo "***ERROR: Please enter full path or check for errors."
+               exit -1
+            fi
+
+         fi
+         echo "export LDAP_CERT=\"$LDAP_CERT\"" >> $LOCAL_TRAF_CONFIG
+         echo "export LDAP_CERT_BASE=\"$LDAP_CERT_BASE\"" >> $LOCAL_TRAF_CONFIG
+
+         list="TLS_CACERTFilename: $HOME_DIR/$TRAF_USER/$LDAP_CERT_BASE"
+
+         sed -i -e "s@TLS\_CACERTFilename:@$list@" $LDAP_AUTH_FILE
+      fi
+
+      #Search username and password
+      echo -n "Enter Search user name (if required), default [$LDAP_USER]: "
+      read answer5
+      if [[ ! -z "$answer5" ]]; then
+         LDAP_USER=$answer5
+         echo "export LDAP_USER=\"$LDAP_USER\"" >> $LOCAL_TRAF_CONFIG
+         
+         echo -n "Enter Search password (if required), default [$LDAP_PASSWORD]: "
+         read answer6
+         if [[ ! -z "$answer6" ]]; then
+            LDAP_PASSWORD=$answer6
+            echo "export LDAP_PASSWORD=\"$LDAP_PASSWORD\"" >> $LOCAL_TRAF_CONFIG
+         fi
+      fi
+      user="LDAPSearchDN: $LDAP_USER"
+      sed -i -e "s/LDAPSearchDN:/$user/g" $LDAP_AUTH_FILE
+      password="LDAPSearchPwd: $LDAP_PASSWORD"
+      sed -i -e "s/LDAPSearchPwd:/$password/g" $LDAP_AUTH_FILE
+   fi
+
+fi
+#==============================================
+#All items added to config file
+#Will add setup complete flag
+
+echo "export CONFIG_COMPLETE=\"true\"" >> $LOCAL_TRAF_CONFIG
+#=============================================
+#Create directories and cp files
+
+sudo mkdir -p $TRAF_CONFIG_DIR
+sudo chmod 777 $TRAF_CONFIG_DIR
+sudo cp $LOCAL_TRAF_CONFIG $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+echo "***INFO: Configuration file: $TRAF_CONFIG"
+
+echo "***INFO: Trafodion configuration setup complete"

