trafodion-commits mailing list archives

From ama...@apache.org
Subject [1/5] incubator-trafodion git commit: [TRAFODION-1903] Removing TRAF_VERSION and other cleanup
Date Fri, 25 Mar 2016 21:10:57 GMT
Repository: incubator-trafodion
Updated Branches:
  refs/heads/master ebeecd7b2 -> f34fceb0b


[TRAFODION-1903] Removing TRAF_VERSION and other cleanup


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/93728159
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/93728159
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/93728159

Branch: refs/heads/master
Commit: 93728159a11e7a0e274740b41dd38a823fd50e10
Parents: 760ba88
Author: Amanda Moran <amanda@apache.com>
Authored: Tue Mar 22 00:31:45 2016 +0000
Committer: Amanda Moran <amanda@apache.com>
Committed: Tue Mar 22 00:31:45 2016 +0000

----------------------------------------------------------------------
 install/installer/rest_installer                |   7 --
 install/installer/traf_cloudera_mods98          |  66 +++++++-----
 install/installer/traf_config_check             |   7 +-
 install/installer/traf_createPasswordLessSSH    |   6 +-
 install/installer/traf_getHadoopNodes           |  88 ++++++++++++++++
 install/installer/traf_hortonworks_mods98       | 101 +++++++------------
 .../installer/trafodion_apache_hadoop_install   |  12 +--
 install/installer/trafodion_install             |  12 ---
 install/installer/trafodion_uninstaller         |  10 +-
 9 files changed, 176 insertions(+), 133 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/93728159/install/installer/rest_installer
----------------------------------------------------------------------
diff --git a/install/installer/rest_installer b/install/installer/rest_installer
index 68888c2..d8f900d 100755
--- a/install/installer/rest_installer
+++ b/install/installer/rest_installer
@@ -108,13 +108,6 @@ mv $MY_SQROOT/sqenvcom.temp $MY_SQROOT/sqenvcom.sh
 cd $REST_DIR/conf
 
 echo "***INFO: modifying $REST_DIR/conf/rest-site.xml"
-# get zookeeper quorum
-
-lineNumber=$(grep -n "zookeeper\.quorum" /etc/hbase/conf/hbase-site.xml | sed 's/\:.*//')
-lineNumber=$((lineNumber+1))
-
-ZOOKEEPER_NODES=`sed "$lineNumber!d" /etc/hbase/conf/hbase-site.xml | sed 's/\/value.*//' | sed 's/.*>//' | sed 's/.$//'`
-
 # add zookeeper quorum property to end of configuration
 rm rest-site.temp 2>/dev/null
 cat rest-site.xml | sed -e "s@</configuration>@  <property>\n    <name>rest.zookeeper.quorum</name>\n    <value>$ZOOKEEPER_NODES</value>\n  </property>\n </configuration>@" > rest-site.temp

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/93728159/install/installer/traf_cloudera_mods98
----------------------------------------------------------------------
diff --git a/install/installer/traf_cloudera_mods98 b/install/installer/traf_cloudera_mods98
index edb09e1..865ca08 100755
--- a/install/installer/traf_cloudera_mods98
+++ b/install/installer/traf_cloudera_mods98
@@ -31,8 +31,10 @@ source $TRAF_CONFIG
 export PDSH="pdsh -R exec"
 export PDSH_SSH_CMD="ssh -q -n %h"
 export PDCP="pdcp -R ssh"
-export PDSH_HADOOP_NODES="$PDSH $MY_HADOOP_NODES $PDSH_SSH_CMD"
-export PDCP_HADOOP_NODES="$PDCP $MY_HADOOP_NODES"
+export PDSH_HADOOP_NODES="$PDSH $MY_HBASE_NODES $PDSH_SSH_CMD"
+export PDCP_HADOOP_NODES="$PDCP $MY_HBASE_NODES"
+export HDFS_NODE=$(echo $HDFS_NODES | head -n1 | awk '{print $1;}')
+export HBASE_NODE=$(echo $HBASE_NODES | head -n1 | awk '{print $1;}')
 #=====================================
 # copy Trafodion trx jar to Cloudera's plugins directory on all nodes
 
@@ -42,18 +44,18 @@ cd $UNTAR_DIR
 if [[ $CDH_5_3_HDP_2_2_SUPPORT == "N" ]]; then
    if [[ $CDH_5_4_SUPPORT == "Y" ]] || [[ $CDH_5_5_SUPPORT == "Y" ]]; then
       if [[ $CDH_VERSION == "5.4" ]]; then
-         hbase_trx_jar="hbase-trx-cdh5_4-${TRAF_VERSION}.jar"
+         hbase_trx_jar="hbase-trx-cdh5_4-*.jar"
       else
-         hbase_trx_jar="hbase-trx-cdh5_5-${TRAF_VERSION}.jar"
+         hbase_trx_jar="hbase-trx-cdh5_5-*.jar"
       fi
    else
-      hbase_trx_jar="hbase-trx-${TRAF_VERSION}.jar" 
+      hbase_trx_jar="hbase-trx-*.jar" 
    fi 
 else
-    hbase_trx_jar="hbase-trx-cdh5_3-${TRAF_VERSION}.jar"
+    hbase_trx_jar="hbase-trx-cdh5_3-*.jar"
 fi
 
-traf_util_jar="trafodion-utility-${TRAF_VERSION}.jar"
+traf_util_jar="trafodion-utility-*.jar"
 
 
 # The permissions the Trafodion build process creates on the hbase-trx jar
@@ -78,7 +80,9 @@ if [ $node_count -ne 1 ]; then
     $PDSH_HADOOP_NODES mkdir -p $LOCAL_WORKDIR 2>/dev/null
     cp $UNTAR_DIR/export/lib/$hbase_trx_jar $LOCAL_WORKDIR
     cp $UNTAR_DIR/export/lib/$traf_util_jar $LOCAL_WORKDIR
+    echo "here"
     $PDCP_HADOOP_NODES $LOCAL_WORKDIR/$hbase_trx_jar $LOCAL_WORKDIR
+    echo "there"
     $PDCP_HADOOP_NODES $LOCAL_WORKDIR/$traf_util_jar $LOCAL_WORKDIR
     $PDSH_HADOOP_NODES sudo cp $LOCAL_WORKDIR/$hbase_trx_jar $HADOOP_PATH
     $PDSH_HADOOP_NODES sudo cp $LOCAL_WORKDIR/$traf_util_jar $HADOOP_PATH
@@ -87,7 +91,7 @@ if [ $node_count -ne 1 ]; then
     $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$hbase_trx_jar 2>/dev/null
     $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$traf_util_jar 2>/dev/null
 else
-    for node in $HADOOP_NODES
+    for node in $HBASE_NODES
     do
     ssh -q -n $node sudo rm -rf /usr/lib/hbase/lib/hbase-trx* 2>/dev/null
     ssh -q -n $node sudo rm -rf /usr/share/cmf/lib/plugins/hbase-trx* 2>/dev/null
@@ -108,7 +112,7 @@ fi
 #====================================
 #Make sure hbase-trx*jar got copied
 
-for node in $HADOOP_NODES
+for node in $HBASE_NODES
 do
    copiedOver=$(ssh -q -n $node sudo ls $HADOOP_PATH/hbase-trx* | wc -l)
    if [[ $copiedOver -ne "1" ]]; then
@@ -128,14 +132,20 @@ echo "***INFO: $hbase_trx_jar copied correctly! Huzzah."
 rm $LOCAL_WORKDIR/traf_temp_output 2>/dev/null
 
 #Copy hbase-site.xml file
-sudo cp /etc/hbase/conf/hbase-site.xml $HOME
-sudo chown $(whoami).$(whoami) $HOME/hbase-site.xml
-sudo chmod 777 $HOME/hbase-site.xml
+ssh -q -n $HBASE_NODE sudo cp /etc/hbase/conf/hbase-site.xml $HOME
+ssh -q -n $HBASE_NODE sudo chown "$(whoami)"."$(whoami)" "$HOME"/hbase-site.xml
+ssh -q -n $HBASE_NODE sudo chmod 777 $HOME/hbase-site.xml
+
+scp -q $(whoami)@$HBASE_NODE:$HOME/hbase-site.xml $HOME
+if [[ $? -gt 1 ]]; then
+   echo "***ERROR: Unable to find /etc/hbase/conf/hbase-site.xml file on $HBASE_NODE or unable
to copy."
+   exit -1
+fi
 
 sudo cp $HOME/hbase-site.xml $TRAF_WORKDIR
 sudo chown trafodion.trafodion $TRAF_WORKDIR/hbase-site.xml
 
-sudo su $HDFS_USER --command "$HADOOP_BIN_PATH/hadoop fs -mkdir /hbase-staging" 2> $HOME/traf_temp_output
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' "$HADOOP_BIN_PATH"'/hadoop fs -mkdir /hbase-staging" 2> $HOME/traf_temp_output'
 if [ $? != 0 ]; then
    # ok if directory already exists
    dir_exists=$(grep "File exists" $HOME/traf_temp_output | wc -l)
@@ -146,8 +156,8 @@ if [ $? != 0 ]; then
    fi
 fi
 
-sudo su $HDFS_USER --command "$HADOOP_BIN_PATH/hadoop fs -chown -R $HBASE_USER:$HBASE_GROUP /hbase-staging"
-sudo su $HDFS_USER --command "$HADOOP_BIN_PATH/hadoop fs -mkdir /bulkload" 2> $HOME/traf_temp_output
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' "$HADOOP_BIN_PATH"'/hadoop fs -chown -R '"$HBASE_USER"':'"$HBASE_GROUP"' /hbase-staging"'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' "$HADOOP_BIN_PATH"'/hadoop fs -mkdir /bulkload" 2> $HOME/traf_temp_output'
 if [ $? != 0 ]; then
    # ok if directory already exists
    dir_exists=$(grep "File exists" $HOME/traf_temp_output | wc -l)
@@ -157,10 +167,10 @@ if [ $? != 0 ]; then
       exit -1
    fi
 fi
-sudo su $HDFS_USER --command "$HADOOP_BIN_PATH/hadoop fs -chown -R $TRAF_USER:trafodion /bulkload"
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' "$HADOOP_BIN_PATH"'/hadoop fs -chown -R $TRAF_USER:trafodion /bulkload"'
 
 # Create lobs directory
-sudo su $HDFS_USER --command "$HADOOP_BIN_PATH/hadoop fs -mkdir /lobs" 2> $HOME/traf_temp_output
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "'"$HADOOP_BIN_PATH"'/hadoop fs -mkdir /lobs" 2> $HOME/traf_temp_output'
 if [ $? != 0 ]; then
    # ok if directory already exists
    dir_exists=$(grep "File exists" $HOME/traf_temp_output | wc -l)
@@ -170,11 +180,11 @@ if [ $? != 0 ]; then
       exit -1
    fi
 fi
-sudo su $HDFS_USER --command "$HADOOP_BIN_PATH/hadoop fs -chown -R $TRAF_USER:trafodion /lobs"
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "'"$HADOOP_BIN_PATH"'/hadoop fs -chown -R $TRAF_USER:trafodion /lobs"'
 
 
 #Create Backup directory 
-sudo su $HDFS_USER --command "$HADOOP_BIN_PATH/hadoop fs -mkdir /trafodion_backups" 2> $HOME/traf_temp_output
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' "$HADOOP_BIN_PATH"'/hadoop fs -mkdir /trafodion_backups" 2> $HOME/traf_temp_output'
 if [ $? != 0 ]; then
    # ok if directory already exists
    dir_exists=$(grep "File exists" $HOME/traf_temp_output | wc -l)
@@ -184,9 +194,9 @@ if [ $? != 0 ]; then
       exit -1
    fi
 fi
-sudo su $HDFS_USER --command "$HADOOP_BIN_PATH/hadoop fs -chown -R $TRAF_USER:trafodion /trafodion_backups"
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "'"$HADOOP_BIN_PATH"'/hadoop fs -chown -R $TRAF_USER:trafodion /trafodion_backups"'
 
-sudo su $HDFS_USER --command "$HADOOP_BIN_PATH/hadoop fs -chmod 777 /trafodion_backups"
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' "$HADOOP_BIN_PATH"'/hadoop fs -chmod 777 /trafodion_backups"'
 #=====================================
 # Modify hadoop settings as needed by Trafodion
 
@@ -358,33 +368,33 @@ echo "***INFO: Hadoop restart completed successfully"
 
 # wait to make sure HDFS is fully restarted and out of safemode
 echo "***INFO: waiting for HDFS to exit safemode"
-sudo su hdfs --command "$HADOOP_BIN_PATH/hdfs dfsadmin -safemode wait"
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfsadmin -safemode wait"'
 
 #====================================================
 # NOTE: These command must be done AFTER acls are 
 #       enabled and HDFS has been restarted
 echo "***INFO: Setting HDFS ACLs for snapshot scan support"
-sudo su hdfs --command "$HADOOP_BIN_PATH/hdfs dfs -mkdir -p /hbase/archive"
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs -mkdir -p /hbase/archive"'
 if [ $? != 0 ]; then
    echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -mkdir -p /hbase/archive) command failed"
    exit -1
 fi
-sudo su hdfs --command "$HADOOP_BIN_PATH/hdfs dfs -chown hbase:hbase /hbase/archive"
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs -chown hbase:hbase /hbase/archive"'
 if [ $? != 0 ]; then
    echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -chown hbase:hbase /hbase/archive) command
failed"
    exit -1
 fi
-sudo su hdfs --command "$HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m user:$TRAF_USER:rwx /hbase/archive"
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs -setfacl -R -m user:'"$TRAF_USER"':rwx /hbase/archive"'
 if [ $? != 0 ]; then
    echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m user:$TRAF_USER:rwx /hbase/archive)
command failed"
    exit -1
 fi
-sudo su hdfs --command "$HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m default:user:$TRAF_USER:rwx /hbase/archive"
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs -setfacl -R -m default:user:'"$TRAF_USER"':rwx /hbase/archive"'
 if [ $? != 0 ]; then
    echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m default:user:$TRAF_USER:rwx
/hbase/archive) command failed"
    exit -1
 fi
-sudo su hdfs --command "$HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m mask::rwx /hbase/archive"
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs -setfacl -R -m mask::rwx /hbase/archive"'
 if [ $? != 0 ]; then
    echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m mask::rwx /hbase/archive) command
failed"
    exit -1
@@ -404,7 +414,7 @@ if [ $node_count -ne 1 ]; then
    cp $TRAF_CONFIG $LOCAL_WORKDIR
    $TRAF_PDCP $LOCAL_WORKDIR/$TRAF_CONFIG_FILE $HOME
    $TRAF_PDSH sudo mkdir -p $TRAF_CONFIG_DIR
-   $TRAF_PDSH sudo cp $HOME/$TRAF_CONFIG_FILE $TRAF_CONFIG_DIR
+   $TRAF_PDSH sudo cp -rf$HOME/$TRAF_CONFIG_FILE $TRAF_CONFIG_DIR
    $TRAF_PDSH sudo chmod 777 $TRAF_CONFIG
 fi
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/93728159/install/installer/traf_config_check
----------------------------------------------------------------------
diff --git a/install/installer/traf_config_check b/install/installer/traf_config_check
index a1ed2e9..85acb03 100755
--- a/install/installer/traf_config_check
+++ b/install/installer/traf_config_check
@@ -426,15 +426,10 @@ else
          fi
        done
     fi
-    hadoopPath="$HADOOP_PATH"
-    hadoopBinPath="$HADOOP_BIN_PATH"
-
-    export PATH="$PATH:$JAVA_HOME/bin"
+    export PATH=$PATH:$JAVA_HOME/bin
     sudo chmod 777 $TRAF_CONFIG
     sed -i '/PATH\=/d' $TRAF_CONFIG
     echo "export PATH=\"$PATH\"" >>$TRAF_CONFIG
-    echo "export HADOOP_BIN_PATH=\"$hadoopBinPath\"" >>$TRAF_CONFIG
-    echo "export HADOOP_PATH=\"$hadoopPath\"" >>$TRAF_CONFIG
     sudo chmod 777 $TRAF_CONFIG
     source $TRAF_CONFIG
 fi

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/93728159/install/installer/traf_createPasswordLessSSH
----------------------------------------------------------------------
diff --git a/install/installer/traf_createPasswordLessSSH b/install/installer/traf_createPasswordLessSSH
index da672a4..4391d6f 100755
--- a/install/installer/traf_createPasswordLessSSH
+++ b/install/installer/traf_createPasswordLessSSH
@@ -43,8 +43,8 @@ done
 
 for node in $NODES
 do
-    sudo su $TRAF_USER --command "ssh -q -oStrictHostKeyChecking=no $node hostname"
-    sudo su $TRAF_USER --command "ssh -q -oStrictHostKeyChecking=no localhost hostname"
+    sudo su $TRAF_USER --command "ssh -q -oStrictHostKeyChecking=no $node hostname" 2>&1 > /dev/null
+    sudo su $TRAF_USER --command "ssh -q -oStrictHostKeyChecking=no localhost hostname" 2>&1 > /dev/null
     if [ $? -ne 0 ]; then
         echo "***ERROR: Unable to ssh to node $node"
         exit -1
@@ -52,7 +52,7 @@ do
 
     for nodes in $NODES
     do
-       sudo su $TRAF_USER --command "ssh -q -oStrictHostKeyChecking=no $node ssh -q -oStrictHostKeyChecking=no $nodes hostname"
+       sudo su $TRAF_USER --command "ssh -q -oStrictHostKeyChecking=no $node ssh -q -oStrictHostKeyChecking=no $nodes hostname" 2>&1 > /dev/null
        if [ $? -ne 0 ]; then
           echo "***ERROR: Unable to ssh to node $node"
           exit -1

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/93728159/install/installer/traf_getHadoopNodes
----------------------------------------------------------------------
diff --git a/install/installer/traf_getHadoopNodes b/install/installer/traf_getHadoopNodes
index e397d4a..88cdfce 100755
--- a/install/installer/traf_getHadoopNodes
+++ b/install/installer/traf_getHadoopNodes
@@ -31,6 +31,7 @@ if [ $HADOOP_TYPE == "cloudera" ]; then
    curlRC=$?
    numberHadoopNodes=$(grep -r "hostname" tempFile | wc -l)
    grep -r "hostname" tempFile > tempFile2
+   grep -r "hostId" tempFile > tempHadoop
 
    if [ -d /opt/cloudera/parcels/CDH ]; then
       export HADOOP_PATH="/opt/cloudera/parcels/CDH/lib/hbase/lib"
@@ -42,14 +43,51 @@ if [ $HADOOP_TYPE == "cloudera" ]; then
   echo "***INFO: HADOOP_PATH=$HADOOP_PATH"
   echo "***INFO: HADOOP_BIN_PATH=$HADOOP_BIN_PATH"
 
+  HADOOP_IDS=""
+  HBASE_NODES=""
+  MY_HBASE_NODES=""
+  HDFS_NODES=""
+  MY_HDFS_NODES=""
+  while read line
+  do
+     hostName=$(echo $line | awk '{print $3}' | sed 's/\"//g' | sed 's/\..*//' | sed 's/\,//g')
+     HADOOP_IDS="$HADOOP_IDS $hostName"
+   done < tempHadoop
+
+   for id in $HADOOP_IDS
+   do
+      hasHBase=$(curl -k -su $ADMIN:$PASSWORD $URL/api/v10/hosts/$id | grep "serviceName" | grep hbase | wc -l)
+      if [[ "$hasHBase" -ge "1" ]]; then
+         name=$(curl -k -su $ADMIN:$PASSWORD $URL/api/v10/hosts/$id | grep "hostname")
+         hostName=$(echo $name | awk '{print $3}' | sed 's/\"//g' | sed 's/\..*//' | sed 's/\,//g')
+         HBASE_NODES="$HBASE_NODES $hostName"
+         MY_HBASE_NODES="$MY_HBASE_NODES -w $hostName"
+      fi
+      
+      hasHDFS=$(curl -k -su $ADMIN:$PASSWORD $URL/api/v10/hosts/$id | grep "serviceName" | grep hdfs | wc -l)
+      if [[ "$hasHDFS" -ge "1" ]]; then
+         name=$(curl -k -su $ADMIN:$PASSWORD $URL/api/v10/hosts/$id | grep "hostname")
+         hostName=$(echo $name | awk '{print $3}' | sed 's/\"//g' | sed 's/\..*//' | sed 's/\,//g')
+         HDFS_NODES="$HDFS_NODES $hostName"
+         MY_HDFS_NODES="$MY_HDFS_NODES -w $hostName"
+      fi 
+   done
 fi
 
 if [[ $HADOOP_TYPE == "hortonworks" ]]; then
    curl -k -su $ADMIN:$PASSWORD $URL/api/v1/clusters/$CLUSTER_NAME/hosts > tempFile
+   curl -k -su $ADMIN:$PASSWORD $URL/api/v1/clusters/$CLUSTER_NAME/services/HDFS > tempFileHDFS
+   curl -k -su $ADMIN:$PASSWORD $URL/api/v1/clusters/$CLUSTER_NAME/services/HBASE > tempFileHBase
    curlRC=$?
    numberHadoopNodes=$(grep -r "host_name" tempFile | wc -l)
    grep -r "host_name" tempFile > tempFile2
+      
+   numberHadoopNodes=$(grep -r "host_name" tempFileHDFS | wc -l)
+   grep -r "host_name" tempFileHDFS > tempFileHDFS2
 
+   numberHadoopNodes=$(grep -r "host_name" tempFileHBase | wc -l)
+   grep -r "host_name" tempFileHBase > tempFileHBase2
+ 
    if [ -d /usr/lib/hbase/lib ]; then
       HADOOP_PATH="/usr/lib/hbase/lib"
    else
@@ -57,6 +95,41 @@ if [[ $HADOOP_TYPE == "hortonworks" ]]; then
    fi
    echo "***INFO: HADOOP_PATH=$HADOOP_PATH"
 
+   HDFS_NODES=""
+   MY_HDFS_NODES=""
+   while read line
+   do
+      hostName=$(echo $line | awk '{print $3}' | sed 's/\"//g' | sed 's/\..*//' | sed 's/\,//g')
+      if [[ "$HDFS_NODES" != *"$hostName"* ]] && [[ "$hostName" != "null" ]]; then
+         HDFS_NODES="$HDFS_NODES $hostName"
+         MY_HDFS_NODES="$MY_HDFS_NODES -w $hostName"
+      fi
+   done < tempFileHDFS2
+
+   if [[ -z $HDFS_NODES ]]; then
+      echo "***ERROR: List of HDFS nodes not found."
+      echo "***ERROR: Check that $HADOOP_TYPE is up and running."
+      exit -1
+   fi
+
+   echo "***INFO: $HADOOP_TYPE list of HDFS nodes: $HDFS_NODES"
+
+   HBASE_NODES=""
+   MY_HBASE_NODES=""
+   while read line
+   do
+      hostName=$(echo $line | awk '{print $3}' | sed 's/\"//g' | sed 's/\..*//' | sed 's/\,//g')
+      if [[ "$HBASE_NODES" != *"$hostName"* ]] && [[ "$hostName" != "null" ]]; then
+         HBASE_NODES="$HBASE_NODES $hostName"
+         MY_HBASE_NODES="$MY_HBASE_NODES -w $hostName"
+      fi
+   done < tempFileHBase2
+
+   if [[ -z $HBASE_NODES ]]; then
+      echo "***ERROR: List of HBase nodes not found."
+      echo "***ERROR: Check that $HADOOP_TYPE is up and running."
+      exit -1
+   fi
 fi
 
 if [ $curlRC != 0 ]; then
@@ -95,6 +168,7 @@ if [[ -z $HADOOP_NODES ]]; then
 fi
 
 echo "***INFO: $HADOOP_TYPE list of nodes: $HADOOP_NODES"
+
 hadoop_node_count=$(echo $HADOOP_NODES | wc -w)
 
 sudo chmod 777 $TRAF_CONFIG
@@ -104,6 +178,18 @@ sudo chmod 777 $TRAF_CONFIG
 sed -i '/MY_HADOOP_NODES\=/d' $TRAF_CONFIG
 echo "export MY_HADOOP_NODES=\"$MY_HADOOP_NODES\"" >> $TRAF_CONFIG
 sudo chmod 777 $TRAF_CONFIG
+sed -i '/HDFS_NODES\=/d' $TRAF_CONFIG
+echo "export HDFS_NODES=\"$HDFS_NODES\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/MY_HDFS_NODES\=/d' $TRAF_CONFIG
+echo "export MY_HDFS_NODES=\"$MY_HDFS_NODES\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/HBASE_NODES\=/d' $TRAF_CONFIG
+echo "export HBASE_NODES=\"$HBASE_NODES\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/MY_HBASE_NODES\=/d' $TRAF_CONFIG
+echo "export MY_HBASE_NODES=\"$MY_HBASE_NODES\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
 sed -i '/HADOOP_PATH\=/d' $TRAF_CONFIG
 echo "export HADOOP_PATH=\"$HADOOP_PATH\"" >> $TRAF_CONFIG
 sudo chmod 777 $TRAF_CONFIG
@@ -143,3 +229,5 @@ if [[ $error == "1" ]]; then
    exit -1
 fi
 
+
+rm -rf tempFile*

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/93728159/install/installer/traf_hortonworks_mods98
----------------------------------------------------------------------
diff --git a/install/installer/traf_hortonworks_mods98 b/install/installer/traf_hortonworks_mods98
index cffb3be..df8dba8 100755
--- a/install/installer/traf_hortonworks_mods98
+++ b/install/installer/traf_hortonworks_mods98
@@ -33,8 +33,8 @@ export PDSH="pdsh -R exec"
 export PDSH_SSH_CMD="ssh -q -n %h"
 export PDCP="pdcp -R ssh"
 
-export PDSH_HADOOP_NODES="$PDSH $MY_HADOOP_NODES $PDSH_SSH_CMD"
-export PDCP_HADOOP_NODES="$PDCP $MY_HADOOP_NODES"
+export PDSH_HADOOP_NODES="$PDSH $MY_HBASE_NODES $PDSH_SSH_CMD"
+export PDCP_HADOOP_NODES="$PDCP $MY_HBASE_NODES"
 #=====================================
 # copy Trafodion hbase trx jar to /usr/lib/hbase/lib
 
@@ -42,22 +42,26 @@ cd $UNTAR_DIR
 
 PORT=`echo $URL | sed 's/.*://'`
 AMBARI_HOST=$(echo $URL | sed 's@.*://@@' | sed 's@:.*@@')
+HDFS_NODE=$(echo $HDFS_NODES | head -n1 | awk '{print $1;}')
+HBASE_NODE=$(echo $HBASE_NODES | head -n1 | awk '{print $1;}')
 echo "export AMBARI_HOST=\"$AMBARI_HOST\"" >> $TRAF_CONFIG
+echo "export HDFS_NODE=\"$HDFS_NODE\"" >> $TRAF_CONFIG
+echo "export HBASE_NODE=\"$HBASE_NODE\"" >> $TRAF_CONFIG
 sudo chmod 777 $TRAF_CONFIG
 source $TRAF_CONFIG
 
 #determine java version and choose corresponding jar files
 if [[ $CDH_5_3_HDP_2_2_SUPPORT == "N" ]]; then
    if [[ $HDP_2_3_SUPPORT == "Y" ]]; then
-      hbase_trx_jar="hbase-trx-hdp2_3-${TRAF_VERSION}.jar"
+      hbase_trx_jar="hbase-trx-hdp2_3-*.jar"
    else
-      hbase_trx_jar="hbase-trx-hdp2_1-${TRAF_VERSION}.jar"
+      hbase_trx_jar="hbase-trx-hdp2_1-*.jar"
    fi
 else
-   hbase_trx_jar="hbase-trx-hdp2_2-${TRAF_VERSION}.jar"
+   hbase_trx_jar="hbase-trx-hdp2_2-*.jar"
 fi
 
-traf_util_jar="trafodion-utility-${TRAF_VERSION}.jar"
+traf_util_jar="trafodion-utility-*.jar"
 
 
 # The permissions the Trafodion build process creates on the hbase-trx jar
@@ -93,7 +97,7 @@ if [ $node_count -ne 1 ]; then
     $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$hbase_trx_jar 2>/dev/null
     $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$traf_util_jar 2>/dev/null
 else
-    for node in $HADOOP_NODES
+    for node in $HBASE_NODES
     do 
     ssh -q -n $node sudo rm -rf $HADOOP_PATH/hbase-trx* 2>/dev/null
     ssh -q -n $node sudo rm -rf /usr/lib/hbase/lib/hbase-trx* 2>/dev/null
@@ -114,7 +118,7 @@ fi
 #=======================================
 #Check that HBase-trx copied to all nodes
 
-for node in $HADOOP_NODES
+for node in $HBASE_NODES
 do
    copiedOver=$(ssh -q -n $node sudo ls $HADOOP_PATH/hbase-trx* | wc -l)
    if [[ $copiedOver -ne "1" ]]; then
@@ -130,13 +134,13 @@ echo "***INFO: $hbase_trx_jar copied correctly! Huzzah."
 
 
 #Copy hbase-site.xml file
-ssh -q -n $AMBARI_HOST sudo cp /etc/hbase/conf/hbase-site.xml $HOME
-ssh -q -n $AMBARI_HOST sudo chown $(whoami).$(whoami) $HOME/hbase-site.xml
-ssh -q -n $AMBARI_HOST sudo chmod 777 $HOME/hbase-site.xml
+ssh -q -n $HBASE_NODE sudo cp /etc/hbase/conf/hbase-site.xml $HOME
+ssh -q -n $HBASE_NODE sudo chown $(whoami).$(whoami) $HOME/hbase-site.xml
+ssh -q -n $HBASE_NODE sudo chmod 777 $HOME/hbase-site.xml
 
-scp -q $(whoami)@$AMBARI_HOST:$HOME/hbase-site.xml $HOME
+scp -q $(whoami)@$HBASE_NODE:$HOME/hbase-site.xml $HOME
 if [[ $? -gt 1 ]]; then
-   echo "***ERROR: Unable to find /etc/hbase/conf/hbase-site.xml file on $AMBARI_HOST or
unable to copy."
+   echo "***ERROR: Unable to find /etc/hbase/conf/hbase-site.xml file on $HBASE_NODE or unable
to copy."
    exit -1
 fi
 sudo cp $HOME/hbase-site.xml $TRAF_WORKDIR
@@ -146,56 +150,56 @@ sudo chown trafodion.trafodion $TRAF_WORKDIR/hbase-site.xml
 # create new directories for bulkload and lobs if not already there
 rm $LOCAL_WORKDIR/traf_temp_output 2>/dev/null
 
-ssh -q -n $AMBARI_HOST 'sudo su' "$HDFS_USER" '--command "hadoop fs -mkdir /hbase-staging" 2> $HOME/traf_temp_output'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -mkdir /hbase-staging" 2> $HOME/traf_temp_output'
 if [ $? != 0 ]; then
    # ok if directory already exists
-   dir_exists=$(ssh -q -n $AMBARI_HOST 'grep "File exists" $HOME/traf_temp_output | wc -l')
+   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" $HOME/traf_temp_output | wc -l')
    if [ $dir_exists -eq 0 ]; then
       echo "***ERROR: 'hadoop fs -mkdir /hbase-staging' command failed"
-      echo "***ERROR: $(ssh -q -n $AMBARI_HOST cat $HOME/traf_temp_output)"
+      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
       exit -1
    fi
 fi
-ssh -q -n $AMBARI_HOST 'sudo su' "$HDFS_USER" '--command "hadoop fs -chown -R' "$HBASE_USER"':'"$HBASE_GROUP" '/hbase-staging"'
-ssh -q -n $AMBARI_HOST 'sudo su' "$HDFS_USER" '--command "hadoop fs -mkdir /bulkload" 2> $HOME/traf_temp_output'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -chown -R' "$HBASE_USER"':'"$HBASE_GROUP" '/hbase-staging"'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -mkdir /bulkload" 2> $HOME/traf_temp_output'
 if [ $? != 0 ]; then
    # ok if directory already exists
-   dir_exists=$(ssh -q -n $AMBARI_HOST 'grep "File exists" $HOME/traf_temp_output | wc -l')
+   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" $HOME/traf_temp_output | wc -l')
    if [ $dir_exists -eq 0 ]; then
       echo "***ERROR: 'hadoop fs -mkdir /bulkload' command failed"
-      echo "***ERROR: $(ssh -q -n $AMBARI_HOST cat $HOME/traf_temp_output)"
+      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
       exit -1
    fi
 fi
-ssh -q -n $AMBARI_HOST 'sudo su' "$HDFS_USER" '--command "hadoop fs -chown -R' "$TRAF_USER"':trafodion /bulkload"'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -chown -R' "$TRAF_USER"':trafodion /bulkload"'
 
 # Create lobs directory
-ssh -q -n $AMBARI_HOST 'sudo su hdfs --command "hadoop fs -mkdir /lobs" 2> $HOME/traf_temp_output'
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hadoop fs -mkdir /lobs" 2> $HOME/traf_temp_output'
 if [ $? != 0 ]; then
    # ok if directory already exists
-   dir_exists=$(ssh -q -n $AMBARI_HOST 'grep "File exists" $HOME/traf_temp_output | wc -l')
+   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" $HOME/traf_temp_output | wc -l')
    if [ $dir_exists -eq 0 ]; then
       echo "***ERROR: 'hadoop fs -mkdir /lobs' command failed"
-      echo "***ERROR: $(ssh -q -n $AMBARI_HOST cat $HOME/traf_temp_output)"
+      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
       exit -1
    fi
 fi
-ssh -q -n $AMBARI_HOST 'sudo su hdfs --command "hadoop fs -chown -R' "$TRAF_USER"':trafodion /lobs"'
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hadoop fs -chown -R' "$TRAF_USER"':trafodion /lobs"'
 
-ssh -q -n $AMBARI_HOST 'sudo su' "$HDFS_USER" '--command "hadoop fs -mkdir /trafodion_backups" 2> $HOME/traf_temp_output'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -mkdir /trafodion_backups" 2> $HOME/traf_temp_output'
 if [ $? != 0 ]; then
    # ok if directory already exists
-   dir_exists=$(ssh -q -n $AMBARI_HOST 'grep "File exists" $HOME/traf_temp_output | wc -l')
+   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" $HOME/traf_temp_output | wc -l')
    if [ $dir_exists -eq 0 ]; then
       echo "***ERROR: 'hadoop fs -mkdir /trafodion_backups' command failed"
-      echo "***ERROR: $(ssh -q -n $AMBARI_HOST cat $HOME/traf_temp_output)"
+      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
       exit -1
    fi
 fi
-ssh -q -n $AMBARI_HOST 'sudo su' "$HDFS_USER" '--command "hadoop fs -chown -R' "$TRAF_USER"':trafodion /trafodion_backups"'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -chown -R' "$TRAF_USER"':trafodion /trafodion_backups"'
 
 
-ssh -q -n $AMBARI_HOST 'rm -rf $HOME/traf_temp_output'
+ssh -q -n $HDFS_NODE 'rm -rf $HOME/traf_temp_output'
 #=====================================
 # change the hbase configuration using Ambari's script
 
@@ -539,59 +543,32 @@ echo "***INFO: HBase restart completed"
 # NOTE: These command must be done AFTER acls are 
 #       enabled and HDFS has been restarted
 echo "***INFO: Setting HDFS ACLs for snapshot scan support"
-ssh -q -n $AMBARI_HOST 'sudo su hdfs --command "hdfs dfs -mkdir -p /apps/hbase/data/archive"'
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hdfs dfs -mkdir -p /apps/hbase/data/archive"'
 if [ $? != 0 ]; then
    echo "***ERROR: (hdfs dfs -mkdir -p /apps/hbase/data/archive) command failed"
    exit -1
 fi
-ssh -q -n $AMBARI_HOST 'sudo su hdfs --command "hdfs dfs -chown hbase:hdfs /apps/hbase/data/archive"'
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hdfs dfs -chown hbase:hdfs /apps/hbase/data/archive"'
 if [ $? != 0 ]; then
    echo "***ERROR: (hdfs dfs -chown hbase:hdfs /apps/hbase/data/archive) command failed"
    exit -1
 fi
-ssh -q -n $AMBARI_HOST 'sudo su hdfs --command "hdfs dfs -setfacl -R -m user:'"$TRAF_USER"':rwx /apps/hbase/data/archive"'
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hdfs dfs -setfacl -R -m user:'"$TRAF_USER"':rwx /apps/hbase/data/archive"'
 if [ $? != 0 ]; then
    echo "***ERROR: (hdfs dfs -setfacl -R -m mask::rwx /apps/hbase/data/archive) command failed"
    exit -1
 fi
-ssh -q -n $AMBARI_HOST 'sudo su hdfs --command "hdfs dfs -setfacl -R -m default:user:'"$TRAF_USER"':rwx /apps/hbase/data/archive"'
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hdfs dfs -setfacl -R -m default:user:'"$TRAF_USER"':rwx /apps/hbase/data/archive"'
 if [ $? != 0 ]; then
    echo "***ERROR: (hdfs dfs -setfacl -R -m mask::rwx /apps/hbase/data/archive) command failed"
    exit -1
 fi
-ssh -q -n $AMBARI_HOST 'sudo su hdfs --command "hdfs dfs -setfacl -R -m mask::rwx /apps/hbase/data/archive"'
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hdfs dfs -setfacl -R -m mask::rwx /apps/hbase/data/archive"'
 if [ $? != 0 ]; then
    echo "***ERROR: (hdfs dfs -setfacl -R -m mask::rwx /apps/hbase/data/archive) command failed"
    exit -1
 fi
 
-ssh -q -n $AMBARI_HOST 'sudo su hdfs --command "hdfs dfs -mkdir -p /apps/hbase/data/archive/data"'
-if [ $? != 0 ]; then
-   echo "***ERROR: (hdfs dfs -mkdir -p /apps/hbase/data/archive/data) command failed"
-   exit -1
-fi
-
-ssh -q -n $AMBARI_HOST 'sudo su hdfs --command "hdfs dfs -chown hbase:hdfs /apps/hbase/data/archive/data"'
-if [ $? != 0 ]; then
-   echo "***ERROR: (hdfs dfs -chown hbase:hdfs /apps/hbase/data/archive/data) command failed"
-   exit -1
-fi
-
-ssh -q -n $AMBARI_HOST 'sudo su hdfs --command "hdfs dfs -mkdir -p /apps/hbase/data/archive/data/default"'
-if [ $? != 0 ]; then
-   echo "***ERROR: (hdfs dfs -mkdir -p /apps/hbase/data/archive/data/default) command failed"
-   exit -1
-fi
-
-ssh -q -n $AMBARI_HOST 'sudo su hdfs --command "hdfs dfs -chown hbase:hdfs /apps/hbase/data/archive/data/default"'
-if [ $? != 0 ]; then
-   echo "***ERROR: (hdfs dfs -chown hbase:hdfs /apps/hbase/data/archive/data/default) command
failed"
-   exit -1
-fi
-
-
-
-
 # clean up files generated by Ambari's config.sh script
 ssh -q -n $AMBARI_HOST 'rm $HOME/doSet_version*'
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/93728159/install/installer/trafodion_apache_hadoop_install
----------------------------------------------------------------------
diff --git a/install/installer/trafodion_apache_hadoop_install b/install/installer/trafodion_apache_hadoop_install
index dc5be0a..ca36122 100755
--- a/install/installer/trafodion_apache_hadoop_install
+++ b/install/installer/trafodion_apache_hadoop_install
@@ -333,7 +333,7 @@ if [ ! -z $TRAF_PACKAGE ]; then
    # untar the package
    sudo tar -xzf $TRAF_PACKAGE --directory=$UNTAR_DIR
 
-   build_file=$(ls $UNTAR_DIR | grep "trafodion_server")
+   build_file=$(ls $UNTAR_DIR | grep "trafodion_.*server")
    if [[ -z $build_file ]]; then
       echo "***ERROR: trafodion_server-*.tgz could not be found in $UNTAR_DIR"
       exit -1
@@ -454,7 +454,7 @@ sudo rm -rf hbaseVersion.txt
 trafodionFullName=$(basename $TRAF_BUILD)
 echo "***DEBUG: trafodionFullName=$trafodionFullName"
 #Debug version
-trafodionVersion=$(echo $trafodionFullName | sed 's/trafodion_server-//' | sed 's/-debug//' | sed 's/.tgz//')
+trafodionVersion=$(echo $trafodionFullName | sed 's/trafodion_.*server-//' | sed 's/-debug//' | sed 's/.tgz//')
 
 echo "***INFO: Trafodion version = $trafodionVersion"
 #==============================================
@@ -474,12 +474,6 @@ if [ ${PIPESTATUS[0]} != 0 ]; then
     exit -1
 fi
 
-# parse the version string from the trafodion_server-n.n.n.jar filename
-traf_version=$(sudo ls $UNTAR_DIR | \
-               grep --max-count=1 trafodion_server | \
-               sed -e 's@trafodion_server-\([0-9*].*\).tgz@\1@')
-TRAF_VERSION=$(echo $traf_version | sed -e 's@\([0-9*].*\)-debug@\1@')
-
 # Detect java version
 
 for node in $HADOOP_NODES
@@ -527,8 +521,6 @@ else
 fi
 
 sudo chmod 777 $TRAF_CONFIG
-sed -i '/TRAF_VERSION\=/d' $TRAF_CONFIG
-echo "export TRAF_VERSION=\"$TRAF_VERSION\"" >> $TRAF_CONFIG
 sed -i '/JAVA_VERSION\=/d' $TRAF_CONFIG
 echo "export JAVA_VERSION=\"$JAVA_VERSION\"" >> $TRAF_CONFIG
 sudo chmod 777 $TRAF_CONFIG

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/93728159/install/installer/trafodion_install
----------------------------------------------------------------------
diff --git a/install/installer/trafodion_install b/install/installer/trafodion_install
index 7fc13dd..2e25bad 100755
--- a/install/installer/trafodion_install
+++ b/install/installer/trafodion_install
@@ -557,18 +557,6 @@ if [ ${PIPESTATUS[0]} != 0 ]; then
     exit -1
 fi
 
-# parse the version string from the trafodion_server-n.n.n.jar filename
-traf_version=$(sudo ls $UNTAR_DIR | \
-               grep --max-count=1 trafodion_.*server | \
-               sed -e 's@trafodion_.*server-\([0-9*].*\).tgz@\1@')
-TRAF_VERSION=$(echo $traf_version | sed -e 's@\([0-9*].*\)-debug@\1@')
-
-# Detect java version
-
-sudo chmod 777 $TRAF_CONFIG
-sed -i '/TRAF_VERSION\=/d' $TRAF_CONFIG
-echo "export TRAF_VERSION=\"$TRAF_VERSION\"" >> $TRAF_CONFIG
-sudo chmod 777 $TRAF_CONFIG
 #==============================================
 # Run trafodion_setup
 echo

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/93728159/install/installer/trafodion_uninstaller
----------------------------------------------------------------------
diff --git a/install/installer/trafodion_uninstaller b/install/installer/trafodion_uninstaller
index 0924dcd..e9e27b2 100755
--- a/install/installer/trafodion_uninstaller
+++ b/install/installer/trafodion_uninstaller
@@ -48,7 +48,7 @@ fi
 #=========================================
 # define defaults
 TRAF_GROUP="trafodion"
-HBASE_TRX="hbase-trx*"
+#HBASE_TRX="hbase-trx*"
 
 UNINSTALL_ALL="N"
 UNINSTALL_DIR=""
@@ -104,13 +104,13 @@ echo "***INFO: removing $HBASE_TRX from Hadoop directories"
 if [ $all_node_count -eq 1 ]; then
    sudo rm /etc/security/limits.d/trafodion.conf 2>/dev/null
    sudo rm /usr/share/cmf/lib/plugins/$HBASE_TRX 2>/dev/null
-   sudo rm /usr/lib/hbase/lib/$HBASE_TRX 2>/dev/null
-   sudo rm /usr/hdp/current/hbase-regionserver/lib/$HBASE_TRX 2>/dev/null
+   #sudo rm /usr/lib/hbase/lib/$HBASE_TRX 2>/dev/null
+   #sudo rm /usr/hdp/current/hbase-regionserver/lib/$HBASE_TRX 2>/dev/null
 else
    $TRAF_PDSH "sudo rm /etc/security/limits.d/trafodion.conf 2>/dev/null"
    $TRAF_PDSH "sudo rm /usr/share/cmf/lib/plugins/$HBASE_TRX 2>/dev/null"
-   $TRAF_PDSH "sudo rm /usr/lib/hbase/lib/$HBASE_TRX 2>/dev/null"
-   $TRAF_PDSH "sudo rm /usr/hdp/current/hbase-regionserver/lib/$HBASE_TRX 2>/dev/null"

+   #$TRAF_PDSH "sudo rm /usr/lib/hbase/lib/$HBASE_TRX 2>/dev/null"
+   #$TRAF_PDSH "sudo rm /usr/hdp/current/hbase-regionserver/lib/$HBASE_TRX 2>/dev/null"

 fi
 
 echo "***INFO remove the Trafodion userid and group"

