hadoop-hdfs-commits mailing list archives

From: w...@apache.org
Subject: svn commit: r1619018 - in /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ src/main/java/org/apache/hadoop/hdfs/server/datanode/ src/test/...
Date: Wed, 20 Aug 2014 01:22:45 GMT
Author: wang
Date: Wed Aug 20 01:22:42 2014
New Revision: 1619018

URL: http://svn.apache.org/r1619018
Log:
Merge from trunk to branch.

Added:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/WhitelistBasedTrustedChannelResolver.java
      - copied unchanged from r1618997, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/WhitelistBasedTrustedChannelResolver.java
Modified:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java

Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1618694-1618997

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Aug 20 01:22:42 2014
@@ -402,6 +402,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6850. Move NFS out of order write unit tests into TestWrites class.
     (Zhe Zhang via atm)
 
+    HDFS-6188. An ip whitelist based implementation of TrustedChannelResolver.
+    (Benoy Antony via Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -514,6 +517,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6825. Edit log corruption due to delayed block removal.
     (Yongjun Zhang via wang)
 
+    HDFS-6569. OOB message can't be sent to the client when DataNode shuts down for upgrade.
+    (brandonli)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
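
For context on the HDFS-6188 entry: WhitelistBasedTrustedChannelResolver treats
connections from whitelisted IP addresses as trusted channels, so data transfer
to those peers can skip encryption. A minimal sketch of checking which resolver
a cluster is using, assuming the key name dfs.trustedchannel.resolver.class
(the key name is an assumption, not confirmed by this commit):

    # Print the configured TrustedChannelResolver implementation
    # (key name assumed; verify against your Hadoop release):
    hdfs getconf -confKey dfs.trustedchannel.resolver.class
    # Expected when the whitelist resolver is enabled:
    # org.apache.hadoop.hdfs.protocol.datatransfer.WhitelistBasedTrustedChannelResolver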

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh Wed Aug 20 01:22:42 2014
@@ -57,9 +57,9 @@ excludeFilenameRemote=$("$HADOOP_PREFIX/
 
 if [ "$excludeFilenameRemote" = '' ] ; then
   echo \
-    "Error: hdfs getconf -excludeFile returned empty string, " \
-    "please setup dfs.hosts.exclude in hdfs-site.xml in local cluster " \
-    "configuration and on all namenodes"
+  "Error: hdfs getconf -excludeFile returned empty string, " \
+  "please setup dfs.hosts.exclude in hdfs-site.xml in local cluster " \
+  "configuration and on all namenodes"
   exit 1
 fi
 

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs Wed Aug 20 01:22:42 2014
@@ -15,253 +15,241 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Environment Variables
-#
-#   JSVC_HOME  home directory of jsvc binary.  Required for starting secure
-#              datanode.
-#
-#   JSVC_OUTFILE  path to jsvc output file.  Defaults to
-#                 $HADOOP_LOG_DIR/jsvc.out.
-#
-#   JSVC_ERRFILE  path to jsvc error file.  Defaults to $HADOOP_LOG_DIR/jsvc.err.
-
-bin=`which $0`
-bin=`dirname ${bin}`
-bin=`cd "$bin" > /dev/null; pwd`
-
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
-
-function print_usage(){
-  echo "Usage: hdfs [--config confdir] COMMAND"
+function hadoop_usage
+{
+  echo "Usage: hdfs [--config confdir] [--daemon (start|stop|status)] COMMAND"
   echo "       where COMMAND is one of:"
-  echo "  dfs                  run a filesystem command on the file systems supported in Hadoop."
-  echo "  namenode -format     format the DFS filesystem"
-  echo "  secondarynamenode    run the DFS secondary namenode"
-  echo "  namenode             run the DFS namenode"
-  echo "  journalnode          run the DFS journalnode"
-  echo "  zkfc                 run the ZK Failover Controller daemon"
+  echo "  balancer             run a cluster balancing utility"
+  echo "  cacheadmin           configure the HDFS cache"
+  echo "  classpath            prints the class path needed to get the"
+  echo "                       Hadoop jar and the required libraries"
   echo "  datanode             run a DFS datanode"
+  echo "  dfs                  run a filesystem command on the file system"
   echo "  dfsadmin             run a DFS admin client"
-  echo "  haadmin              run a DFS HA admin client"
-  echo "  fsck                 run a DFS filesystem checking utility"
-  echo "  balancer             run a cluster balancing utility"
-  echo "  jmxget               get JMX exported values from NameNode or DataNode."
-  echo "  oiv                  apply the offline fsimage viewer to an fsimage"
-  echo "  oiv_legacy           apply the offline fsimage viewer to an legacy fsimage"
-  echo "  oev                  apply the offline edits viewer to an edits file"
   echo "  fetchdt              fetch a delegation token from the NameNode"
+  echo "  fsck                 run a DFS filesystem checking utility"
   echo "  getconf              get config values from configuration"
   echo "  groups               get the groups which users belong to"
-  echo "  snapshotDiff         diff two snapshots of a directory or diff the"
-  echo "                       current directory contents with a snapshot"
+  echo "  haadmin              run a DFS HA admin client"
+  echo "  jmxget               get JMX exported values from NameNode or DataNode."
+  echo "  journalnode          run the DFS journalnode"
   echo "  lsSnapshottableDir   list all snapshottable dirs owned by the current user"
-  echo "						Use -help to see options"
-  echo "  portmap              run a portmap service"
+  echo "                               Use -help to see options"
+  echo "  namenode             run the DFS namenode"
+  echo "                               Use -format to initialize the DFS filesystem"
   echo "  nfs3                 run an NFS version 3 gateway"
-  echo "  cacheadmin           configure the HDFS cache"
+  echo "  oev                  apply the offline edits viewer to an edits file"
+  echo "  oiv                  apply the offline fsimage viewer to an fsimage"
+  echo "  oiv_legacy           apply the offline fsimage viewer to a legacy fsimage"
+  echo "  portmap              run a portmap service"
+  echo "  secondarynamenode    run the DFS secondary namenode"
+  echo "  snapshotDiff         diff two snapshots of a directory or diff the"
+  echo "                       current directory contents with a snapshot"
+  echo "  zkfc                 run the ZK Failover Controller daemon"
   echo "  crypto               configure HDFS encryption zones"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
 }
 
-if [ $# = 0 ]; then
-  print_usage
-  exit
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
+
+if [[ $# = 0 ]]; then
+  hadoop_exit_with_usage 1
 fi
 
 COMMAND=$1
 shift
 
-case $COMMAND in
-  # usage flags
-  --help|-help|-h)
-    print_usage
+case ${COMMAND} in
+  balancer)
+    CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_BALANCER_OPTS}"
+  ;;
+  cacheadmin)
+    CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+  ;;
+  classpath)
+    hadoop_finalize
+    echo "${CLASSPATH}"
     exit
-    ;;
-esac
-
-# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
-if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  if [ -n "$JSVC_HOME" ]; then
-    if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
-      HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
-    fi
-  
-    if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
-      HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
-      HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
-    fi
-   
-    HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
-    HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
-    starting_secure_dn="true"
-  else
-    echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\
-      "isn't set. Falling back to starting insecure DN."
-  fi
-fi
-
-# Determine if we're starting a privileged NFS daemon, and if so, redefine appropriate variables
-if [ "$COMMAND" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then
-  if [ -n "$JSVC_HOME" ]; then
-    if [ -n "$HADOOP_PRIVILEGED_NFS_PID_DIR" ]; then
-      HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR
+  ;;
+  crypto)
+    CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
+  ;;
+  datanode)
+    daemon="true"
+    # Determine if we're starting a secure datanode, and
+    # if so, redefine appropriate variables
+    if [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
+      secure_service="true"
+      secure_user="${HADOOP_SECURE_DN_USER}"
+      
+      # backward compatibility
+      HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_DN_PID_DIR}"
+      HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_DN_LOG_DIR}"
+      
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS} ${HADOOP_DATANODE_OPTS}"
+      CLASS="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
+    else
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS}"
+      CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
     fi
-  
-    if [ -n "$HADOOP_PRIVILEGED_NFS_LOG_DIR" ]; then
-      HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR
-      HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+  ;;
+  dfs)
+    CLASS=org.apache.hadoop.fs.FsShell
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  ;;
+  dfsadmin)
+    CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  ;;
+  fetchdt)
+    CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+  ;;
+  fsck)
+    CLASS=org.apache.hadoop.hdfs.tools.DFSck
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  ;;
+  getconf)
+    CLASS=org.apache.hadoop.hdfs.tools.GetConf
+  ;;
+  groups)
+    CLASS=org.apache.hadoop.hdfs.tools.GetGroups
+  ;;
+  haadmin)
+    CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
+    CLASSPATH="${CLASSPATH}:${TOOL_PATH}"
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  ;;
+  journalnode)
+    daemon="true"
+    CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOURNALNODE_OPTS}"
+  ;;
+  jmxget)
+    CLASS=org.apache.hadoop.hdfs.tools.JMXGet
+  ;;
+  lsSnapshottableDir)
+    CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+  ;;
+  namenode)
+    daemon="true"
+    CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NAMENODE_OPTS}"
+  ;;
+  nfs3)
+    daemon="true"
+    if [[ -n "${HADOOP_PRIVILEGED_NFS_USER}" ]]; then
+      secure_service="true"
+      secure_user="${HADOOP_PRIVILEGED_NFS_USER}"
+      
+      # backward compatibility
+      HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_NFS3_PID_DIR}"
+      HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_NFS3_LOG_DIR}"
+      
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS} ${HADOOP_NFS3_OPTS}"
+      CLASS=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
+    else
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS}"
+      CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
     fi
-   
-    HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER
-    HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
-    starting_privileged_nfs="true"
-  else
-    echo "It looks like you're trying to start a privileged NFS server, but"\
-      "\$JSVC_HOME isn't set. Falling back to starting unprivileged NFS server."
-  fi
-fi
+  ;;
+  oev)
+    CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
+  ;;
+  oiv)
+    CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
+  ;;
+  oiv_legacy)
+    CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
+  ;;
+  portmap)
+    daemon="true"
+    CLASS=org.apache.hadoop.portmap.Portmap
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_PORTMAP_OPTS}"
+  ;;
+  secondarynamenode)
+    daemon="true"
+    CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_SECONDARYNAMENODE_OPTS}"
+  ;;
+  snapshotDiff)
+    CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
+  ;;
+  zkfc)
+    daemon="true"
+    CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_ZKFC_OPTS}"
+  ;;
+  -*)
+    hadoop_exit_with_usage 1
+  ;;
+  *)
+    CLASS="${COMMAND}"
+  ;;
+esac
 
-if [ "$COMMAND" = "namenode" ] ; then
-  CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
-elif [ "$COMMAND" = "zkfc" ] ; then
-  CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ZKFC_OPTS"
-elif [ "$COMMAND" = "secondarynamenode" ] ; then
-  CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
-elif [ "$COMMAND" = "datanode" ] ; then
-  CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
-  if [ "$starting_secure_dn" = "true" ]; then
-    HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS"
-  else
-    HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
+if [[ -n "${secure_service}" ]]; then
+  HADOOP_SECURE_USER="${secure_user}"
+  if hadoop_verify_secure_prereq; then
+    hadoop_setup_secure_service
+    priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.out"
+    priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.err"
+    priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.pid"
+    daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
+    daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
   fi
-elif [ "$COMMAND" = "journalnode" ] ; then
-  CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOURNALNODE_OPTS"
-elif [ "$COMMAND" = "dfs" ] ; then
-  CLASS=org.apache.hadoop.fs.FsShell
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
-elif [ "$COMMAND" = "dfsadmin" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
-elif [ "$COMMAND" = "haadmin" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
-  CLASSPATH=${CLASSPATH}:${TOOL_PATH}
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
-elif [ "$COMMAND" = "fsck" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.DFSck
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
-elif [ "$COMMAND" = "balancer" ] ; then
-  CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
-elif [ "$COMMAND" = "jmxget" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.JMXGet
-elif [ "$COMMAND" = "oiv" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
-elif [ "$COMMAND" = "oiv_legacy" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
-elif [ "$COMMAND" = "oev" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
-elif [ "$COMMAND" = "fetchdt" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
-elif [ "$COMMAND" = "getconf" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.GetConf
-elif [ "$COMMAND" = "groups" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.GetGroups
-elif [ "$COMMAND" = "snapshotDiff" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
-elif [ "$COMMAND" = "lsSnapshottableDir" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
-elif [ "$COMMAND" = "portmap" ] ; then
-  CLASS=org.apache.hadoop.portmap.Portmap
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_PORTMAP_OPTS"
-elif [ "$COMMAND" = "nfs3" ] ; then
-  CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NFS3_OPTS"
-elif [ "$COMMAND" = "cacheadmin" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
-elif [ "$COMMAND" = "crypto" ] ; then
-  CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
 else
-  CLASS="$COMMAND"
+  daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
+  daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
 fi
 
-export CLASSPATH=$CLASSPATH
-
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
-
-# Check to see if we should start a secure datanode
-if [ "$starting_secure_dn" = "true" ]; then
-  if [ "$HADOOP_PID_DIR" = "" ]; then
-    HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
+if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
+  # shellcheck disable=SC2034
+  HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
+  if [[ -n "${secure_service}" ]]; then
+    # shellcheck disable=SC2034
+    HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
   else
-    HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
-  fi
-
-  JSVC=$JSVC_HOME/jsvc
-  if [ ! -f $JSVC ]; then
-    echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run secure datanodes. "
-    echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
-      "and set JSVC_HOME to the directory containing the jsvc binary."
-    exit
+    # shellcheck disable=SC2034
+    HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
   fi
+fi
 
-  if [[ ! $JSVC_OUTFILE ]]; then
-    JSVC_OUTFILE="$HADOOP_LOG_DIR/jsvc.out"
-  fi
+hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
+hadoop_finalize
 
-  if [[ ! $JSVC_ERRFILE ]]; then
-    JSVC_ERRFILE="$HADOOP_LOG_DIR/jsvc.err"
-  fi
+export CLASSPATH
 
-  exec "$JSVC" \
-           -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
-           -errfile "$JSVC_ERRFILE" \
-           -pidfile "$HADOOP_SECURE_DN_PID" \
-           -nodetach \
-           -user "$HADOOP_SECURE_DN_USER" \
-            -cp "$CLASSPATH" \
-           $JAVA_HEAP_MAX $HADOOP_OPTS \
-           org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@"
-elif [ "$starting_privileged_nfs" = "true" ] ; then
-  if [ "$HADOOP_PID_DIR" = "" ]; then
-    HADOOP_PRIVILEGED_NFS_PID="/tmp/hadoop_privileged_nfs3.pid"
+if [[ -n "${daemon}" ]]; then
+  if [[ -n "${secure_service}" ]]; then
+    hadoop_secure_daemon_handler \
+    "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}"\
+    "${daemon_pidfile}" "${daemon_outfile}" \
+    "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}" "$@"
   else
-    HADOOP_PRIVILEGED_NFS_PID="$HADOOP_PID_DIR/hadoop_privileged_nfs3.pid"
-  fi
-
-  JSVC=$JSVC_HOME/jsvc
-  if [ ! -f $JSVC ]; then
-    echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run privileged NFS gateways. "
-    echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
-      "and set JSVC_HOME to the directory containing the jsvc binary."
-    exit
-  fi
-
-  if [[ ! $JSVC_OUTFILE ]]; then
-    JSVC_OUTFILE="$HADOOP_LOG_DIR/nfs3_jsvc.out"
+    hadoop_daemon_handler "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}"\
+    "${daemon_pidfile}" "${daemon_outfile}" "$@"
   fi
-
-  if [[ ! $JSVC_ERRFILE ]]; then
-    JSVC_ERRFILE="$HADOOP_LOG_DIR/nfs3_jsvc.err"
-  fi
-
-  exec "$JSVC" \
-           -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
-           -errfile "$JSVC_ERRFILE" \
-           -pidfile "$HADOOP_PRIVILEGED_NFS_PID" \
-           -nodetach \
-           -user "$HADOOP_PRIVILEGED_NFS_USER" \
-           -cp "$CLASSPATH" \
-           $JAVA_HEAP_MAX $HADOOP_OPTS \
-           org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter "$@"
+  exit $?
 else
-  # run it
-  exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
+  # shellcheck disable=SC2086
+  hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"
 fi
-
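
The rewritten bin/hdfs above folds daemon lifecycle management into the command
itself through hadoop_daemon_handler and hadoop_secure_daemon_handler, per the
new "--daemon (start|stop|status)" usage string. A sketch of the resulting
invocations (exact behavior depends on the hadoop-functions library this branch
inherits from trunk):

    # Client commands work as before:
    hdfs dfs -ls /

    # Daemons can now be driven directly from the hdfs command, where the
    # old script required the hadoop-daemon.sh wrapper:
    hdfs --daemon start namenode
    hdfs --daemon status namenode
    hdfs --daemon stop namenode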

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh Wed Aug 20 01:22:42 2014
@@ -18,19 +18,67 @@
 # included in all the hdfs scripts with source command
 # should not be executed directly
 
-bin=`which "$0"`
-bin=`dirname "${bin}"`
-bin=`cd "$bin"; pwd`
+function hadoop_subproject_init
+{
+  if [ -e "${HADOOP_CONF_DIR}/hdfs-env.sh" ]; then
+    . "${HADOOP_CONF_DIR}/hdfs-env.sh"
+  fi
+  
+  # at some point in time, someone thought it would be a good idea to
+  # create separate vars for every subproject.  *sigh*
+  # let's perform some overrides and set up some defaults for bw compat
+  # this way the common hadoop vars == subproject vars and can be
+  # used interchangeably from here on out
+  # ...
+  # this should get deprecated at some point.
+  HADOOP_LOG_DIR="${HADOOP_HDFS_LOG_DIR:-$HADOOP_LOG_DIR}"
+  HADOOP_HDFS_LOG_DIR="${HADOOP_LOG_DIR}"
+  
+  HADOOP_LOGFILE="${HADOOP_HDFS_LOGFILE:-$HADOOP_LOGFILE}"
+  HADOOP_HDFS_LOGFILE="${HADOOP_LOGFILE}"
+  
+  HADOOP_NICENESS=${HADOOP_HDFS_NICENESS:-$HADOOP_NICENESS}
+  HADOOP_HDFS_NICENESS="${HADOOP_NICENESS}"
+  
+  HADOOP_STOP_TIMEOUT=${HADOOP_HDFS_STOP_TIMEOUT:-$HADOOP_STOP_TIMEOUT}
+  HADOOP_HDFS_STOP_TIMEOUT="${HADOOP_STOP_TIMEOUT}"
+  
+  HADOOP_PID_DIR="${HADOOP_HDFS_PID_DIR:-$HADOOP_PID_DIR}"
+  HADOOP_HDFS_PID_DIR="${HADOOP_PID_DIR}"
+  
+  HADOOP_ROOT_LOGGER=${HADOOP_HDFS_ROOT_LOGGER:-$HADOOP_ROOT_LOGGER}
+  HADOOP_HDFS_ROOT_LOGGER="${HADOOP_ROOT_LOGGER}"
+  
+  HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_HOME_DIR}"
+  
+  HADOOP_IDENT_STRING="${HADOOP_HDFS_IDENT_STRING:-$HADOOP_IDENT_STRING}"
+  HADOOP_HDFS_IDENT_STRING="${HADOOP_IDENT_STRING}"
+  
+  # turn on the defaults
+  
+  export HADOOP_NAMENODE_OPTS=${HADOOP_NAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"}
+  export HADOOP_SECONDARYNAMENODE_OPTS=${HADOOP_SECONDARYNAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"}
+  export HADOOP_DATANODE_OPTS=${HADOOP_DATANODE_OPTS:-"-Dhadoop.security.logger=ERROR,RFAS"}
+  export HADOOP_DN_SECURE_EXTRA_OPTS=${HADOOP_DN_SECURE_EXTRA_OPTS:-"-jvm server"}
+  export HADOOP_NFS3_SECURE_EXTRA_OPTS=${HADOOP_NFS3_SECURE_EXTRA_OPTS:-"-jvm server"}
+  export HADOOP_PORTMAP_OPTS=${HADOOP_PORTMAP_OPTS:-"-Xmx512m"}
+  
+  
+}
+
+if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
+  _hd_this="${BASH_SOURCE-$0}"
+  HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hd_this}")" >/dev/null && pwd -P)
+fi
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
 if [ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]; then
-  . ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
 elif [ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]; then
-  . "$HADOOP_COMMON_HOME"/libexec/hadoop-config.sh
+  . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
 elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
-  . "$HADOOP_HOME"/libexec/hadoop-config.sh
+  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
 else
-  echo "Hadoop common not found."
-  exit
+  echo "ERROR: Hadoop common not found." 2>&1
+  exit 1
 fi
+
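
hadoop_subproject_init above sources ${HADOOP_CONF_DIR}/hdfs-env.sh and then
maps each HADOOP_HDFS_* override back onto the generic HADOOP_* variable. A
minimal hypothetical hdfs-env.sh showing the mechanism (paths and values are
examples, not from this commit):

    # ${HADOOP_CONF_DIR}/hdfs-env.sh -- sourced by hadoop_subproject_init.
    # HDFS-specific settings take precedence and are copied back into the
    # generic HADOOP_* variables for the rest of the script run.
    export HADOOP_HDFS_LOG_DIR=/var/log/hadoop-hdfs
    export HADOOP_HDFS_PID_DIR=/var/run/hadoop-hdfs
    export HADOOP_HDFS_NICENESS=5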

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh Wed Aug 20 01:22:42 2014
@@ -20,24 +20,40 @@
 # This script refreshes all namenodes, it's a simple wrapper
 # for dfsadmin to support multiple namenodes.
 
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
 
-namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -nnRpcAddresses)
-if [ "$?" != '0' ] ; then errorFlag='1' ; 
+namenodes=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -nnRpcAddresses)
+if [[ "$?" != '0' ]] ; then
+  errorFlag='1' ;
 else
-  for namenode in $namenodes ; do
-    echo "Refreshing namenode [$namenode]"
-    "$HADOOP_PREFIX/bin/hdfs" dfsadmin -fs hdfs://$namenode -refreshNodes
-    if [ "$?" != '0' ] ; then errorFlag='1' ; fi
+  for namenode in ${namenodes} ; do
+    echo "Refreshing namenode [${namenode}]"
+    "${HADOOP_HDFS_HOME}/bin/hdfs" dfsadmin \
+    -fs hdfs://${namenode} -refreshNodes
+    if [[ "$?" != '0' ]]; then
+      errorFlag='1'
+    fi
   done
 fi
 
-if [ "$errorFlag" = '1' ] ; then
+if [[ "${errorFlag}" = '1' ]] ; then
   echo "Error: refresh of namenodes failed, see error messages above."
   exit 1
 else
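
Each iteration of the loop above is equivalent to refreshing one namenode by
hand (hostname and port are illustrative):

    # Manual equivalent of a single loop iteration:
    hdfs dfsadmin -fs hdfs://nn1.example.com:8020 -refreshNodes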

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh Wed Aug 20 01:22:42 2014
@@ -15,13 +15,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+function usage
+{
+  echo "Usage: start-balancer.sh [--config confdir]  [-policy <policy>] [-threshold <threshold>]"
+}
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
 
 # Start balancer daemon.
 
-"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@
+exec "${bin}/hadoop-daemon.sh" --config "${HADOOP_CONF_DIR}" start balancer "$@"
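
Given the usage function added above, a typical invocation looks like this
(the threshold value is illustrative; it is the per-datanode deviation from
average cluster utilization, in percent, at which the cluster counts as
balanced):

    # Balance until every datanode is within 5% of average utilization:
    start-balancer.sh -threshold 5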

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh Wed Aug 20 01:22:42 2014
@@ -20,98 +20,128 @@
 # Optionally upgrade or roll back dfs state.
 # Run this on master node.
 
-usage="Usage: start-dfs.sh [-upgrade|-rollback] [other options such as -clusterId]"
+function hadoop_usage
+{
+  echo "Usage: start-dfs.sh [-upgrade|-rollback] [-clusterId]"
+}
+
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
 
 # get arguments
-if [ $# -ge 1 ]; then
-	nameStartOpt="$1"
-	shift
-	case "$nameStartOpt" in
-	  (-upgrade)
-	  	;;
-	  (-rollback) 
-	  	dataStartOpt="$nameStartOpt"
-	  	;;
-	  (*)
-		  echo $usage
-		  exit 1
-	    ;;
-	esac
+if [[ $# -ge 1 ]]; then
+  nameStartOpt="$1"
+  shift
+  case "$nameStartOpt" in
+    -upgrade)
+    ;;
+    -rollback)
+      dataStartOpt="$nameStartOpt"
+    ;;
+    *)
+      hadoop_exit_with_usage 1
+    ;;
+  esac
 fi
 
+
 #Add other possible options
 nameStartOpt="$nameStartOpt $@"
 
 #---------------------------------------------------------
 # namenodes
 
-NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
+NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)
+
+if [[ -z "${NAMENODES}" ]]; then
+  NAMENODES=$(hostname)
+fi
 
 echo "Starting namenodes on [$NAMENODES]"
 
-"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-  --config "$HADOOP_CONF_DIR" \
-  --hostnames "$NAMENODES" \
-  --script "$bin/hdfs" start namenode $nameStartOpt
+"${bin}/hadoop-daemons.sh" \
+--config "${HADOOP_CONF_DIR}" \
+--hostnames "${NAMENODES}" \
+start namenode ${nameStartOpt}
 
 #---------------------------------------------------------
 # datanodes (using default slaves file)
 
-if [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  echo \
-    "Attempting to start secure cluster, skipping datanodes. " \
-    "Run start-secure-dns.sh as root to complete startup."
+if [[ -n "${HADOOP_SECURE_DN_USER}" ]] &&
+[[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
+  echo "ERROR: Attempting to start secure cluster, skipping datanodes. "
+  echo "Run start-secure-dns.sh as root or configure "
+  echo "\${HADOOP_SECURE_COMMAND} to complete startup."
 else
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --script "$bin/hdfs" start datanode $dataStartOpt
+  
+  echo "Starting datanodes"
+  
+  "${bin}/hadoop-daemons.sh" \
+  --config "${HADOOP_CONF_DIR}" \
+  start datanode ${dataStartOpt}
 fi
 
 #---------------------------------------------------------
 # secondary namenodes (if any)
 
-SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null)
+SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)
 
-if [ -n "$SECONDARY_NAMENODES" ]; then
-  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
+if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
+  SECONDARY_NAMENODES=$(hostname)
+fi
 
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-      --config "$HADOOP_CONF_DIR" \
-      --hostnames "$SECONDARY_NAMENODES" \
-      --script "$bin/hdfs" start secondarynamenode
+if [[ -n "${SECONDARY_NAMENODES}" ]]; then
+  echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
+  
+  "${bin}/hadoop-daemons.sh" \
+  --config "${HADOOP_CONF_DIR}" \
+  --hostnames "${SECONDARY_NAMENODES}" \
+  start secondarynamenode
 fi
 
 #---------------------------------------------------------
 # quorumjournal nodes (if any)
 
-SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
 
-case "$SHARED_EDITS_DIR" in
-qjournal://*)
-  JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
-  echo "Starting journal nodes [$JOURNAL_NODES]"
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-      --config "$HADOOP_CONF_DIR" \
-      --hostnames "$JOURNAL_NODES" \
-      --script "$bin/hdfs" start journalnode ;;
+case "${SHARED_EDITS_DIR}" in
+  qjournal://*)
+    JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
+    echo "Starting journal nodes [${JOURNAL_NODES}]"
+    "${bin}/hadoop-daemons.sh" \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${JOURNAL_NODES}" \
+    start journalnode
+  ;;
 esac
 
 #---------------------------------------------------------
 # ZK Failover controllers, if auto-HA is enabled
-AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
-if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
-  echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]"
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --hostnames "$NAMENODES" \
-    --script "$bin/hdfs" start zkfc
+AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
+if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
+  echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
+  "${bin}/hadoop-daemons.sh" \
+  --config "${HADOOP_CONF_DIR}" \
+  --hostnames "${NAMENODES}" \
+  start zkfc
 fi
 
 # eof
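
The sed pipeline above reduces the dfs.namenode.shared.edits.dir URI to a
plain host list. A standalone sketch with an example URI:

    echo "qjournal://jn1.example.com:8485;jn2.example.com:8485;jn3.example.com:8485/mycluster" |
      sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g'
    # Prints: jn1.example.com jn2.example.com jn3.example.com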

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh Wed Aug 20 01:22:42 2014
@@ -17,17 +17,33 @@
 
 # Run as root to start secure datanodes in a security-enabled cluster.
 
-usage="Usage (run as root in order to start secure datanodes): start-secure-dns.sh"
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+function hadoop_usage {
+  echo "Usage: start-secure-dns.sh"
+}
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
 
-if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
 else
-  echo $usage
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
+
+if [[ "${EUID}" -eq 0 ]] && [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
+  exec "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" start datanode "${dataStartOpt}"
+else
+  hadoop_exit_with_usage 1
 fi

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh Wed Aug 20 01:22:42 2014
@@ -15,14 +15,32 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+function hadoop_usage
+{
+  echo "Usage: stop-balancer.sh [--config confdir]"
+}
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
 
 # Stop balancer daemon.
 # Run this on the machine where the balancer is running
 
-"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer
+"${bin}/hadoop-daemon.sh" --config "${HADOOP_CONF_DIR}" stop balancer

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh Wed Aug 20 01:22:42 2014
@@ -15,75 +15,100 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+function hadoop_usage
+{
+  echo "Usage: start-balancer.sh [--config confdir]  [-policy <policy>] [-threshold <threshold>]"
+}
+
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
 
 #---------------------------------------------------------
 # namenodes
 
-NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
+NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes)
 
 echo "Stopping namenodes on [$NAMENODES]"
 
-"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-  --config "$HADOOP_CONF_DIR" \
-  --hostnames "$NAMENODES" \
-  --script "$bin/hdfs" stop namenode
+"${bin}/hadoop-daemons.sh" \
+--config "${HADOOP_CONF_DIR}" \
+--hostnames "${NAMENODES}" \
+stop namenode
 
 #---------------------------------------------------------
 # datanodes (using default slaves file)
 
-if [ -n "$HADOOP_SECURE_DN_USER" ]; then
+if [[ -n "${HADOOP_SECURE_DN_USER}" ]] &&
+[[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
   echo \
-    "Attempting to stop secure cluster, skipping datanodes. " \
-    "Run stop-secure-dns.sh as root to complete shutdown."
+  "ERROR: Attempting to stop secure cluster, skipping datanodes. " \
+  "Run stop-secure-dns.sh as root to complete shutdown."
 else
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --script "$bin/hdfs" stop datanode
+  
+  echo "Stopping datanodes"
+  
+  "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" stop datanode
 fi
 
 #---------------------------------------------------------
 # secondary namenodes (if any)
 
-SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null)
+SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)
 
-if [ -n "$SECONDARY_NAMENODES" ]; then
-  echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
+  SECONDARY_NAMENODES=$(hostname)
+fi
 
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-      --config "$HADOOP_CONF_DIR" \
-      --hostnames "$SECONDARY_NAMENODES" \
-      --script "$bin/hdfs" stop secondarynamenode
+if [[ -n "${SECONDARY_NAMENODES}" ]]; then
+  echo "Stopping secondary namenodes [${SECONDARY_NAMENODES}]"
+  
+  "${bin}/hadoop-daemons.sh" \
+  --config "${HADOOP_CONF_DIR}" \
+  --hostnames "${SECONDARY_NAMENODES}" \
+  stop secondarynamenode
 fi
 
 #---------------------------------------------------------
 # quorumjournal nodes (if any)
 
-SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
 
-case "$SHARED_EDITS_DIR" in
-qjournal://*)
-  JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
-  echo "Stopping journal nodes [$JOURNAL_NODES]"
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-      --config "$HADOOP_CONF_DIR" \
-      --hostnames "$JOURNAL_NODES" \
-      --script "$bin/hdfs" stop journalnode ;;
+case "${SHARED_EDITS_DIR}" in
+  qjournal://*)
+    JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
+    echo "Stopping journal nodes [${JOURNAL_NODES}]"
+    "${bin}/hadoop-daemons.sh" \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${JOURNAL_NODES}" \
+    stop journalnode
+  ;;
 esac
 
 #---------------------------------------------------------
 # ZK Failover controllers, if auto-HA is enabled
-AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
-if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
-  echo "Stopping ZK Failover Controllers on NN hosts [$NAMENODES]"
-  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --hostnames "$NAMENODES" \
-    --script "$bin/hdfs" stop zkfc
+AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
+if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
+  echo "Stopping ZK Failover Controllers on NN hosts [${NAMENODES}]"
+  "${bin}/hadoop-daemons.sh" \
+  --config "${HADOOP_CONF_DIR}" \
+  --hostnames "${NAMENODES}" \
+  stop zkfc
 fi
 # eof

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh Wed Aug 20 01:22:42 2014
@@ -17,17 +17,33 @@
 
 # Run as root to stop secure datanodes in a security-enabled cluster.
 
-usage="Usage (run as root in order to stop secure datanodes): stop-secure-dns.sh"
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+function hadoop_usage {
+  echo "Usage (run as root in order to stop secure datanodes): stop-secure-dns.sh"
+}
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
 
-if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
 else
-  echo $usage
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
+
+if [[ "${EUID}" -eq 0 ]] && [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
+  "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" stop datanode
+else
+  hadoop_exit_with_usage 1
 fi

Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1618694-1618997

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Wed Aug 20 01:22:42 2014
@@ -738,7 +738,12 @@ class BlockReceiver implements Closeable
       LOG.warn("Error managing cache for writer of block " + block, t);
     }
   }
-
+  
+  public void sendOOB() throws IOException, InterruptedException {
+    ((PacketResponder) responder.getRunnable()).sendOOBResponse(PipelineAck
+        .getRestartOOBStatus());
+  }
+  
   void receiveBlock(
       DataOutputStream mirrOut, // output to next datanode
       DataInputStream mirrIn,   // input from next datanode
@@ -830,9 +835,7 @@ class BlockReceiver implements Closeable
               // The worst case is not recovering this RBW replica. 
               // Client will fall back to regular pipeline recovery.
             }
-            try {
-              ((PacketResponder) responder.getRunnable()).
-                  sendOOBResponse(PipelineAck.getRestartOOBStatus());
+            try {              
               // Even if the connection is closed after the ack packet is
               // flushed, the client can react to the connection closure 
               // first. Insert a delay to lower the chance of client 
@@ -840,8 +843,6 @@ class BlockReceiver implements Closeable
               Thread.sleep(1000);
             } catch (InterruptedException ie) {
               // It is already going down. Ignore this.
-            } catch (IOException ioe) {
-              LOG.info("Error sending OOB Ack.", ioe);
             }
           }
           responder.interrupt();

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Aug 20 01:22:42 2014
@@ -270,6 +270,7 @@ public class DataNode extends Configured
   public final static String EMPTY_DEL_HINT = "";
   final AtomicInteger xmitsInProgress = new AtomicInteger();
   Daemon dataXceiverServer = null;
+  DataXceiverServer xserver = null;
   Daemon localDataXceiverServer = null;
   ShortCircuitRegistry shortCircuitRegistry = null;
   ThreadGroup threadGroup = null;
@@ -649,8 +650,8 @@ public class DataNode extends Configured
     streamingAddr = tcpPeerServer.getStreamingAddr();
     LOG.info("Opened streaming server at " + streamingAddr);
     this.threadGroup = new ThreadGroup("dataXceiverServer");
-    this.dataXceiverServer = new Daemon(threadGroup, 
-        new DataXceiverServer(tcpPeerServer, conf, this));
+    xserver = new DataXceiverServer(tcpPeerServer, conf, this);
+    this.dataXceiverServer = new Daemon(threadGroup, xserver);
     this.threadGroup.setDaemon(true); // auto destroy when empty
 
     if (conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
@@ -1138,6 +1139,11 @@ public class DataNode extends Configured
   }
   
   @VisibleForTesting
+  public DataXceiverServer getXferServer() {
+    return xserver;  
+  }
+  
+  @VisibleForTesting
   public int getXferPort() {
     return streamingAddr.getPort();
   }
@@ -1395,6 +1401,7 @@ public class DataNode extends Configured
     // in order to avoid any further acceptance of requests, but the peers
     // for block writes are not closed until the clients are notified.
     if (dataXceiverServer != null) {
+      xserver.sendOOBToPeers();
       ((DataXceiverServer) this.dataXceiverServer.getRunnable()).kill();
       this.dataXceiverServer.interrupt();
     }
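
With the change above, a DataNode shutting down for an upgrade now has
DataXceiverServer.sendOOBToPeers() notify every connected writer before the
xceiver threads are killed (HDFS-6569). In operation this path is reached
through the rolling-upgrade shutdown command; a sketch (host and IPC port are
illustrative, the default datanode IPC port depends on dfs.datanode.ipc.address):

    # Ask a datanode to shut down for upgrade; clients writing to it get
    # the restart OOB ack and wait for the datanode to come back:
    hdfs dfsadmin -shutdownDatanode dn1.example.com:50020 upgrade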

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Wed Aug 20 01:22:42 2014
@@ -103,7 +103,8 @@ class DataXceiver extends Receiver imple
   private long opStartTime; //the start time of receiving an Op
   private final InputStream socketIn;
   private OutputStream socketOut;
-
+  private BlockReceiver blockReceiver = null;
+  
   /**
    * Client Name used in previous operation. Not available on first request
    * on the socket.
@@ -159,6 +160,12 @@ class DataXceiver extends Receiver imple
     return socketOut;
   }
 
+  public void sendOOB() throws IOException, InterruptedException {
+    LOG.info("Sending OOB to peer: " + peer);
+    if (blockReceiver != null)
+      blockReceiver.sendOOB();
+  }
+  
   /**
    * Read/write data from/to the DataXceiverServer.
    */
@@ -168,7 +175,7 @@ class DataXceiver extends Receiver imple
     Op op = null;
 
     try {
-      dataXceiverServer.addPeer(peer, Thread.currentThread());
+      dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
       peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
       InputStream input = socketIn;
       IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
@@ -584,7 +591,6 @@ class DataXceiver extends Receiver imple
     DataOutputStream mirrorOut = null;  // stream to next target
     DataInputStream mirrorIn = null;    // reply from next target
     Socket mirrorSock = null;           // socket to next target
-    BlockReceiver blockReceiver = null; // responsible for data handling
     String mirrorNode = null;           // the name:port of next target
     String firstBadLink = "";           // first datanode that failed in connection setup
     Status mirrorInStatus = SUCCESS;
@@ -747,6 +753,7 @@ class DataXceiver extends Receiver imple
       IOUtils.closeStream(replyOut);
       IOUtils.closeSocket(mirrorSock);
       IOUtils.closeStream(blockReceiver);
+      blockReceiver = null;
     }
 
     //update metrics
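
Promoting blockReceiver to an instance field is what lets the server thread
reach into an active write and send the OOB ack, and the reset to null after
closeStream() keeps sendOOB() from touching a receiver that has already been
closed. A hedged sketch of that hand-off; the class names and the volatile
qualifier are illustrative additions, not part of the patch:

    // Sketch: share the active receiver between the xceiver thread
    // (writeBlock) and the server thread (sendOOB).
    class ReceiverSketch {
      void sendOOB() { /* forward a restart OOB ack downstream */ }
    }

    class XceiverSketch {
      // volatile so the server thread sees the xceiver thread's writes
      private volatile ReceiverSketch blockReceiver;

      void sendOOB() {
        ReceiverSketch r = blockReceiver; // snapshot; may be nulled concurrently
        if (r != null) {
          r.sendOOB();
        }
      }

      void writeBlock() {
        blockReceiver = new ReceiverSketch();
        try {
          // ... receive and ack packets ...
        } finally {
          blockReceiver = null; // receiver closed; nothing left to notify
        }
      }
    }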

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java Wed Aug 20 01:22:42 2014
@@ -27,11 +27,11 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.PeerServer;
-import org.apache.hadoop.hdfs.server.balancer.Balancer;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Daemon;
 
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Server used for receiving/sending a block of data.
@@ -45,6 +45,7 @@ class DataXceiverServer implements Runna
   private final PeerServer peerServer;
   private final DataNode datanode;
   private final HashMap<Peer, Thread> peers = new HashMap<Peer, Thread>();
+  private final HashMap<Peer, DataXceiver> peersXceiver = new HashMap<Peer, DataXceiver>();
   private boolean closed = false;
   
   /**
@@ -217,18 +218,38 @@ class DataXceiverServer implements Runna
     }
   }
   
-  synchronized void addPeer(Peer peer, Thread t) throws IOException {
+  synchronized void addPeer(Peer peer, Thread t, DataXceiver xceiver)
+      throws IOException {
     if (closed) {
       throw new IOException("Server closed.");
     }
     peers.put(peer, t);
+    peersXceiver.put(peer, xceiver);
   }
 
   synchronized void closePeer(Peer peer) {
     peers.remove(peer);
+    peersXceiver.remove(peer);
     IOUtils.cleanup(null, peer);
   }
 
+  // Send an OOB ack to every connected peer (rolling-upgrade shutdown only).
+  public synchronized void sendOOBToPeers() {
+    if (!datanode.shutdownForUpgrade) {
+      return;
+    }
+
+    for (Peer p : peers.keySet()) {
+      try {
+        peersXceiver.get(p).sendOOB();
+      } catch (IOException e) {
+        LOG.warn("Error sending OOB message to peer " + p, e);
+      } catch (InterruptedException e) {
+        LOG.warn("Interrupted while sending OOB message to peer " + p);
+      }
+    }
+  }
+  
   // Notify all peers of the shutdown and restart.
   // datanode.shouldRun should still be true and datanode.restarting should
   // be set true before calling this method.
@@ -247,6 +268,7 @@ class DataXceiverServer implements Runna
       IOUtils.cleanup(LOG, p);
     }
     peers.clear();
+    peersXceiver.clear();
   }
 
   // Return the number of peers.
@@ -254,7 +276,14 @@ class DataXceiverServer implements Runna
     return peers.size();
   }
 
+  // Return the number of peer-to-DataXceiver mappings, for tests.
+  @VisibleForTesting
+  synchronized int getNumPeersXceiver() {
+    return peersXceiver.size();
+  }
+  
   synchronized void releasePeer(Peer peer) {
     peers.remove(peer);
+    peersXceiver.remove(peer);
   }
 }
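
With this change addPeer(), closePeer(), releasePeer(), and the clear-all
path must update peers and peersXceiver in lockstep; the synchronized
methods make that safe, but the invariant now lives in four places. A
hedged alternative, not part of this patch, would keep a single map whose
value carries both halves (PeerEntry is an illustrative name):

    // Illustrative only: one map, so the thread and xceiver registered
    // for a peer can never drift apart.
    class PeerEntry {
      final Thread thread;
      final DataXceiver xceiver;
      PeerEntry(Thread thread, DataXceiver xceiver) {
        this.thread = thread;
        this.xceiver = xceiver;
      }
    }
    // private final HashMap<Peer, PeerEntry> peers =
    //     new HashMap<Peer, PeerEntry>();
    //   addPeer:   peers.put(peer, new PeerEntry(t, xceiver));
    //   closePeer: peers.remove(peer); IOUtils.cleanup(null, peer);
    //   sendOOB:   for (PeerEntry e : peers.values()) e.xceiver.sendOOB();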

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java?rev=1619018&r1=1619017&r2=1619018&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java Wed Aug 20 01:22:42 2014
@@ -27,11 +27,14 @@ import static org.junit.Assert.assertTru
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -67,6 +70,7 @@ public class TestDataNodeRollingUpgrade 
 
   private void startCluster() throws IOException {
     conf = new HdfsConfiguration();
+    conf.setInt("dfs.blocksize", 1024*1024);
     cluster = new Builder(conf).numDataNodes(REPL_FACTOR).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
@@ -243,4 +247,48 @@ public class TestDataNodeRollingUpgrade 
       shutdownCluster();
     }
   }
+  
+  // Test that DataXceiverServer registers a matching DataXceiver for each
+  // peer, as needed for sending OOB messages.
+  @Test(timeout=600000)
+  public void testDatanodePeersXceiver() throws Exception {
+    try {
+      startCluster();
+
+      // Create files in DFS.
+      String testFile1 = "/TestDataNodeXceiver1.dat";
+      String testFile2 = "/TestDataNodeXceiver2.dat";
+      String testFile3 = "/TestDataNodeXceiver3.dat";
+
+      DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
+      DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
+      DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);
+
+      DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
+      DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
+      DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);
+
+      byte[] toWrite = new byte[1024*1024*8];
+      Random rb = new Random(1111);
+      rb.nextBytes(toWrite);
+      s1.write(toWrite, 0, 1024*1024*8);
+      s1.flush();
+      s2.write(toWrite, 0, 1024*1024*8);
+      s2.flush();
+      s3.write(toWrite, 0, 1024*1024*8);
+      s3.flush();
+
+      // While the three streams are open, each registered peer must have
+      // a matching DataXceiver.
+      assertTrue(dn.getXferServer().getNumPeers() == dn.getXferServer()
+          .getNumPeersXceiver());
+      s1.close();
+      s2.close();
+      s3.close();
+      // After the streams close, both maps must shrink together.
+      assertTrue(dn.getXferServer().getNumPeers() == dn.getXferServer()
+          .getNumPeersXceiver());
+      client1.close();
+      client2.close();
+      client3.close();
+    } finally {
+      shutdownCluster();
+    }
+  }
 }
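
With the assertions above comparing getNumPeers() against
getNumPeersXceiver(), the property under test is that the two maps grow and
shrink together. If more call sites need the same check, a small helper
would keep it in one place; assertPeerXceiverParity is an illustrative
name, not part of the patch:

    // Illustrative helper: the peer map and the xceiver map must always
    // be the same size, whether streams are open or closed.
    private static void assertPeerXceiverParity(DataNode dn) {
      DataXceiverServer server = dn.getXferServer();
      assertTrue(server.getNumPeers() == server.getNumPeersXceiver());
    }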


