hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1128459 [1/3] - in /hadoop/hdfs/branches/HDFS-1073: ./ bin/ ivy/ src/c++/libhdfs/ src/c++/libhdfs/tests/ src/contrib/ src/contrib/fuse-dfs/ src/contrib/fuse-dfs/src/ src/contrib/hdfsproxy/ src/docs/src/documentation/content/xdocs/ src/java...
Date: Fri, 27 May 2011 21:12:05 GMT
Author: todd
Date: Fri May 27 21:12:02 2011
New Revision: 1128459

URL: http://svn.apache.org/viewvc?rev=1128459&view=rev
Log:
Merge trunk into HDFS-1073

Added:
    hadoop/hdfs/branches/HDFS-1073/src/packages/
      - copied from r1128452, hadoop/hdfs/trunk/src/packages/
    hadoop/hdfs/branches/HDFS-1073/src/packages/deb/
      - copied from r1128452, hadoop/hdfs/trunk/src/packages/deb/
    hadoop/hdfs/branches/HDFS-1073/src/packages/deb/hadoop.control/
      - copied from r1128452, hadoop/hdfs/trunk/src/packages/deb/hadoop.control/
    hadoop/hdfs/branches/HDFS-1073/src/packages/deb/hadoop.control/conffile
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/deb/hadoop.control/conffile
    hadoop/hdfs/branches/HDFS-1073/src/packages/deb/hadoop.control/control
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/deb/hadoop.control/control
    hadoop/hdfs/branches/HDFS-1073/src/packages/deb/hadoop.control/postinst
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/deb/hadoop.control/postinst
    hadoop/hdfs/branches/HDFS-1073/src/packages/deb/hadoop.control/postrm
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/deb/hadoop.control/postrm
    hadoop/hdfs/branches/HDFS-1073/src/packages/deb/hadoop.control/preinst
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/deb/hadoop.control/preinst
    hadoop/hdfs/branches/HDFS-1073/src/packages/deb/hadoop.control/prerm
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/deb/hadoop.control/prerm
    hadoop/hdfs/branches/HDFS-1073/src/packages/deb/init.d/
      - copied from r1128452, hadoop/hdfs/trunk/src/packages/deb/init.d/
    hadoop/hdfs/branches/HDFS-1073/src/packages/deb/init.d/hadoop-datanode
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/deb/init.d/hadoop-datanode
    hadoop/hdfs/branches/HDFS-1073/src/packages/deb/init.d/hadoop-namenode
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/deb/init.d/hadoop-namenode
    hadoop/hdfs/branches/HDFS-1073/src/packages/rpm/
      - copied from r1128452, hadoop/hdfs/trunk/src/packages/rpm/
    hadoop/hdfs/branches/HDFS-1073/src/packages/rpm/init.d/
      - copied from r1128452, hadoop/hdfs/trunk/src/packages/rpm/init.d/
    hadoop/hdfs/branches/HDFS-1073/src/packages/rpm/init.d/hadoop-datanode
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/rpm/init.d/hadoop-datanode
    hadoop/hdfs/branches/HDFS-1073/src/packages/rpm/init.d/hadoop-namenode
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/rpm/init.d/hadoop-namenode
    hadoop/hdfs/branches/HDFS-1073/src/packages/rpm/spec/
      - copied from r1128452, hadoop/hdfs/trunk/src/packages/rpm/spec/
    hadoop/hdfs/branches/HDFS-1073/src/packages/rpm/spec/hadoop-hdfs.spec
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/rpm/spec/hadoop-hdfs.spec
    hadoop/hdfs/branches/HDFS-1073/src/packages/templates/
      - copied from r1128452, hadoop/hdfs/trunk/src/packages/templates/
    hadoop/hdfs/branches/HDFS-1073/src/packages/templates/conf/
      - copied from r1128452, hadoop/hdfs/trunk/src/packages/templates/conf/
    hadoop/hdfs/branches/HDFS-1073/src/packages/templates/conf/hdfs-site.xml
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/templates/conf/hdfs-site.xml
    hadoop/hdfs/branches/HDFS-1073/src/packages/update-hdfs-env.sh
      - copied unchanged from r1128452, hadoop/hdfs/trunk/src/packages/update-hdfs-env.sh
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
      - copied, changed from r1128452, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
Modified:
    hadoop/hdfs/branches/HDFS-1073/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1073/bin/distribute-exclude.sh
    hadoop/hdfs/branches/HDFS-1073/bin/hdfs
    hadoop/hdfs/branches/HDFS-1073/bin/hdfs-config.sh
    hadoop/hdfs/branches/HDFS-1073/bin/refresh-namenodes.sh
    hadoop/hdfs/branches/HDFS-1073/bin/start-balancer.sh
    hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh
    hadoop/hdfs/branches/HDFS-1073/bin/start-secure-dns.sh
    hadoop/hdfs/branches/HDFS-1073/bin/stop-balancer.sh
    hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh
    hadoop/hdfs/branches/HDFS-1073/bin/stop-secure-dns.sh
    hadoop/hdfs/branches/HDFS-1073/build.xml   (contents, props changed)
    hadoop/hdfs/branches/HDFS-1073/ivy.xml
    hadoop/hdfs/branches/HDFS-1073/ivy/libraries.properties
    hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/hdfsJniHelper.c
    hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/tests/test-libhdfs.sh
    hadoop/hdfs/branches/HDFS-1073/src/contrib/build-contrib.xml
    hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/README
    hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/build.xml
    hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/src/Makefile.am
    hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh
    hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/README
    hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/hdfsproxy.xml
    hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/libhdfs.xml
    hadoop/hdfs/branches/HDFS-1073/src/java/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
    hadoop/hdfs/branches/HDFS-1073/src/test/system/conf/system-test-hdfs.xml
    hadoop/hdfs/branches/HDFS-1073/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java
    hadoop/hdfs/branches/HDFS-1073/src/webapps/datanode/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/webapps/secondary/   (props changed)

Propchange: hadoop/hdfs/branches/HDFS-1073/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 27 21:12:02 2011
@@ -2,4 +2,4 @@
 /hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463
 /hadoop/hdfs/branches/branch-0.21:820487
-/hadoop/hdfs/trunk:1086482-1126286
+/hadoop/hdfs/trunk:1086482-1128452
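
The svn:mergeinfo change above advances the merged trunk range from r1126286 to r1128452. A minimal sketch of the kind of invocation that produces such an update (the exact command is not recorded in this commit, so the working-copy path and revision arguments below are assumptions):

    # in a working copy of the HDFS-1073 branch
    cd hadoop/hdfs/branches/HDFS-1073
    # pull in trunk changes between the previously merged revision and r1128452
    svn merge -r 1126286:1128452 http://svn.apache.org/repos/asf/hadoop/hdfs/trunk .
    svn commit -m "Merge trunk into HDFS-1073"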

Modified: hadoop/hdfs/branches/HDFS-1073/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/CHANGES.txt?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1073/CHANGES.txt Fri May 27 21:12:02 2011
@@ -101,8 +101,8 @@ Trunk (unreleased changes)
     HDFS-1663. Federation: Rename getPoolId() everywhere to 
     getBlockPoolId() (tanping via boryas)
 
-    HDFS-1652. FederationL Add support for multiple namenodes in MiniDFSCluster.
-    (suresh)
+    HDFS-1652. FederationL Add support for multiple namenodes in 
+    MiniDFSCluster. (suresh)
 
     HDFS-1672. Federation: refactor stopDatanode(name) to work 
     with multiple Block Pools (boryas)
@@ -243,8 +243,8 @@ Trunk (unreleased changes)
 
     HDFS-1754. Federation: testFsck fails. (boryas)
 
-    HDFS-1755. Federation: The BPOfferService must always connect to namenode as 
-    the login user. (jitendra)
+    HDFS-1755. Federation: The BPOfferService must always connect to namenode
+    as the login user. (jitendra)
 
     HDFS-1675. Support transferring RBW between datanodes. (szetszwo)
 
@@ -281,6 +281,9 @@ Trunk (unreleased changes)
     HDFS-1914. Federation: namenode storage directories must be configurable
     specific to name service. (suresh)
 
+    HDFS-1963. Create RPM and Debian packages for HDFS. Changes deployment
+    layout to be consistent across the binary tgz, rpm, and deb.
+    (Eric Yang via omalley)
 
   IMPROVEMENTS
 
@@ -442,14 +445,17 @@ Trunk (unreleased changes)
     HDFS-1905. Improve namenode -format command by not making -clusterId
     parameter mandatory. (Bharath Mundlapudi via suresh)
 
-    HDFS-1941. Remove -genclusterid option from namenode command.
-    (Bharath Mundlapudi via suresh)
-
     HDFS-1877.  Add a new test for concurrent read and write.  (CW Chung
     via szetszwo)
 
     HDFS-1959. Better error message for missing namenode directory. (eli)
 
+    HDFS-1996.  ivy: hdfs test jar should be independent to common test jar.
+    (Eric Yang via szetszwo)
+
+    HDFS-1812. TestHDFSCLI should clean up cluster in teardown method.
+    (Uma Maheswara Rao G via todd)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -618,6 +624,17 @@ Trunk (unreleased changes)
     HDFS-1371. One bad node can incorrectly flag many files as corrupt.
     (Tanping Wang via jitendra)
 
+    HDFS-1943. Fail to start datanode while start-dfs.sh is executed by 
+    root user. (Wei Yongjun via jghoman)
+
+    HDFS-1983. Fix path display for copy and rm commands in TestHDFSCLI and
+    TestDFSShell. (Daryn Sharp via todd)
+
+    HDFS-1999. Tests use deprecated configs. (Aaron T. Myers via eli)
+
+    HDFS-1592. Datanode startup doesn't honor volumes.tolerated. 
+    (Bharath Mundlapudi via jitendra)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES
@@ -1053,6 +1070,17 @@ Release 0.22.0 - Unreleased
     HDFS-1965. IPCs done using block token-based tickets can't reuse
     connections (todd)
 
+    HDFS-1978. All but first option in LIBHDFS_OPTS is ignored. (eli)
+
+    HDFS-1964. Fix incorrect HTML unescaping in DatanodeJspHelper
+    (Aaron T. Myers via todd)
+
+    HDFS-1997. Image transfer process misreports client side exceptions.
+    (todd via eli)
+
+    HDFS-2000. Missing deprecation for io.bytes.per.checksum.
+    (Aaron T. Myers vie eli)
+
 Release 0.21.1 - Unreleased
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 
@@ -2137,15 +2165,12 @@ Release 0.21.0 - 2010-08-13
     HDFS-1750. ListPathsServlet should not use HdfsFileStatus.getLocalName()
     to get file name since it may return an empty string.  (szetszwo)
 
-Release 0.20.3 - 2011-1-5
+Release 0.20.3 - Unreleased
 
   IMPROVEMENTS
 
   BUG FIXES
 
-    HDFS-955. New implementation of saveNamespace() to avoid loss of edits 
-    when name-node fails during saving. (shv)
-
     HDFS-1041. DFSClient.getFileChecksum(..) should retry if connection to
     the first datanode fails.  (szetszwo)
 
@@ -2155,10 +2180,22 @@ Release 0.20.3 - 2011-1-5
     HDFS-1258. Clearing namespace quota on "/" corrupts fs image.
     (Aaron T. Myers via szetszwo)
 
+    HDFS-1406. TestCLI fails on Ubuntu with default /etc/hosts. (cos)
+
+Release 0.20.203.0 - 2011-5-11
+
+  IMPROVEMENTS
+
+    HADOOP-7259. Contrib modules should include the build.properties from
+    the enclosing hadoop directory. (omalley)
+
+  BUG FIXES
+
     HDFS-132. Fix namenode to not report files deleted metrics for deletions
     done while replaying edits during startup. (suresh & shv)
 
-    HDFS-1406. TestCLI fails on Ubuntu with default /etc/hosts. (cos)
+    HDFS-955. New implementation of saveNamespace() to avoid loss of edits 
+    when name-node fails during saving. (shv)
 
 Release 0.20.2 - 2009-09-01
 

Modified: hadoop/hdfs/branches/HDFS-1073/bin/distribute-exclude.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/distribute-exclude.sh?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/distribute-exclude.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/distribute-exclude.sh Fri May 27 21:12:02 2011
@@ -36,7 +36,7 @@
 bin=`dirname "$0"`
 bin=`cd "$bin"; pwd`
 
-. "$bin/hdfs-config.sh"
+. "$bin/../libexec/hdfs-config.sh"
 
 if [ "$1" = '' ] ; then
   "Error: please specify local exclude file as a first argument"
@@ -50,8 +50,8 @@ if [ ! -f "$excludeFilenameLocal" ] ; th
   exit 1
 fi
 
-namenodes=$("$HADOOP_HDFS_HOME/bin/hdfs" getconf -namenodes)
-excludeFilenameRemote=$("$HADOOP_HDFS_HOME/bin/hdfs" getconf -excludeFile)
+namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
+excludeFilenameRemote=$("$HADOOP_PREFIX/bin/hdfs" getconf -excludeFile)
 
 if [ "$excludeFilenameRemote" = '' ] ; then
   echo \

Modified: hadoop/hdfs/branches/HDFS-1073/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/hdfs?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/hdfs (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/hdfs Fri May 27 21:12:02 2011
@@ -15,10 +15,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-bin=`dirname "$0"`
+bin=`which $0`
+bin=`dirname ${bin}`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
 
 function print_usage(){
   echo "Usage: hdfs [--config confdir] COMMAND"
@@ -71,7 +72,7 @@ elif [ "$COMMAND" = "secondarynamenode" 
   HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
 elif [ "$COMMAND" = "datanode" ] ; then
   CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
-  if [[ $EUID -eq 0 ]]; then
+  if [ "$starting_secure_dn" = "true" ]; then
     HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS"
   else
     HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
@@ -107,29 +108,29 @@ else
 fi
 
 # for developers, add hdfs classes to CLASSPATH
-if [ -d "$HADOOP_HDFS_HOME/build/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/classes
+if [ -d "$HADOOP_PREFIX/build/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/classes
 fi
-if [ -d "$HADOOP_HDFS_HOME/build/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build
+if [ -d "$HADOOP_PREFIX/build/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build
 fi
-if [ -d "$HADOOP_HDFS_HOME/build/test/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/test/classes
+if [ -d "$HADOOP_PREFIX/build/test/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/test/classes
 fi
-if [ -d "$HADOOP_HDFS_HOME/build/tools" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/tools
+if [ -d "$HADOOP_PREFIX/build/tools" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/tools
 fi
 
 # for releases, add core hdfs jar & webapps to CLASSPATH
-if [ -d "$HADOOP_HDFS_HOME/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME
+if [ -d "$HADOOP_PREFIX/share/hadoop/hdfs/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/share/hadoop/hdfs
 fi
-for f in $HADOOP_HDFS_HOME/hadoop-hdfs-*.jar; do
+for f in $HADOOP_PREFIX/share/hadoop-hdfs/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
 # add libs to CLASSPATH
-for f in $HADOOP_HDFS_HOME/lib/*.jar; do
+for f in $HADOOP_PREFIX/lib/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
@@ -146,7 +147,7 @@ if [ "$starting_secure_dn" = "true" ]; t
    HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
   fi
 
-  exec "$HADOOP_HDFS_HOME/bin/jsvc" \
+  exec "$HADOOP_PREFIX/bin/jsvc" \
            -Dproc_$COMMAND -outfile "$HADOOP_LOG_DIR/jsvc.out" \
            -errfile "$HADOOP_LOG_DIR/jsvc.err" \
            -pidfile "$HADOOP_SECURE_DN_PID" \

Modified: hadoop/hdfs/branches/HDFS-1073/bin/hdfs-config.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/hdfs-config.sh?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/hdfs-config.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/hdfs-config.sh Fri May 27 21:12:02 2011
@@ -18,15 +18,14 @@
 # included in all the hdfs scripts with source command
 # should not be executed directly
 
-bin=`dirname "$0"`
+bin=`which "$0"`
+bin=`dirname "${bin}"`
 bin=`cd "$bin"; pwd`
 
-export HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$bin/..}"
+export HADOOP_PREFIX="${HADOOP_PREFIX:-$bin/..}"
 
-if [ -d "${HADOOP_COMMON_HOME}" ]; then
-  . "$HADOOP_COMMON_HOME"/bin/hadoop-config.sh
-elif [ -d "${HADOOP_HOME}" ]; then
-  . "$HADOOP_HOME"/bin/hadoop-config.sh
+if [ -d "$bin" ]; then
+  . $bin/../libexec/hadoop-config.sh
 elif [ -e "${HADOOP_HDFS_HOME}"/bin/hadoop-config.sh ]; then
   . "$HADOOP_HDFS_HOME"/bin/hadoop-config.sh
 else

Modified: hadoop/hdfs/branches/HDFS-1073/bin/refresh-namenodes.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/refresh-namenodes.sh?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/refresh-namenodes.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/refresh-namenodes.sh Fri May 27 21:12:02 2011
@@ -23,13 +23,13 @@
 bin=`dirname "$0"`
 bin=`cd "$bin"; pwd`
 
-. "$bin/hdfs-config.sh"
+. "$bin/../libexec/hdfs-config.sh"
 
-namenodes=$("$HADOOP_HDFS_HOME/bin/hdfs" getconf -namenodes)
+namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
 
 for namenode in $namenodes ; do
   echo "Refreshing namenode [$namenode]"
-  "$HADOOP_HDFS_HOME/bin/hdfs" dfsadmin -refreshNodes
+  "$HADOOP_PREFIX/bin/hdfs" dfsadmin -refreshNodes
   if [ "$?" != '0' ] ; then errorFlag='1' ; fi
 done
 

Modified: hadoop/hdfs/branches/HDFS-1073/bin/start-balancer.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/start-balancer.sh?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/start-balancer.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/start-balancer.sh Fri May 27 21:12:02 2011
@@ -18,8 +18,8 @@
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
 
 # Start balancer daemon.
 
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@
+"$HADOOP_PREFIX"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@

Modified: hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh Fri May 27 21:12:02 2011
@@ -25,7 +25,7 @@ usage="Usage: start-dfs.sh [-upgrade|-ro
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin/hdfs-config.sh"
+. "$bin"/../libexec/hdfs-config.sh
 
 # get arguments
 if [ $# -ge 1 ]; then
@@ -47,11 +47,11 @@ fi
 #---------------------------------------------------------
 # namenodes
 
-NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -namenodes)
+NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
 
 echo "Starting namenodes on [$NAMENODES]"
 
-"$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+"$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
   --config "$HADOOP_CONF_DIR" \
   --hostnames "$NAMENODES" \
   --script "$bin/hdfs" start namenode $nameStartOpt
@@ -64,7 +64,7 @@ if [ -n "$HADOOP_SECURE_DN_USER" ]; then
     "Attempting to start secure cluster, skipping datanodes. " \
     "Run start-secure-dns.sh as root to complete startup."
 else
-  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+  "$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
     --config "$HADOOP_CONF_DIR" \
     --script "$bin/hdfs" start datanode $dataStartOpt
 fi
@@ -74,7 +74,7 @@ fi
 
 # if there are no secondary namenodes configured it returns
 # 0.0.0.0 or empty string
-SECONDARY_NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -secondarynamenodes 2>&-)
+SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
 SECONDARY_NAMENODES=${SECONDARY_NAMENODES:='0.0.0.0'}
 
 if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
@@ -84,7 +84,7 @@ if [ "$SECONDARY_NAMENODES" = '0.0.0.0' 
 else
   echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
 
-  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+  "$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
     --config "$HADOOP_CONF_DIR" \
     --hostnames "$SECONDARY_NAMENODES" \
     --script "$bin/hdfs" start secondarynamenode

Modified: hadoop/hdfs/branches/HDFS-1073/bin/start-secure-dns.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/start-secure-dns.sh?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/start-secure-dns.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/start-secure-dns.sh Fri May 27 21:12:02 2011
@@ -22,10 +22,10 @@ usage="Usage (run as root in order to st
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
 
 if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  "$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
+  "$HADOOP_PREFIX"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
 else
   echo $usage
 fi

Modified: hadoop/hdfs/branches/HDFS-1073/bin/stop-balancer.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/stop-balancer.sh?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/stop-balancer.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/stop-balancer.sh Fri May 27 21:12:02 2011
@@ -18,9 +18,9 @@
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
 
 # Stop balancer daemon.
 # Run this on the machine where the balancer is running
 
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer
+"$HADOOP_PREFIX"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer

Modified: hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh Fri May 27 21:12:02 2011
@@ -18,16 +18,16 @@
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
 
 #---------------------------------------------------------
 # namenodes
 
-NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -namenodes)
+NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
 
 echo "Stopping namenodes on [$NAMENODES]"
 
-"$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+"$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
   --config "$HADOOP_CONF_DIR" \
   --hostnames "$NAMENODES" \
   --script "$bin/hdfs" stop namenode
@@ -40,7 +40,7 @@ if [ -n "$HADOOP_SECURE_DN_USER" ]; then
     "Attempting to stop secure cluster, skipping datanodes. " \
     "Run stop-secure-dns.sh as root to complete shutdown."
 else
-  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+  "$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
     --config "$HADOOP_CONF_DIR" \
     --script "$bin/hdfs" stop datanode
 fi
@@ -50,7 +50,7 @@ fi
 
 # if there are no secondary namenodes configured it returns
 # 0.0.0.0 or empty string
-SECONDARY_NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -secondarynamenodes 2>&-)
+SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
 SECONDARY_NAMENODES=${SECONDARY_NAMENODES:-'0.0.0.0'}
 
 if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
@@ -60,7 +60,7 @@ if [ "$SECONDARY_NAMENODES" = '0.0.0.0' 
 else
   echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
 
-  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+  "$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
     --config "$HADOOP_CONF_DIR" \
     --hostnames "$SECONDARY_NAMENODES" \
     --script "$bin/hdfs" stop secondarynamenode

Modified: hadoop/hdfs/branches/HDFS-1073/bin/stop-secure-dns.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/stop-secure-dns.sh?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/stop-secure-dns.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/stop-secure-dns.sh Fri May 27 21:12:02 2011
@@ -22,10 +22,10 @@ usage="Usage (run as root in order to st
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
 
 if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  "$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
+  "$HADOOP_PREFIX"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
 else
   echo $usage
 fi

Modified: hadoop/hdfs/branches/HDFS-1073/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/build.xml?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/build.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/build.xml Fri May 27 21:12:02 2011
@@ -25,15 +25,18 @@
   <!-- to contribute (without having to type -D or edit this file -->
   <property file="${user.home}/build.properties" />
   <property file="${basedir}/build.properties" />
- 
+
+  <property name="module" value="hdfs"/> 
   <property name="Name" value="Hadoop-Hdfs"/>
-  <property name="name" value="hadoop-hdfs"/>
+  <property name="name" value="hadoop-${module}"/>
   <!-- ATTN: Need to change aop.xml's project.version prop. synchronously -->
-  <property name="version" value="0.23.0-SNAPSHOT"/>
+  <property name="_version" value="0.23.0"/>
+  <property name="version" value="${_version}-SNAPSHOT"/>
   <property name="final.name" value="${name}-${version}"/>
   <property name="test.hdfs.final.name" value="${name}-test-${version}"/>
   <property name="ant.final.name" value="${name}-ant-${version}"/>
   <property name="year" value="2009"/>
+  <property name="package.release" value="1"/>
 
   <property name="src.dir" value="${basedir}/src"/>  	
   <property name="java.src.dir" value="${src.dir}/java"/>
@@ -212,6 +215,17 @@
      <equals arg1="${repo}" arg2="staging"/>
   </condition>
 
+  <!-- packaging properties -->
+  <property name="package.prefix" value="/usr"/>
+  <property name="package.conf.dir" value="/etc/hadoop"/>
+  <property name="package.log.dir" value="/var/log/hadoop/hdfs"/>
+  <property name="package.pid.dir" value="/var/run/hadoop"/>
+  <property name="package.var.dir" value="/var/lib/hadoop"/>
+  <property name="package.share.dir" value="share/hadoop/${module}"/>
+  <!-- Use fixed path to build rpm for avoiding rpmbuild conflict with dash path names -->
+  <property name="package.buildroot" value="/tmp/hadoop_package_hdfs_build_${user.name}"/>
+  <property name="package.build.dir" value="/tmp/hadoop_package_hdfs_build_${user.name}/BUILD"/>
+
   <!-- the normal classpath -->
   <path id="classpath">
     <pathelement location="${build.classes}"/>
@@ -1086,16 +1100,20 @@
 		description="assembles artifacts for binary target">
     <mkdir dir="${dist.dir}"/>
     <mkdir dir="${dist.dir}/lib"/>
-    <mkdir dir="${dist.dir}/contrib"/>
-  	<mkdir dir="${dist.dir}/bin"/>
+    <mkdir dir="${dist.dir}/${package.share.dir}/contrib"/>
+    <mkdir dir="${dist.dir}/${package.share.dir}/lib"/>
+    <mkdir dir="${dist.dir}/${package.share.dir}/templates"/>
+    <mkdir dir="${dist.dir}/bin"/>
+    <mkdir dir="${dist.dir}/libexec"/>
+    <mkdir dir="${dist.dir}/sbin"/>
 
-    <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true">
+    <copy todir="${dist.dir}/${package.share.dir}/lib" includeEmptyDirs="false" flatten="true">
       <fileset dir="${hdfs.ivy.lib.dir}"/>
     </copy>
 
     <copy todir="${dist.dir}/lib" includeEmptyDirs="false">
-      <fileset dir="lib">
-        <exclude name="**/native/**"/>
+      <fileset dir="${build.dir}/c++/${build.platform}/lib">
+        <include name="**"/>
       </fileset>
     </copy>
 
@@ -1103,42 +1121,62 @@
       <!--Pass down the version in case its needed again and the target
       distribution directory so contribs know where to install to.-->
       <property name="version" value="${version}"/>
-      <property name="dist.dir" value="${dist.dir}"/>
+      <property name="dist.dir" value="${dist.dir}/${package.share.dir}"/>
       <fileset file="${contrib.dir}/build.xml"/>
-    </subant>  	
-
-    <copy todir="${dist.dir}/webapps">
-      <fileset dir="${build.webapps}"/>
-    </copy>
+    </subant>  
 
-    <copy todir="${dist.dir}"> 
+    <copy todir="${dist.dir}/${package.share.dir}"> 
       <fileset file="${build.dir}/${name}-*.jar"/>
     </copy>
   	
     <copy todir="${dist.dir}/bin">
-      <fileset dir="bin"/>
+      <fileset dir="bin">
+        <include name="hdfs"/>
+      </fileset>
     </copy>
+
+    <copy todir="${dist.dir}/libexec">
+      <fileset dir="bin">
+        <include name="hdfs-config.sh"/>
+      </fileset>
+    </copy>
+
+    <copy todir="${dist.dir}/sbin">
+      <fileset dir="bin">
+        <include name="start-*.sh"/>
+        <include name="stop-*.sh"/>
+      </fileset>
+    </copy>
+
+    <copy file="${basedir}/src/packages/rpm/init.d/hadoop-namenode" tofile="${dist.dir}/sbin/hadoop-namenode.redhat"/>
+    <copy file="${basedir}/src/packages/rpm/init.d/hadoop-datanode" tofile="${dist.dir}/sbin/hadoop-datanode.redhat"/>
+    <copy file="${basedir}/src/packages/deb/init.d/hadoop-namenode" tofile="${dist.dir}/sbin/hadoop-namenode.debian"/>
+    <copy file="${basedir}/src/packages/deb/init.d/hadoop-datanode" tofile="${dist.dir}/sbin/hadoop-datanode.debian"/>
+
+    <copy file="${basedir}/src/packages/update-hdfs-env.sh" tofile="${dist.dir}/sbin/update-hdfs-env.sh"/>
     
-    <copy todir="${dist.dir}/conf">
+    <copy todir="${dist.dir}/etc/hadoop">
       <fileset dir="${conf.dir}" excludes="**/*.template"/>
+      <fileset dir="${basedir}/src/packages/templates/conf" includes="*.template"/>
     </copy>
 
-    <copy file="ivy.xml" tofile="${dist.dir}/ivy.xml"/>
+    <copy todir="${dist.dir}/${package.share.dir}/templates">
+      <fileset dir="${basedir}/src/packages/templates/conf" includes="*"/>
+    </copy>
 
-    <copy todir="${dist.dir}/ivy">
-      <fileset dir="ivy"/>
+    <copy todir="${dist.dir}/${package.share.dir}/webapps">
+      <fileset dir="${build.webapps}"/>
     </copy>
 
-    <copy todir="${dist.dir}">
+    <copy todir="${dist.dir}/share/doc/hadoop/${module}">
       <fileset dir=".">
         <include name="*.txt" />
       </fileset>
     </copy>
   	
-    <copy todir="${dist.dir}/" file="build.xml"/>
-
     <chmod perm="ugo+x" type="file" parallel="false">
         <fileset dir="${dist.dir}/bin"/>
+        <fileset dir="${dist.dir}/sbin"/>
     </chmod>
   </target>
 
@@ -1167,12 +1205,15 @@
         <param.listofitems>
           <tarfileset dir="${system-test-build-dir}" mode="664">
             <exclude name="${final.name}/bin/*" />
+            <exclude name="${final.name}/libexec/*" />
             <exclude name="${final.name}/src/**" />
             <exclude name="${final.name}/docs/**" />
             <include name="${final.name}/**" />
           </tarfileset>
           <tarfileset dir="${build.dir}" mode="755">
             <include name="${final.name}/bin/*" />
+            <include name="${final.name}/libexec/*" />
+            <include name="${final.name}/sbin/*" />
           </tarfileset>
         </param.listofitems>
       </macro_tar>
@@ -1183,17 +1224,129 @@
       <param.listofitems>
         <tarfileset dir="${build.dir}" mode="664">
           <exclude name="${final.name}/bin/*" />
+          <exclude name="${final.name}/libexec/*" />
+          <exclude name="${final.name}/sbin/*" />
           <exclude name="${final.name}/src/**" />
           <exclude name="${final.name}/docs/**" />
           <include name="${final.name}/**" />
         </tarfileset>
         <tarfileset dir="${build.dir}" mode="755">
           <include name="${final.name}/bin/*" />
+          <include name="${final.name}/libexec/*" />
+          <include name="${final.name}/sbin/*" />
         </tarfileset>
       </param.listofitems>
     </macro_tar>
   </target>
 
+  <target name="rpm" depends="binary" description="Make rpm package">
+    <mkdir dir="${package.buildroot}/BUILD" />
+    <mkdir dir="${package.buildroot}/RPMS" />
+    <mkdir dir="${package.buildroot}/SRPMS" />
+    <mkdir dir="${package.buildroot}/SOURCES" />
+    <mkdir dir="${package.buildroot}/SPECS" />
+    <copy todir="${package.buildroot}/SOURCES">
+      <fileset dir="${build.dir}">
+        <include name="${final.name}-bin.tar.gz" />
+      </fileset>
+    </copy>
+    <copy file="${src.dir}/packages/rpm/spec/hadoop-hdfs.spec" todir="${package.buildroot}/SPECS">
+      <filterchain>
+        <replacetokens>
+          <token key="final.name" value="${final.name}" />
+          <token key="version" value="${_version}" />
+          <token key="package.release" value="${package.release}" />
+          <token key="package.build.dir" value="${package.build.dir}" />
+          <token key="package.prefix" value="${package.prefix}" />
+          <token key="package.conf.dir" value="${package.conf.dir}" />
+          <token key="package.log.dir" value="${package.log.dir}" />
+          <token key="package.pid.dir" value="${package.pid.dir}" />
+          <token key="package.var.dir" value="${package.var.dir}" />
+        </replacetokens>
+      </filterchain>
+    </copy>
+    <rpm specFile="hadoop-hdfs.spec" command="-bb --target ${os.arch}" topDir="${package.buildroot}" cleanBuildDir="true" failOnError="true"/>
+    <copy todir="${build.dir}/" flatten="true">
+      <fileset dir="${package.buildroot}/RPMS">
+        <include name="**/*.rpm" />
+      </fileset>
+    </copy>
+    <delete dir="${package.buildroot}" quiet="true" verbose="false"/>
+  </target>
+
+  <target name="deb" depends="ivy-retrieve-package, binary" description="Make deb package">
+    <taskdef name="deb"
+           classname="org.vafer.jdeb.ant.DebAntTask">
+      <classpath refid="ivy-package.classpath" />
+    </taskdef>
+
+    <mkdir dir="${package.build.dir}/hadoop.control" />
+    <mkdir dir="${package.buildroot}/${package.prefix}" />
+    <copy todir="${package.buildroot}/${package.prefix}">
+      <fileset dir="${build.dir}/${final.name}">
+        <include name="**" />
+      </fileset>
+    </copy>
+    <copy todir="${package.build.dir}/hadoop.control">
+      <fileset dir="${src.dir}/packages/deb/hadoop.control">
+        <exclude name="control" />
+      </fileset>
+    </copy>
+    <copy file="${src.dir}/packages/deb/hadoop.control/control" todir="${package.build.dir}/hadoop.control">
+      <filterchain>
+        <replacetokens>
+          <token key="final.name" value="${final.name}" />
+          <token key="version" value="${_version}" />
+          <token key="package.release" value="${package.release}" />
+          <token key="package.build.dir" value="${package.build.dir}" />
+          <token key="package.prefix" value="${package.prefix}" />
+          <token key="package.conf.dir" value="${package.conf.dir}" />
+          <token key="package.log.dir" value="${package.log.dir}" />
+          <token key="package.pid.dir" value="${package.pid.dir}" />
+        </replacetokens>
+      </filterchain>
+    </copy>
+    <deb destfile="${package.buildroot}/${name}_${_version}-${package.release}_${os.arch}.deb" control="${package.build.dir}/hadoop.control">
+      <tarfileset dir="${build.dir}/${final.name}" filemode="644" prefix="${package.prefix}">
+        <exclude name="bin/*" />
+        <exclude name="${package.share.dir}/contrib/*/bin/*" />
+        <exclude name="etc" />
+        <exclude name="etc/**" />
+        <exclude name="libexec/*" />
+        <exclude name="sbin/*" />
+        <include name="**" />
+      </tarfileset>
+      <tarfileset dir="${build.dir}/${final.name}" filemode="755" prefix="${package.prefix}">
+        <include name="bin/*" />
+        <exclude name="sbin/*.redhat" />
+        <exclude name="sbin/*.debian" />
+        <include name="sbin/*" />
+        <include name="libexec/*" />
+        <include name="${package.share.dir}/contrib/*/bin/*" />
+      </tarfileset>
+      <tarfileset dir="${src.dir}/packages" filemode="755" prefix="${package.prefix}/sbin">
+        <include name="*.sh" />
+      </tarfileset>
+      <tarfileset dir="${build.dir}/${final.name}/etc/hadoop" filemode="644" prefix="${package.conf.dir}">
+        <include name="**" />
+        <exclude name="configuration.xsl" />
+        <exclude name="hadoop-metrics2.properties" />
+        <exclude name="core-site.xml" />
+        <exclude name="hdfs-site.xml" />
+        <exclude name="mapred-site.xml" />
+      </tarfileset>
+      <tarfileset dir="${basedir}/src/packages/deb/init.d" filemode="755" prefix="/etc/init.d">
+        <include name="**" />
+      </tarfileset>
+    </deb>
+    <copy todir="${build.dir}/" flatten="true">
+      <fileset dir="${package.buildroot}">
+        <include name="**/${name}*.deb" />
+      </fileset>
+    </copy>
+    <delete dir="${package.buildroot}" quiet="true" verbose="false"/>
+  </target>
+
   <!-- ================================================================== -->
   <!-- Perform audit activities for the release                           -->
   <!-- ================================================================== -->
@@ -1293,7 +1446,7 @@
         <env key="OS_ARCH" value="${os.arch}"/>
         <env key="JVM_ARCH" value="${jvm.arch}"/>
         <env key="LIBHDFS_BUILD_DIR" value="${build.c++.libhdfs}"/>
-        <env key="HADOOP_HOME" value="${basedir}"/>
+        <env key="HADOOP_PREFIX" value="${basedir}"/>
         <env key="HADOOP_CONF_DIR" value="${test.libhdfs.dir}/conf"/>
         <env key="HADOOP_LOG_DIR" value="${test.libhdfs.dir}/logs"/>
         <env key="LIBHDFS_TEST_DIR" value="${test.libhdfs.dir}"/>
@@ -1776,11 +1929,21 @@
       log="${ivyresolvelog}"/>
   </target>
 
+  <target name="ivy-resolve-compile" depends="ivy-init">
+    <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="compile"
+      log="${ivyresolvelog}"/>
+  </target>
+
   <target name="ivy-resolve-common" depends="ivy-init">
     <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="common"
       log="${ivyresolvelog}"/>
   </target>
 
+  <target name="ivy-resolve-package" depends="ivy-init">
+    <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="package"
+        log="${ivyresolvelog}"/>
+  </target>
+
   <target name="ivy-resolve-hdfs" depends="ivy-init">
     <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="hdfs"
       log="${ivyresolvelog}"/>
@@ -1840,14 +2003,30 @@
     <ivy:cachepath pathid="ivy-test.classpath" conf="test"/>
   </target>
 
-  <target name="ivy-retrieve-common" depends="ivy-resolve-common"
+  <target name="ivy-retrieve-compile" depends="ivy-resolve-compile"
     description="Retrieve Ivy-managed artifacts for the compile configurations">
     <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
       pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"
         log="${ivyretrievelog}"/>
+    <ivy:cachepath pathid="ivy-compile.classpath" conf="compile"/>
+  </target>
+
+  <target name="ivy-retrieve-common" depends="ivy-resolve-common"
+    description="Retrieve Ivy-managed artifacts for the runtime configurations">
+    <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+      pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"
+        log="${ivyretrievelog}"/>
     <ivy:cachepath pathid="ivy-common.classpath" conf="common"/>
   </target>
 
+  <target name="ivy-retrieve-package" depends="ivy-resolve-package"
+    description="Retrieve Ivy-managed artifacts for the package configurations">
+    <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+      pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"
+                log="${ivyretrievelog}"/>
+    <ivy:cachepath pathid="ivy-package.classpath" conf="package"/>
+  </target>
+
   <target name="ivy-retrieve-hdfs" depends="ivy-resolve-hdfs"
     description="Retrieve Ivy-managed artifacts for the hdfs configurations">
     <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"

Propchange: hadoop/hdfs/branches/HDFS-1073/build.xml
            ('svn:mergeinfo' removed)
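
The build.xml diff above adds packaging properties plus new "rpm" and "deb" targets (HDFS-1963) that wrap the binary layout with rpmbuild and the jdeb Ant task. A hedged usage sketch, assuming a checkout where ant, rpmbuild, and the Ivy-fetched jdeb jar are available:

    # build the binary layout, then produce native packages
    ant binary
    ant rpm    # token-filters src/packages/rpm/spec/hadoop-hdfs.spec and runs rpmbuild
    ant deb    # stages src/packages/deb/hadoop.control and invokes the jdeb task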

Modified: hadoop/hdfs/branches/HDFS-1073/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/ivy.xml?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/ivy.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/ivy.xml Fri May 27 21:12:02 2011
@@ -40,6 +40,7 @@
     <conf name="hdfs" visibility="private" extends="compile,runtime" description="HDFS dependent artifacts"/>
     <conf name="javadoc" visibility="private" description="artiracts required while performing doc generation" extends="common"/>
     <conf name="test" extends="master" visibility="private" description="the classpath needed to run tests"/>
+    <conf name="package" extends="master" description="the classpath needed for packaging"/>
     <conf name="system" extends="test" visibility="private" description="the classpath needed to run system tests"/>
 
     <conf name="test-hdfswithmr" extends="test, common" visibility="private" description="the classpath needed to run tests"/>
@@ -63,17 +64,19 @@
     <dependency org="org.apache.hadoop" name="hadoop-common-instrumented" rev="${hadoop-common.version}" conf="system->default">
       <exclude module="ant"/>
     </dependency>
-    <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->master"/>
+    <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="compile->master"/>
     <dependency org="commons-daemon" name="commons-daemon" rev="${commons-daemon.version}" conf="hdfs->default" />
     <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
-    <dependency org="com.google.guava" name="guava" rev="${guava.version}" conf="common->default" />
-    <dependency org="org.apache.hadoop" name="avro" rev="${avro.version}" conf="common->default">
+    <dependency org="com.google.guava" name="guava" rev="${guava.version}" conf="hdfs->default" />
+    <dependency org="org.apache.hadoop" name="avro" rev="${avro.version}" conf="compile->master">
       <exclude module="ant"/>
       <exclude module="jetty"/>
       <exclude module="slf4j-simple"/>
     </dependency>
-    <dependency org="org.aspectj" name="aspectjrt" rev="${aspectj.version}" conf="common->default"/>
-    <dependency org="org.aspectj" name="aspectjtools" rev="${aspectj.version}" conf="common->default"/>
+    <dependency org="org.aspectj" name="aspectjrt" rev="${aspectj.version}" conf="compile->master"/>
+    <dependency org="org.aspectj" name="aspectjtools" rev="${aspectj.version}" conf="compile->master"/>
+
+    <dependency org="junit" name="junit" rev="${junit.version}" conf="test->master"/>
 
     <dependency org="org.slf4j" name="slf4j-api" rev="${slf4j-api.version}" conf="test->master"/>
     <dependency org="org.slf4j" name="slf4j-log4j12" rev="${slf4j-log4j12.version}" conf="test->master"/>
@@ -90,8 +93,9 @@
 
     <dependency org="org.apache.lucene" name="lucene-core" rev="${lucene-core.version}" conf="javadoc->default"/> 
 
-    <dependency org="org.mockito" name="mockito-all" rev="${mockito-all.version}" conf="common->master"/>
+    <dependency org="org.mockito" name="mockito-all" rev="${mockito-all.version}" conf="test->master"/>
 
+    <dependency org="org.vafer" name="jdeb" rev="${jdeb.version}" conf="package->master"/>
    </dependencies>
   
 </ivy-module>

Modified: hadoop/hdfs/branches/HDFS-1073/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/ivy/libraries.properties?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/ivy/libraries.properties (original)
+++ hadoop/hdfs/branches/HDFS-1073/ivy/libraries.properties Fri May 27 21:12:02 2011
@@ -44,6 +44,7 @@ hsqldb.version=1.8.0.10
 ivy.version=2.1.0
 
 jasper.version=5.5.12
+jdeb.version=0.8
 jsp.version=2.1
 jsp-api.version=5.5.12
 jetty.version=6.1.14

Propchange: hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 27 21:12:02 2011
@@ -1,4 +1,4 @@
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs:987665-1095512
-/hadoop/hdfs/trunk/src/c++/libhdfs:1086482-1126286
+/hadoop/hdfs/trunk/src/c++/libhdfs:1086482-1128452

Modified: hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/hdfsJniHelper.c
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/c%2B%2B/libhdfs/hdfsJniHelper.c?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/hdfsJniHelper.c (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/hdfsJniHelper.c Fri May 27 21:12:02 2011
@@ -421,28 +421,35 @@ JNIEnv* getJNIEnv(void)
         snprintf(optHadoopClassPath, optHadoopClassPathLen,
                 "%s%s", hadoopClassPathVMArg, hadoopClassPath);
 
+        // Determine the # of LIBHDFS_OPTS args
         int noArgs = 1;
-        //determine how many arguments were passed as LIBHDFS_OPTS env var
         char *hadoopJvmArgs = getenv("LIBHDFS_OPTS");
         char jvmArgDelims[] = " ";
+        char *str, *token, *savePtr;
         if (hadoopJvmArgs != NULL)  {
-                char *result = NULL;
-                result = strtok( hadoopJvmArgs, jvmArgDelims );
-                while ( result != NULL ) {
-                        noArgs++;
-        		result = strtok( NULL, jvmArgDelims);
-           	}
+          hadoopJvmArgs = strdup(hadoopJvmArgs);
+          for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
+            token = strtok_r(str, jvmArgDelims, &savePtr);
+            if (NULL == token) {
+              break;
+            }
+          }
+          free(hadoopJvmArgs);
         }
+
+        // Now that we know the # args, populate the options array
         JavaVMOption options[noArgs];
         options[0].optionString = optHadoopClassPath;
-		//fill in any specified arguments
+        hadoopJvmArgs = getenv("LIBHDFS_OPTS");
 	if (hadoopJvmArgs != NULL)  {
-            char *result = NULL;
-            result = strtok( hadoopJvmArgs, jvmArgDelims );	
-            int argNum = 1;
-            for (;argNum < noArgs ; argNum++) {
-                options[argNum].optionString = result; //optHadoopArg;
+          hadoopJvmArgs = strdup(hadoopJvmArgs);
+          for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
+            token = strtok_r(str, jvmArgDelims, &savePtr);
+            if (NULL == token) {
+              break;
             }
+            options[noArgs].optionString = token;
+          }
         }
 
         //Create the VM
@@ -454,14 +461,18 @@ JNIEnv* getJNIEnv(void)
         vm_args.ignoreUnrecognized = 1;
 
         rv = JNI_CreateJavaVM(&vm, (void*)&env, &vm_args);
+
+        if (hadoopJvmArgs != NULL)  {
+          free(hadoopJvmArgs);
+        }
+        free(optHadoopClassPath);
+
         if (rv != 0) {
             fprintf(stderr, "Call to JNI_CreateJavaVM failed "
                     "with error: %d\n", rv);
             UNLOCK_JVM_MUTEX();
             return NULL;
         }
-
-        free(optHadoopClassPath);
     }
     else {
         //Attach this thread to the VM
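
The hdfsJniHelper.c change above replaces the single-pass strtok() parsing of LIBHDFS_OPTS with a strtok_r() count-then-fill pass over a duplicated copy of the string, so every option reaches JNI_CreateJavaVM (HDFS-1978 in the CHANGES.txt hunk above). A small illustration of the behavior this fixes; the option values are assumptions, not taken from the commit:

    # Before the fix, only the first space-separated option took effect in the
    # embedded JVM created by libhdfs; with this change both options are honored.
    export LIBHDFS_OPTS="-Xmx512m -XX:+UseConcMarkSweepGC"
    # e.g. the hdfs_test binary driven by src/c++/libhdfs/tests/test-libhdfs.sh
    ./hdfs_test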

Modified: hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/tests/test-libhdfs.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/c%2B%2B/libhdfs/tests/test-libhdfs.sh?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/tests/test-libhdfs.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/tests/test-libhdfs.sh Fri May 27 21:12:02 2011
@@ -19,7 +19,7 @@
 #
 # Note: This script depends on 8 environment variables to function correctly:
 # a) CLASSPATH
-# b) HADOOP_HOME
+# b) HADOOP_PREFIX
 # c) HADOOP_CONF_DIR 
 # d) HADOOP_LOG_DIR 
 # e) LIBHDFS_BUILD_DIR
@@ -30,10 +30,10 @@
 #
 
 HDFS_TEST=hdfs_test
-HADOOP_LIB_DIR=$HADOOP_HOME/lib
-HADOOP_BIN_DIR=$HADOOP_HOME/bin
+HADOOP_LIB_DIR=$HADOOP_PREFIX/lib
+HADOOP_BIN_DIR=$HADOOP_PREFIX/bin
 
-COMMON_BUILD_DIR=$HADOOP_HOME/build/ivy/lib/Hadoop-Hdfs/common
+COMMON_BUILD_DIR=$HADOOP_PREFIX/build/ivy/lib/Hadoop-Hdfs/common
 COMMON_JAR=$COMMON_BUILD_DIR/hadoop-common-0.22.0-SNAPSHOT.jar
 
 cat > $HADOOP_CONF_DIR/core-site.xml <<EOF
@@ -77,9 +77,9 @@ EOF
 # If we are running from the hdfs repo we need to make sure
 # HADOOP_BIN_DIR contains the common scripts.  
 # If the bin directory does not and we've got a common jar extract its
-# bin directory to HADOOP_HOME/bin. The bin scripts hdfs-config.sh and
+# bin directory to HADOOP_PREFIX/bin. The bin scripts hdfs-config.sh and
 # hadoop-config.sh assume the bin directory is named "bin" and that it
-# is located in HADOOP_HOME.
+# is located in HADOOP_PREFIX.
 unpacked_common_bin_dir=0
 if [ ! -f $HADOOP_BIN_DIR/hadoop-config.sh ]; then
   if [ -f $COMMON_JAR ]; then
@@ -91,7 +91,7 @@ fi
 
 # Manipulate HADOOP_CONF_DIR too
 # which is necessary to circumvent bin/hadoop
-HADOOP_CONF_DIR=$HADOOP_CONF_DIR:$HADOOP_HOME/conf
+HADOOP_CONF_DIR=$HADOOP_CONF_DIR:$HADOOP_PREFIX/conf
 
 # set pid file dir so they are not written to /tmp
 export HADOOP_PID_DIR=$HADOOP_LOG_DIR
@@ -101,14 +101,14 @@ CLASSPATH="${HADOOP_CONF_DIR}"
 CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
 
 # for developers, add Hadoop classes to CLASSPATH
-if [ -d "$HADOOP_HOME/build/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes
+if [ -d "$HADOOP_PREFIX/build/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/classes
 fi
-if [ -d "$HADOOP_HOME/build/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build
+if [ -d "$HADOOP_PREFIX/build/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build
 fi
-if [ -d "$HADOOP_HOME/build/test/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes
+if [ -d "$HADOOP_PREFIX/build/test/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/test/classes
 fi
 
 # add Clover jar file needed for code coverage runs
@@ -118,14 +118,14 @@ CLASSPATH=${CLASSPATH}:${CLOVER_JAR};
 IFS=
 
 # add libs to CLASSPATH
-for f in $HADOOP_HOME/lib/*.jar; do
+for f in $HADOOP_PREFIX/lib/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
-for f in $HADOOP_HOME/*.jar; do 
+for f in $HADOOP_PREFIX/*.jar; do 
   CLASSPATH=${CLASSPATH}:$f
 done
-for f in $HADOOP_HOME/lib/jsp-2.1/*.jar; do
+for f in $HADOOP_PREFIX/lib/jsp-2.1/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
@@ -176,7 +176,7 @@ echo  LIB_JVM_DIR = $LIB_JVM_DIR
 echo  "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
 # Put delays to ensure hdfs is up and running and also shuts down 
 # after the tests are complete
-cd $HADOOP_HOME
+cd $HADOOP_PREFIX
 echo Y | $HADOOP_BIN_DIR/hdfs namenode -format &&
 $HADOOP_BIN_DIR/hadoop-daemon.sh --script $HADOOP_BIN_DIR/hdfs start namenode && sleep 2
 $HADOOP_BIN_DIR/hadoop-daemon.sh --script $HADOOP_BIN_DIR/hdfs start datanode && sleep 2

Modified: hadoop/hdfs/branches/HDFS-1073/src/contrib/build-contrib.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/contrib/build-contrib.xml?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/contrib/build-contrib.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/contrib/build-contrib.xml Fri May 27 21:12:02 2011
@@ -23,13 +23,14 @@
 
   <property name="name" value="${ant.project.name}"/>
   <property name="root" value="${basedir}"/>
+  <property name="hadoop.root" location="${root}/../../../"/>
 
   <!-- Load all the default properties, and any the user wants    -->
   <!-- to contribute (without having to type -D or edit this file -->
   <property file="${user.home}/${name}.build.properties" />
   <property file="${root}/build.properties" />
+  <property file="${hadoop.root}/build.properties" />
 
-  <property name="hadoop.root" location="${root}/../../../"/>
   <property name="src.dir"  location="${root}/src/java"/>
   <property name="src.test" location="${root}/src/test"/>
   <property name="src.examples" location="${root}/src/examples"/>

Modified: hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/README
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/README?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/README (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/README Fri May 27 21:12:02 2011
@@ -33,9 +33,9 @@ Requirements
 
 BUILDING
 
-   1. in HADOOP_HOME: `ant compile-libhdfs -Dlibhdfs=1
-   2. in HADOOP_HOME: `ant package` to deploy libhdfs
-   3. in HADOOP_HOME: `ant compile-contrib -Dlibhdfs=1 -Dfusedfs=1`
+   1. in HADOOP_PREFIX: `ant compile-libhdfs -Dlibhdfs=1
+   2. in HADOOP_PREFIX: `ant package` to deploy libhdfs
+   3. in HADOOP_PREFIX: `ant compile-contrib -Dlibhdfs=1 -Dfusedfs=1`
 
 NOTE: for amd64 architecture, libhdfs will not compile unless you edit
 the Makefile in src/c++/libhdfs/Makefile and set OS_ARCH=amd64
@@ -111,7 +111,7 @@ NOTE - you cannot export this with a FUS
 
 RECOMMENDATIONS
 
-1. From /bin, `ln -s $HADOOP_HOME/contrib/fuse-dfs/fuse_dfs* .`
+1. From /bin, `ln -s $HADOOP_PREFIX/contrib/fuse-dfs/fuse_dfs* .`
 
 2. Always start with debug on so you can see if you are missing a classpath or something like that.
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/build.xml?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/build.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/build.xml Fri May 27 21:12:02 2011
@@ -46,7 +46,7 @@
     <exec executable="make" failonerror="true">
       <env key="OS_NAME" value="${os.name}"/>
       <env key="OS_ARCH" value="${os.arch}"/>
-      <env key="HADOOP_HOME" value="${hadoop.root}"/>
+      <env key="HADOOP_PREFIX" value="${hadoop.root}"/>
       <env key="PACKAGE_VERSION" value="0.1.0"/>
       <env key="BUILD_PLATFORM" value="${build.platform}" />
       <env key="PERMS" value="${perms}"/>

Modified: hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/src/Makefile.am
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/src/Makefile.am?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/src/Makefile.am (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/src/Makefile.am Fri May 27 21:12:02 2011
@@ -17,5 +17,5 @@
 bin_PROGRAMS = fuse_dfs
 fuse_dfs_SOURCES = fuse_dfs.c fuse_options.c fuse_trash.c fuse_stat_struct.c fuse_users.c fuse_init.c fuse_connect.c fuse_impls_access.c fuse_impls_chmod.c  fuse_impls_chown.c  fuse_impls_create.c  fuse_impls_flush.c fuse_impls_getattr.c  fuse_impls_mkdir.c  fuse_impls_mknod.c  fuse_impls_open.c fuse_impls_read.c fuse_impls_release.c fuse_impls_readdir.c fuse_impls_rename.c fuse_impls_rmdir.c fuse_impls_statfs.c fuse_impls_symlink.c fuse_impls_truncate.c fuse_impls_utimens.c  fuse_impls_unlink.c fuse_impls_write.c
 AM_CFLAGS= -Wall -g
-AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_HOME)/src/c++/libhdfs -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
-AM_LDFLAGS= -L$(HADOOP_HOME)/build/c++/$(BUILD_PLATFORM)/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm
+AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_PREFIX)/src/c++/libhdfs -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
+AM_LDFLAGS= -L$(HADOOP_PREFIX)/build/c++/$(BUILD_PLATFORM)/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm

Modified: hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh Fri May 27 21:12:02 2011
@@ -16,8 +16,8 @@
 # limitations under the License.
 #
 
-if [ "$HADOOP_HOME" = "" ]; then
-export HADOOP_HOME=/usr/local/share/hadoop
+if [ "$HADOOP_PREFIX" = "" ]; then
+export HADOOP_PREFIX=/usr/local/share/hadoop
 fi
 
 if [ "$OS_ARCH" = "" ]; then
@@ -33,16 +33,16 @@ export LD_LIBRARY_PATH=$JAVA_HOME/jre/li
 fi
 
 # If dev build set paths accordingly
-if [ -d $HADOOP_HDFS_HOME/build ]; then
-  export HADOOP_HOME=$HADOOP_HDFS_HOME
-  for f in ${HADOOP_HOME}/build/*.jar ; do
+if [ -d $HADOOP_PREFIX/build ]; then
+  export HADOOP_PREFIX=$HADOOP_PREFIX
+  for f in ${HADOOP_PREFIX}/build/*.jar ; do
     export CLASSPATH=$CLASSPATH:$f
   done
-  for f in $HADOOP_HOME/build/ivy/lib/Hadoop-Hdfs/common/*.jar ; do
+  for f in $HADOOP_PREFIX/build/ivy/lib/Hadoop-Hdfs/common/*.jar ; do
     export CLASSPATH=$CLASSPATH:$f
   done
-  export PATH=$HADOOP_HOME/build/contrib/fuse-dfs:$PATH
-  export LD_LIBRARY_PATH=$HADOOP_HOME/build/c++/lib:$JAVA_HOME/jre/lib/$OS_ARCH/server
+  export PATH=$HADOOP_PREFIX/build/contrib/fuse-dfs:$PATH
+  export LD_LIBRARY_PATH=$HADOOP_PREFIX/build/c++/lib:$JAVA_HOME/jre/lib/$OS_ARCH/server
 fi
 
 fuse_dfs $@

Propchange: hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 27 21:12:02 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy:820487
-/hadoop/hdfs/trunk/src/contrib/hdfsproxy:1086482-1126286
+/hadoop/hdfs/trunk/src/contrib/hdfsproxy:1086482-1128452

Modified: hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/README
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/README?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/README (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/README Fri May 27 21:12:02 2011
@@ -38,10 +38,10 @@ B) With Tomcat-based Installation, suppo
 > Standard HTTPS Get Support for file transfer
 
 The detailed configuration/set-up guide is in the Forrest 
-documentation, which can be found at $HADOOP_HDFS_HOME/docs. In order to build the 
+documentation, which can be found at $HADOOP_PREFIX/docs. In order to build the 
 documentation on your own from source please use the following command in 
 the downloaded source folder:
 
 ant docs -Dforrest.home=path to forrest -Djava5.home= path to jdk5. 
 
-The documentation so built would be under $HADOOP_HDFS_HOME/build/docs
+The documentation so built would be under $HADOOP_PREFIX/build/docs

Modified: hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/hdfsproxy.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/hdfsproxy.xml?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/hdfsproxy.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/hdfsproxy.xml Fri May 27 21:12:02 2011
@@ -272,11 +272,11 @@
       </section>
       <section>
         <title> Build Process </title>        
-        <p>Under <code>$HADOOP_HDFS_HOME</code> do the following <br/>
+        <p>Under <code>$HADOOP_PREFIX</code> do the following <br/>
           <code> $ ant clean tar</code> <br/>
           <code> $ cd src/contrib/hdfsproxy/</code> <br/>
           <code> $ ant clean tar</code> <br/>
-          The <code>hdfsproxy-*.tar.gz</code> file will be generated under <code>$HADOOP_HDFS_HOME/build/contrib/hdfsproxy/</code>. Use this tar ball to proceed for the server start-up/shutdown process after necessary configuration. 
+          The <code>hdfsproxy-*.tar.gz</code> file will be generated under <code>$HADOOP_PREFIX/build/contrib/hdfsproxy/</code>. Use this tar ball to proceed for the server start-up/shutdown process after necessary configuration. 
         </p>
       </section>  
       <section>
@@ -494,22 +494,22 @@
           <title> Build and Deployment Process </title>  
           <section>
             <title> Build forwarding war (ROOT.war) </title>
-            <p>Suppose hdfsproxy-default.xml has been properly configured and it is under ${user.home}/proxy-root-conf dir. Under <code>$HADOOP_HDFS_HOME</code> do the following <br/>
+            <p>Suppose hdfsproxy-default.xml has been properly configured and it is under ${user.home}/proxy-root-conf dir. Under <code>$HADOOP_PREFIX</code> do the following <br/>
               <code> $ export HDFSPROXY_CONF_DIR=${user.home}/proxy-root-conf</code> <br/>
               <code> $ ant clean tar</code> <br/>
               <code> $ cd src/contrib/hdfsproxy/</code> <br/>
               <code> $ ant clean forward</code> <br/>
-              The <code>hdfsproxy-forward-*.war</code> file will be generated under <code>$HADOOP_HDFS_HOME/build/contrib/hdfsproxy/</code>. Copy this war file to tomcat's webapps directory and rename it at ROOT.war (if ROOT dir already exists, remove it first) for deployment. 
+              The <code>hdfsproxy-forward-*.war</code> file will be generated under <code>$HADOOP_PREFIX/build/contrib/hdfsproxy/</code>. Copy this war file to tomcat's webapps directory and rename it at ROOT.war (if ROOT dir already exists, remove it first) for deployment. 
             </p>
           </section>
           <section>
             <title> Build cluster client war (client.war) </title>
-            <p>Suppose hdfsproxy-default.xml has been properly configured and it is under ${user.home}/proxy-client-conf dir. Under <code>$HADOOP_HDFS_HOME</code> do the following <br/>
+            <p>Suppose hdfsproxy-default.xml has been properly configured and it is under ${user.home}/proxy-client-conf dir. Under <code>$HADOOP_PREFIX</code> do the following <br/>
               <code> $ export HDFSPROXY_CONF_DIR=${user.home}/proxy-client-conf</code> <br/>
               <code> $ ant clean tar</code> <br/>
               <code> $ cd src/contrib/hdfsproxy/</code> <br/>
               <code> $ ant clean war</code> <br/>
-              The <code>hdfsproxy-*.war</code> file will be generated under <code>$HADOOP_HDFS_HOME/build/contrib/hdfsproxy/</code>. Copy this war file to tomcat's webapps directory and rename it properly for deployment. 
+              The <code>hdfsproxy-*.war</code> file will be generated under <code>$HADOOP_PREFIX/build/contrib/hdfsproxy/</code>. Copy this war file to tomcat's webapps directory and rename it properly for deployment. 
             </p>
           </section>
           <section>

Modified: hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/libhdfs.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/libhdfs.xml?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/libhdfs.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/docs/src/documentation/content/xdocs/libhdfs.xml Fri May 27 21:12:02 2011
@@ -34,7 +34,7 @@
 libhdfs is a JNI based C API for Hadoop's Distributed File System (HDFS).
 It provides C APIs to a subset of the HDFS APIs to manipulate HDFS files and
 the filesystem. libhdfs is part of the Hadoop distribution and comes 
-pre-compiled in ${HADOOP_HOME}/libhdfs/libhdfs.so .
+pre-compiled in ${HADOOP_PREFIX}/libhdfs/libhdfs.so .
 </p>
 
 </section>
@@ -45,7 +45,7 @@ pre-compiled in ${HADOOP_HOME}/libhdfs/l
 The libhdfs APIs are a subset of: <a href="api/org/apache/hadoop/fs/FileSystem.html" >hadoop fs APIs</a>.  
 </p>
 <p>
-The header file for libhdfs describes each API in detail and is available in ${HADOOP_HOME}/src/c++/libhdfs/hdfs.h
+The header file for libhdfs describes each API in detail and is available in ${HADOOP_PREFIX}/src/c++/libhdfs/hdfs.h
 </p>
 </section>
 <section>
@@ -77,8 +77,8 @@ int main(int argc, char **argv) {
 <section>
 <title>How To Link With The Library</title>
 <p>
-See the Makefile for hdfs_test.c in the libhdfs source directory (${HADOOP_HOME}/src/c++/libhdfs/Makefile) or something like:<br />
-gcc above_sample.c -I${HADOOP_HOME}/src/c++/libhdfs -L${HADOOP_HOME}/libhdfs -lhdfs -o above_sample
+See the Makefile for hdfs_test.c in the libhdfs source directory (${HADOOP_PREFIX}/src/c++/libhdfs/Makefile) or something like:<br />
+gcc above_sample.c -I${HADOOP_PREFIX}/src/c++/libhdfs -L${HADOOP_PREFIX}/libhdfs -lhdfs -o above_sample
 </p>
 </section>
 <section>
@@ -86,8 +86,8 @@ gcc above_sample.c -I${HADOOP_HOME}/src/
 <p>
 The most common problem is the CLASSPATH is not set properly when calling a program that uses libhdfs. 
 Make sure you set it to all the Hadoop jars needed to run Hadoop itself. Currently, there is no way to 
-programmatically generate the classpath, but a good bet is to include all the jar files in ${HADOOP_HOME} 
-and ${HADOOP_HOME}/lib as well as the right configuration directory containing hdfs-site.xml
+programmatically generate the classpath, but a good bet is to include all the jar files in ${HADOOP_PREFIX} 
+and ${HADOOP_PREFIX}/lib as well as the right configuration directory containing hdfs-site.xml
 </p>
 </section>
 <section>

Propchange: hadoop/hdfs/branches/HDFS-1073/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 27 21:12:02 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java:820487
-/hadoop/hdfs/trunk/src/java:1086482-1126286
+/hadoop/hdfs/trunk/src/java:1086482-1128452

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java Fri May 27 21:12:02 2011
@@ -104,5 +104,6 @@ public class HdfsConfiguration extends C
     deprecate("dfs.write.packet.size", DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY);
     deprecate("dfs.block.size", DFSConfigKeys.DFS_BLOCK_SIZE_KEY);
     deprecate("dfs.datanode.max.xcievers", DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY);
+    deprecate("io.bytes.per.checksum", DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY);
   }
 }
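
The new entry means a config that still sets io.bytes.per.checksum is mapped onto the
current checksum key when HdfsConfiguration loads. A minimal sketch of how that mapping
would be observed; the literal key string "dfs.bytes-per-checksum" and the 512 default
are assumptions, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY is the canonical name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DeprecatedKeyDemo {
      public static void main(String[] args) {
        // Constructing HdfsConfiguration registers the deprecation table shown above.
        Configuration conf = new HdfsConfiguration();

        // An old-style setting...
        conf.setInt("io.bytes.per.checksum", 1024);

        // ...should be visible under the new key, so code reading
        // DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY keeps working. Expected: 1024.
        System.out.println(conf.getInt("dfs.bytes-per-checksum", 512));
      }
    }
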

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java Fri May 27 21:12:02 2011
@@ -42,6 +42,7 @@ public interface HdfsConstants {
   static public enum StartupOption{
     FORMAT  ("-format"),
     CLUSTERID ("-clusterid"),
+    GENCLUSTERID ("-genclusterid"),
     REGULAR ("-regular"),
     BACKUP  ("-backup"),
     CHECKPOINT("-checkpoint"),

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri May 27 21:12:02 2011
@@ -998,9 +998,14 @@ public class DataNode extends Configured
       if (blockScanner != null) {
         blockScanner.removeBlockPool(this.getBlockPoolId());
       }
-     
-      data.shutdownBlockPool(this.getBlockPoolId());
-      storage.removeBlockPoolStorage(this.getBlockPoolId());
+    
+      if (data != null) { 
+        data.shutdownBlockPool(this.getBlockPoolId());
+      }
+
+      if (storage != null) {
+        storage.removeBlockPoolStorage(this.getBlockPoolId());
+      }
     }
 
     /**

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Fri May 27 21:12:02 2011
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.util.StringUtils;
 
 /** 
  * Data storage information file.
@@ -170,14 +171,17 @@ public class DataStorage extends Storage
         }
       } catch (IOException ioe) {
         sd.unlock();
-        throw ioe;
+        LOG.warn("Ignoring storage directory "+ dataDir
+        		+ " due to an exception: " + StringUtils.stringifyException(ioe));
+        //continue with other good dirs
+        continue;
       }
       // add to the storage list
       addStorageDir(sd);
       dataDirStates.add(curState);
     }
 
-    if (dataDirs.size() == 0)  // none of the data dirs exist
+    if (dataDirs.size() == 0 || dataDirStates.size() == 0)  // none of the data dirs exist
       throw new IOException(
           "All specified directories are not accessible or do not exist.");
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Fri May 27 21:12:02 2011
@@ -84,8 +84,8 @@ public class DatanodeJspHelper {
                                          Configuration conf
                                          ) throws IOException,
                                                   InterruptedException {
-    final String dir = StringEscapeUtils.unescapeHtml(
-        JspHelper.validatePath(req.getParameter("dir")));
+    final String dir = JspHelper.validatePath(
+        StringEscapeUtils.unescapeHtml(req.getParameter("dir")));
     if (dir == null) {
       out.print("Invalid input");
       return;
@@ -621,7 +621,7 @@ public class DatanodeJspHelper {
     }
 
     final String filename = JspHelper
-        .validatePath(req.getParameter(StringEscapeUtils.unescapeHtml("filename")));
+        .validatePath(StringEscapeUtils.unescapeHtml(req.getParameter("filename")));
     if (filename == null) {
       out.print("Invalid input (file name absent)");
       return;
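
The reordering matters because the path should be validated after HTML entities are
decoded, not before; the second hunk additionally stops unescaping the literal parameter
name "filename" instead of its value. A small illustration of why the order matters,
using a hypothetical validator in place of JspHelper.validatePath:

    import org.apache.commons.lang.StringEscapeUtils;

    public class ValidateOrderDemo {
      // Hypothetical validator: accept only absolute paths without "..".
      static String validatePath(String p) {
        return (p != null && p.startsWith("/") && !p.contains("..")) ? p : null;
      }

      public static void main(String[] args) {
        String raw = "/user/foo/&#46;&#46;/secret";   // "&#46;" is an HTML-escaped '.'

        // Old order: validate first, then unescape -- the escaped form looks harmless.
        String oldWay = StringEscapeUtils.unescapeHtml(validatePath(raw));
        // New order: unescape first, then validate -- the ".." is visible and rejected.
        String newWay = validatePath(StringEscapeUtils.unescapeHtml(raw));

        System.out.println(oldWay);  // "/user/foo/../secret" -- slipped through
        System.out.println(newWay);  // null -- rejected
      }
    }
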

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Fri May 27 21:12:02 2011
@@ -1145,12 +1145,21 @@ public class FSDataset implements FSCons
     final int volFailuresTolerated =
       conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
                   DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
-    this.validVolsRequired = storage.getNumStorageDirs() - volFailuresTolerated; 
-    if (validVolsRequired < 1 ||
-        validVolsRequired > storage.getNumStorageDirs()) {
-      DataNode.LOG.error("Invalid value " + volFailuresTolerated + " for " +
-          DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY);
+
+    String[] dataDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
+
+    int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
+
+    this.validVolsRequired = volsConfigured - volFailuresTolerated;
+
+    if (validVolsRequired < 1
+        || validVolsRequired > storage.getNumStorageDirs()) {
+      throw new DiskErrorException("Too many failed volumes - "
+          + "current valid volumes: " + storage.getNumStorageDirs() 
+          + ", volumes configured: " + volsConfigured 
+          + ", volume failures tolerated: " + volFailuresTolerated );
     }
+
     FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
       volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(),
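
The replacement derives the minimum number of healthy volumes from the number of
configured data directories rather than from whatever storage happened to initialize,
and turns a bad combination into a hard failure instead of a log line. A worked example
of the arithmetic; the numbers are assumptions and a plain RuntimeException stands in
for DiskErrorException:

    public class VolumeToleranceDemo {
      public static void main(String[] args) {
        int volsConfigured = 4;          // dfs.datanode.data.dir lists 4 directories
        int volFailuresTolerated = 1;    // dfs.datanode.failed.volumes.tolerated
        int numStorageDirs = 3;          // only 3 directories actually came up

        int validVolsRequired = volsConfigured - volFailuresTolerated;  // 3

        // 3 required <= 3 available: startup proceeds. With numStorageDirs = 2,
        // or volFailuresTolerated >= volsConfigured, the check fails and the
        // datanode refuses to start.
        if (validVolsRequired < 1 || validVolsRequired > numStorageDirs) {
          throw new RuntimeException("Too many failed volumes - current valid volumes: "
              + numStorageDirs + ", volumes configured: " + volsConfigured
              + ", volume failures tolerated: " + volFailuresTolerated);
        }
        System.out.println("Datanode may start with " + numStorageDirs + " volumes");
      }
    }
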

Propchange: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
            ('svn:mergeinfo' removed)

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri May 27 21:12:02 2011
@@ -1570,6 +1570,8 @@ public class NameNode implements Namenod
           i += 2;
           startOpt.setClusterId(args[i]);
         }
+      } else if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
+        startOpt = StartupOption.GENCLUSTERID;
       } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.REGULAR;
       } else if (StartupOption.BACKUP.getName().equalsIgnoreCase(cmd)) {
@@ -1649,6 +1651,11 @@ public class NameNode implements Namenod
         boolean aborted = format(conf, true);
         System.exit(aborted ? 1 : 0);
         return null; // avoid javac warning
+      case GENCLUSTERID:
+        System.err.println("Generating new cluster id:");
+        System.out.println(NNStorage.newClusterID());
+        System.exit(0);
+        return null;
       case FINALIZE:
         aborted = finalize(conf, true);
         System.exit(aborted ? 1 : 0);
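
With the new StartupOption and the extra parser branch, "namenode -genclusterid" prints
a freshly generated cluster ID via NNStorage.newClusterID() and exits, so an ID can be
minted once and passed to "-format -clusterid" on each namenode. A hedged sketch of the
dispatch, with a stubbed generator standing in for NNStorage:

    import java.util.UUID;

    public class GenClusterIdDemo {
      // Stand-in for NNStorage.newClusterID(); the real ID format is an assumption.
      static String newClusterID() {
        return "CID-" + UUID.randomUUID();
      }

      public static void main(String[] args) {
        String cmd = args.length > 0 ? args[0] : "-regular";
        if ("-genclusterid".equalsIgnoreCase(cmd)) {
          // Mirrors the new case above: the banner goes to stderr and the ID to
          // stdout, so the ID can be captured cleanly by a calling script.
          System.err.println("Generating new cluster id:");
          System.out.println(newClusterID());
          System.exit(0);
        }
        // ... other startup options (-format, -backup, ...) would be handled here.
      }
    }
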

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Fri May 27 21:12:02 2011
@@ -187,6 +187,7 @@ class TransferFsImage implements FSConst
       digester = MD5Hash.getDigester();
       stream = new DigestInputStream(stream, digester);
     }
+    boolean finishedReceiving = false;
 
     if (localPaths == null) {
       localPaths = Collections.emptyList(); 
@@ -212,13 +213,17 @@ class TransferFsImage implements FSConst
           }
         }
       }
+      finishedReceiving = true;
     } finally {
       stream.close();
       for (FileOutputStream fos : outputStreams) {
         fos.getChannel().force(true);
         fos.close();
       }
-      if (received != advertisedSize) {
+      if (finishedReceiving && received != advertisedSize) {
+        // only throw this exception if we think we read all of it on our end
+        // -- otherwise a client-side IOException would be masked by this
+        // exception that makes it look like a server-side problem!
         throw new IOException("File " + str + " received length " + received +
                               " is not of the advertised size " +
                               advertisedSize);
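
The new flag changes which failure the caller sees: the length check in the finally
block now fires only when the read loop ran to completion, so a genuine client-side
IOException is no longer replaced by a misleading size-mismatch error. The pattern in
isolation, with the image-transfer details stubbed out:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class FinishedReceivingDemo {
      static long copy(InputStream stream, long advertisedSize) throws IOException {
        long received = 0;
        boolean finishedReceiving = false;
        try {
          byte[] buf = new byte[4096];
          int n;
          while ((n = stream.read(buf)) > 0) {   // may throw mid-transfer
            received += n;
          }
          finishedReceiving = true;              // only reached if the loop completed
        } finally {
          stream.close();
          // Without the flag, an IOException thrown inside the loop would be
          // superseded by this one, hiding the real (client-side) cause.
          if (finishedReceiving && received != advertisedSize) {
            throw new IOException("received length " + received
                + " is not of the advertised size " + advertisedSize);
          }
        }
        return received;
      }

      public static void main(String[] args) {
        try {
          // Advertised 200 bytes but only 100 arrive: the loop finishes normally,
          // the flag is set, and the size-mismatch exception is raised.
          copy(new ByteArrayInputStream(new byte[100]), 200);
        } catch (IOException e) {
          System.out.println("Caught: " + e.getMessage());
        }
      }
    }
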

Propchange: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 27 21:12:02 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/test/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/test/hdfs:820487
-/hadoop/hdfs/trunk/src/test/hdfs:1086482-1126286
+/hadoop/hdfs/trunk/src/test/hdfs:1086482-1128452

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java Fri May 27 21:12:02 2011
@@ -34,7 +34,7 @@ import org.junit.Test;
 public class TestHDFSCLI extends CLITestHelperDFS {
 
   protected MiniDFSCluster dfsCluster = null;
-  protected DistributedFileSystem dfs = null;
+  protected FileSystem fs = null;
   protected String namenode = null;
   
   @Before
@@ -61,10 +61,9 @@ public class TestHDFSCLI extends CLITest
     
     username = System.getProperty("user.name");
 
-    FileSystem fs = dfsCluster.getFileSystem();
+    fs = dfsCluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
                fs instanceof DistributedFileSystem);
-    dfs = (DistributedFileSystem) fs;
   }
 
   @Override
@@ -75,7 +74,8 @@ public class TestHDFSCLI extends CLITest
   @After
   @Override
   public void tearDown() throws Exception {
-    dfs.close();
+    if (null != fs)
+      fs.close();
     dfsCluster.shutdown();
     Thread.sleep(2000);
     super.tearDown();


