hadoop-common-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1150969 [1/2] - in /hadoop/common/branches/HDFS-1073/common: ./ bin/ conf/ src/ src/docs/ src/docs/cn/ src/java/ src/java/org/apache/hadoop/conf/ src/java/org/apache/hadoop/fs/ src/java/org/apache/hadoop/fs/shell/ src/java/org/apache/hadoo...
Date: Tue, 26 Jul 2011 01:53:19 GMT
Author: todd
Date: Tue Jul 26 01:53:10 2011
New Revision: 1150969

URL: http://svn.apache.org/viewvc?rev=1150969&view=rev
Log:
Merge hdfs and common trunk into branch

Added:
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/TrashPolicy.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/fs/TrashPolicy.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/TrashPolicyDefault.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/fs/TrashPolicyDefault.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/DataInputByteBuffer.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/io/DataInputByteBuffer.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/DataOutputByteBuffer.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/io/DataOutputByteBuffer.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/DefaultFailoverProxyProvider.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/io/retry/DefaultFailoverProxyProvider.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/Idempotent.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/io/retry/Idempotent.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/ipc/StandbyException.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/ipc/StandbyException.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/
      - copied from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaConf.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaConf.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaMetricVisitor.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaMetricVisitor.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink31.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink31.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/util/PureJavaCrc32C.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/java/org/apache/hadoop/util/PureJavaCrc32C.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/io/TestDataByteBuffers.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/test/core/org/apache/hadoop/io/TestDataByteBuffers.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/io/retry/TestFailoverProxy.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/test/core/org/apache/hadoop/io/retry/TestFailoverProxy.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/test/core/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/metrics2/sink/
      - copied from r1150966, hadoop/common/trunk/common/src/test/core/org/apache/hadoop/metrics2/sink/
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/metrics2/sink/ganglia/
      - copied from r1150966, hadoop/common/trunk/common/src/test/core/org/apache/hadoop/metrics2/sink/ganglia/
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/metrics2/sink/ganglia/GangliaMetricsTestHelper.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/test/core/org/apache/hadoop/metrics2/sink/ganglia/GangliaMetricsTestHelper.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/test/MultithreadedTestUtil.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/test/core/org/apache/hadoop/test/MultithreadedTestUtil.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/test/TestMultithreadedTestUtil.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/test/core/org/apache/hadoop/test/TestMultithreadedTestUtil.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/util/TestDataChecksum.java
      - copied unchanged from r1150966, hadoop/common/trunk/common/src/test/core/org/apache/hadoop/util/TestDataChecksum.java
Removed:
    hadoop/common/branches/HDFS-1073/common/src/docs/cn/
Modified:
    hadoop/common/branches/HDFS-1073/common/   (props changed)
    hadoop/common/branches/HDFS-1073/common/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/HDFS-1073/common/bin/hadoop-daemon.sh
    hadoop/common/branches/HDFS-1073/common/build.xml
    hadoop/common/branches/HDFS-1073/common/conf/hadoop-metrics2.properties
    hadoop/common/branches/HDFS-1073/common/src/docs/   (props changed)
    hadoop/common/branches/HDFS-1073/common/src/java/   (props changed)
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/conf/Configuration.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/Trash.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/CopyCommands.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/Delete.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/http/HttpServer.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryPolicies.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryPolicy.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryProxy.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/ipc/Client.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/log/LogLevel.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/metrics2/util/MetricsCache.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/DNSToSwitchMapping.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/ScriptBasedMapping.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/AnnotatedSecurityInfo.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/SecurityInfo.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/SecurityUtil.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/util/DataChecksum.java
    hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/util/StringUtils.java
    hadoop/common/branches/HDFS-1073/common/src/saveVersion.sh
    hadoop/common/branches/HDFS-1073/common/src/test/core/   (props changed)
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/conf/TestConfiguration.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/fs/TestTrash.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/http/TestHttpServer.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/io/TestSequenceFile.java   (props changed)
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/io/file/tfile/TestTFileByteArrays.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/io/file/tfile/TestTFileJClassComparatorByteArrays.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/io/file/tfile/TestTFileStreams.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/io/retry/UnreliableImplementation.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/io/retry/UnreliableInterface.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/ipc/TestSaslRPC.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/metrics2/util/TestMetricsCache.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/net/StaticMapping.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/net/TestScriptBasedMapping.java
    hadoop/common/branches/HDFS-1073/common/src/test/core/org/apache/hadoop/util/TestPureJavaCrc32.java

Propchange: hadoop/common/branches/HDFS-1073/common/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jul 26 01:53:10 2011
@@ -1,2 +1,2 @@
-/hadoop/common/trunk/common:1134995-1143556
+/hadoop/common/trunk/common:1134995-1150966
 /hadoop/core/branches/branch-0.19/core:713112

Modified: hadoop/common/branches/HDFS-1073/common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/CHANGES.txt?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-1073/common/CHANGES.txt Tue Jul 26 01:53:10 2011
@@ -12,6 +12,8 @@ Trunk (unreleased changes)
 
   NEW FEATURES
 
+    HADOOP-7324. Ganglia plugins for metrics v2. (Priyo Mustafi via llu)
+
     HADOOP-7342. Add an utility API in FileUtil for JDK File.list
     avoid NPEs on File.list() (Bharath Mundlapudi via mattf)
 
@@ -53,6 +55,11 @@ Trunk (unreleased changes)
     HADOOP-7329. Add the capability of getting invividual attribute of a mbean
     using JMXProxyServlet. (tanping)
 
+    HADOOP-7380. Add client failover functionality to o.a.h.io.(ipc|retry).
+    (atm via eli)
+
+    HADOOP-7460. Support pluggable trash policies. (Usman Masoon via suresh)
+
   IMPROVEMENTS
 
     HADOOP-7042. Updates to test-patch.sh to include failed test names and
@@ -239,6 +246,43 @@ Trunk (unreleased changes)
 
     HADOOP-7429. Add another IOUtils#copyBytes method. (eli)
 
+    HADOOP-7451. Generalize StringUtils#join. (Chris Douglas via mattf)
+
+    HADOOP-7449. Add Data(In,Out)putByteBuffer to work with ByteBuffer similar 
+    to Data(In,Out)putBuffer for byte[].  Merge from yahoo-merge branch,
+    -r 1079163.  Fix missing Apache license headers. (Chris Douglas via mattf)
+
+    HADOOP-7361. Provide an option, -overwrite/-f, in put and copyFromLocal
+    shell commands.  (Uma Maheswara Rao G via szetszwo)
+
+    HADOOP-7430. Improve error message when moving to trash fails due to 
+    quota issue. (Ravi Prakash via mattf)
+
+    HADOOP-7457. Remove out-of-date Chinese language documentation.
+    (Jakob Homan via eli)
+
+    HADOOP-7444. Add Checksum API to verify and calculate checksums "in bulk"
+    (todd)
+
+    HADOOP-7443. Add CRC32C as another DataChecksum implementation (todd)
+
+    HADOOP-7305. Eclipse project files are incomplete. (Niels Basjes via eli)
+
+    HADOOP-7314. Add support for throwing UnknownHostException when a host doesn't 
+    resolve. (Jeffrey Naisbitt via jitendra)
+
+    HADOOP-7465. A several tiny improvements for the LOG format.
+    (Xie Xianshan via eli)
+
+    HADOOP-7434. Display error when using "daemonlog -setlevel" with
+    illegal level. (yanjinshuang via eli)
+
+    HADOOP-7463. Adding a configuration parameter to SecurityInfo interface.
+    (mahadev)
+
+    HADOOP-7298. Add test utility for writing multi-threaded tests. (todd and
+    Harsh J Chouraria via todd)
+
   OPTIMIZATIONS
   
     HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole
@@ -306,7 +350,8 @@ Trunk (unreleased changes)
     HADOOP-7268. FileContext.getLocalFSFileContext() behavior needs to be fixed
     w.r.t tokens. (jitendra)
 
-    HADOOP-7290. Unit test failure in TestUserGroupInformation.testGetServerSideGroups. (Trevor Robison via eli)
+    HADOOP-7290. Unit test failure in 
+    TestUserGroupInformation.testGetServerSideGroups. (Trevor Robison via eli)
 
     HADOOP-7292. Fix racy test case TestSinkQueue. (Luke Lu via todd)
 
@@ -358,6 +403,23 @@ Trunk (unreleased changes)
     HADOOP-7419. new hadoop-config.sh doesn't manage classpath for
     HADOOP_CONF_DIR correctly. (Bing Zheng and todd via todd)
 
+    HADOOP-7448. merge from yahoo-merge branch (via mattf):
+    -r 1079157: Fix content type for /stacks servlet to be 
+    plain text (Luke Lu)
+    -r 1079164: No need to escape plain text (Luke Lu)
+
+    HADOOP-7471. The saveVersion.sh script sometimes fails to extract SVN URL.
+    (Alejandro Abdelnur via eli)
+
+    HADOOP-2081. Configuration getInt, getLong, and getFloat replace
+    invalid numbers with the default value. (Harsh J via eli)
+
+    HADOOP-7111. Several TFile tests failing when native libraries are
+    present. (atm)
+
+    HADOOP-7438. Fix deprecated warnings from hadoop-daemon.sh script.
+    (Ravi Prakash via suresh)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

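Two of the merged entries above add checksum machinery: HADOOP-7443 introduces PureJavaCrc32C and HADOOP-7444 adds a bulk verify/calculate API on DataChecksum. The bulk API is not shown in this message, but assuming PureJavaCrc32C follows the java.util.zip.Checksum contract like the existing PureJavaCrc32, a minimal usage sketch looks like:

    import java.util.zip.Checksum;
    import org.apache.hadoop.util.PureJavaCrc32C;

    public class Crc32cExample {
      public static void main(String[] args) {
        // Assumes PureJavaCrc32C implements java.util.zip.Checksum,
        // mirroring the existing PureJavaCrc32.
        Checksum crc = new PureJavaCrc32C();
        byte[] data = "hello, checksums".getBytes();
        crc.update(data, 0, data.length);
        System.out.printf("crc32c=%08x%n", crc.getValue());
      }
    }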
Propchange: hadoop/common/branches/HDFS-1073/common/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jul 26 01:53:10 2011
@@ -1,4 +1,5 @@
-/hadoop/common/trunk/common/CHANGES.txt:1134995-1143556
+/hadoop/common/branches/yahoo-merge/CHANGES.txt:1079157,1079163-1079164,1079167
+/hadoop/common/trunk/common/CHANGES.txt:1134995-1150966
 /hadoop/core/branches/branch-0.18/CHANGES.txt:727226
 /hadoop/core/branches/branch-0.19/CHANGES.txt:713112
 /hadoop/core/trunk/CHANGES.txt:776175-785643,785929-786278

Modified: hadoop/common/branches/HDFS-1073/common/bin/hadoop-daemon.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/bin/hadoop-daemon.sh?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/bin/hadoop-daemon.sh (original)
+++ hadoop/common/branches/HDFS-1073/common/bin/hadoop-daemon.sh Tue Jul 26 01:53:10 2011
@@ -137,7 +137,19 @@ case $startStop in
     hadoop_rotate_log $log
     echo starting $command, logging to $log
     cd "$HADOOP_PREFIX"
-    nohup nice -n $HADOOP_NICENESS $hadoopScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+    case $command in
+      namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer)
+        if [ -z "$HADOOP_HDFS_HOME" ]; then
+          hdfsScript="$HADOOP_PREFIX"/bin/hdfs
+        else
+          hdfsScript="$HADOOP_HDFS_HOME"/bin/hdfs
+        fi
+        nohup nice -n $HADOOP_NICENESS $hdfsScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+      ;;
+      (*)
+        nohup nice -n $HADOOP_NICENESS $hadoopScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+      ;;
+    esac
     echo $! > $pid
     sleep 1; head "$log"
     sleep 3;

Modified: hadoop/common/branches/HDFS-1073/common/build.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/build.xml?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/build.xml (original)
+++ hadoop/common/branches/HDFS-1073/common/build.xml Tue Jul 26 01:53:10 2011
@@ -43,7 +43,6 @@
   <property name="lib.dir" value="${basedir}/lib"/>
   <property name="conf.dir" value="${basedir}/conf"/>
   <property name="docs.src" value="${basedir}/src/docs"/>
-  <property name="src.docs.cn" value="${basedir}/src/docs/cn"/>
   <property name="changes.src" value="${docs.src}/changes"/>
   <property name="src.webapps" value="${basedir}/src/webapps"/>
 
@@ -64,7 +63,6 @@
             value="${sun.arch.data.model}"/>
   <property name="build.native" value="${build.dir}/native/${build.platform}"/>
   <property name="build.docs" value="${build.dir}/docs"/>
-  <property name="build.docs.cn" value="${build.dir}/docs/cn"/>
   <property name="build.javadoc" value="${build.docs}/api"/>
   <property name="build.javadoc.timestamp" value="${build.javadoc}/index.html" />
   <property name="build.javadoc.dev" value="${build.docs}/dev-api"/>
@@ -321,9 +319,6 @@
        <arg line="src/saveVersion.sh ${version} ${build.dir}"/>
     </exec>
 	
-   <exec executable="sh">
-       <arg line="src/fixFontsPath.sh ${src.docs.cn}"/>
-   </exec>
   </target>
 
   <import file="${test.src.dir}/aop/build/aop.xml"/>
@@ -955,21 +950,6 @@
     <style basedir="${java.src.dir}" destdir="${build.docs}"
            includes="core-default.xml" style="conf/configuration.xsl"/>
     <antcall target="changes-to-html"/>
-    <antcall target="cn-docs"/>
-  </target>
-
-  <target name="cn-docs" depends="forrest.check, init" description="Generate forrest-based Chinese documentation. 
-        To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line." 
-        if="forrest.home">
-    <exec dir="${src.docs.cn}" executable="${forrest.home}/bin/forrest" failonerror="true">
-      <env key="LANG" value="en_US.utf8"/>
-    </exec>
-    <copy todir="${build.docs.cn}">
-      <fileset dir="${src.docs.cn}/build/site/" />
-    </copy>
-    <style basedir="${java.src.dir}" destdir="${build.docs.cn}"
-           includes="core-default.xml" style="conf/configuration.xsl"/>
-    <antcall target="changes-to-html"/>
   </target>
 
   <target name="forrest.check" unless="forrest.home">
@@ -1613,7 +1593,6 @@
     <delete file="${basedir}/ivy/hadoop-common-test-pom.xml"/>
     <delete file="${basedir}/ivy/hadoop-common-${herriot.suffix}.xml"/>
     <delete dir="${docs.src}/build"/>
-    <delete dir="${src.docs.cn}/build"/>
   </target>
 
   <target name="clean-sign" description="Clean.  Delete .asc files">
@@ -1735,10 +1714,29 @@
   <target name="eclipse" 
           depends="init,ant-eclipse-download,ivy-retrieve-common,ivy-retrieve-test,compile-core-test"
           description="Create eclipse project files">
-	     <pathconvert property="eclipse.project">
-	       <path path="${basedir}"/>
-	       <regexpmapper from="^.*/([^/]+)$$" to="\1" handledirsep="yes"/>
-	     </pathconvert>
+
+    <property environment="env"/>
+
+    <!-- Locate the tools.jar which is part of the JDK -->
+    <condition property="jdk.tools.jar" value="${env.JDK_HOME}/lib/tools.jar">
+      <available file="${env.JDK_HOME}/lib/tools.jar"/>
+    </condition>
+    <condition property="jdk.tools.jar" value="${env.JAVA_HOME}/lib/tools.jar">
+      <available file="${env.JAVA_HOME}/lib/tools.jar"/>
+    </condition>
+    <condition property="jdk.tools.jar" value="${java.home}/../lib/tools.jar">
+      <available file="${java.home}/../lib/tools.jar"/>
+    </condition>
+
+    <!-- The tools.jar from the JDK is called classes.jar on OS X. -->
+    <condition property="jdk.tools.jar" value="${java.home}/bundle/Classes/classes.jar">
+      <available file="${java.home}/bundle/Classes/classes.jar"/>
+    </condition>  	
+
+    <pathconvert property="eclipse.project">
+      <path path="${basedir}"/>
+      <regexpmapper from="^.*/([^/]+)$$" to="\1" handledirsep="yes"/>
+    </pathconvert>
     <taskdef name="eclipse"
              classname="prantl.ant.eclipse.EclipseTask"
              classpath="${build.dir}/lib/ant-eclipse-1.0-jvm1.2.jar" />
@@ -1758,7 +1756,7 @@
         <library pathref="ivy-test.classpath" exported="false" />
         <variable path="ANT_HOME/lib/ant.jar" exported="false" />
         <library path="${conf.dir}" exported="false" />
-        <library path="${java.home}/../lib/tools.jar" exported="false" />
+        <library path="${jdk.tools.jar}" exported="false" />
       </classpath>
     </eclipse>
   </target>

Modified: hadoop/common/branches/HDFS-1073/common/conf/hadoop-metrics2.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/conf/hadoop-metrics2.properties?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/conf/hadoop-metrics2.properties (original)
+++ hadoop/common/branches/HDFS-1073/common/conf/hadoop-metrics2.properties Tue Jul 26 01:53:10 2011
@@ -25,3 +25,33 @@
 
 #reducetask.sink.file.filename=reducetask-metrics.out
 
+
+#
+# Below are for sending metrics to Ganglia
+#
+# for Ganglia 3.0 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
+#
+# for Ganglia 3.1 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+
+# *.sink.ganglia.period=10
+
+# default for supportsparse is false
+# *.sink.ganglia.supportsparse=true
+
+#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#jobtracker.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#tasktracker.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#maptask.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#reducetask.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+

Propchange: hadoop/common/branches/HDFS-1073/common/src/docs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jul 26 01:53:10 2011
@@ -1,2 +1,2 @@
-/hadoop/common/trunk/common/src/docs:1134995-1143556
+/hadoop/common/trunk/common/src/docs:1134995-1150966
 /hadoop/core/branches/branch-0.19/src/docs:713112

Propchange: hadoop/common/branches/HDFS-1073/common/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jul 26 01:53:10 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/common/src/java:1134995-1143556
+/hadoop/common/trunk/common/src/java:1134995-1150966
 /hadoop/core/branches/branch-0.19/core/src/java:713112
 /hadoop/core/trunk/src/core:776175-785643,785929-786278

Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/conf/Configuration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/conf/Configuration.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/conf/Configuration.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/conf/Configuration.java Tue Jul 26 01:53:10 2011
@@ -682,11 +682,13 @@ public class Configuration implements It
   /** 
    * Get the value of the <code>name</code> property as an <code>int</code>.
    *   
-   * If no such property exists, or if the specified value is not a valid
-   * <code>int</code>, then <code>defaultValue</code> is returned.
+   * If no such property exists, the provided default value is returned,
+   * or if the specified value is not a valid <code>int</code>,
+   * then an error is thrown.
    * 
    * @param name property name.
    * @param defaultValue default value.
+   * @throws NumberFormatException when the value is invalid
    * @return property value as an <code>int</code>, 
    *         or <code>defaultValue</code>. 
    */
@@ -694,15 +696,11 @@ public class Configuration implements It
     String valueString = getTrimmed(name);
     if (valueString == null)
       return defaultValue;
-    try {
-      String hexString = getHexDigits(valueString);
-      if (hexString != null) {
-        return Integer.parseInt(hexString, 16);
-      }
-      return Integer.parseInt(valueString);
-    } catch (NumberFormatException e) {
-      return defaultValue;
+    String hexString = getHexDigits(valueString);
+    if (hexString != null) {
+      return Integer.parseInt(hexString, 16);
     }
+    return Integer.parseInt(valueString);
   }
 
   /** 
@@ -718,11 +716,13 @@ public class Configuration implements It
 
   /** 
    * Get the value of the <code>name</code> property as a <code>long</code>.  
-   * If no such property is specified, or if the specified value is not a valid
-   * <code>long</code>, then <code>defaultValue</code> is returned.
+   * If no such property exists, the provided default value is returned,
+   * or if the specified value is not a valid <code>long</code>,
+   * then an error is thrown.
    * 
    * @param name property name.
    * @param defaultValue default value.
+   * @throws NumberFormatException when the value is invalid
    * @return property value as a <code>long</code>, 
    *         or <code>defaultValue</code>. 
    */
@@ -730,15 +730,11 @@ public class Configuration implements It
     String valueString = getTrimmed(name);
     if (valueString == null)
       return defaultValue;
-    try {
-      String hexString = getHexDigits(valueString);
-      if (hexString != null) {
-        return Long.parseLong(hexString, 16);
-      }
-      return Long.parseLong(valueString);
-    } catch (NumberFormatException e) {
-      return defaultValue;
+    String hexString = getHexDigits(valueString);
+    if (hexString != null) {
+      return Long.parseLong(hexString, 16);
     }
+    return Long.parseLong(valueString);
   }
 
   private String getHexDigits(String value) {
@@ -771,11 +767,13 @@ public class Configuration implements It
 
   /** 
    * Get the value of the <code>name</code> property as a <code>float</code>.  
-   * If no such property is specified, or if the specified value is not a valid
-   * <code>float</code>, then <code>defaultValue</code> is returned.
-   * 
+   * If no such property exists, the provided default value is returned,
+   * or if the specified value is not a valid <code>float</code>,
+   * then an error is thrown.
+   *
    * @param name property name.
    * @param defaultValue default value.
+   * @throws NumberFormatException when the value is invalid
    * @return property value as a <code>float</code>, 
    *         or <code>defaultValue</code>. 
    */
@@ -783,11 +781,7 @@ public class Configuration implements It
     String valueString = getTrimmed(name);
     if (valueString == null)
       return defaultValue;
-    try {
-      return Float.parseFloat(valueString);
-    } catch (NumberFormatException e) {
-      return defaultValue;
-    }
+    return Float.parseFloat(valueString);
   }
   /**
    * Set the value of the <code>name</code> property to a <code>float</code>.

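The hunks above (HADOOP-2081) change getInt, getLong, and getFloat so that a malformed value now surfaces as a NumberFormatException rather than being silently replaced; only a missing property still yields the default. A minimal illustration of the new contract, using made-up property names:

    import org.apache.hadoop.conf.Configuration;

    public class GetIntBehavior {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("example.threads", "not-a-number");  // hypothetical key

        // Absent key: the default is still returned.
        int absent = conf.getInt("example.absent", 8);

        // Invalid value: previously masked by the default, now thrown.
        try {
          conf.getInt("example.threads", 8);
        } catch (NumberFormatException e) {
          System.err.println("invalid int for example.threads: " + e.getMessage());
        }
      }
    }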
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java Tue Jul 26 01:53:10 2011
@@ -232,7 +232,7 @@ public class LocalDirAllocator {
                 dirs.add(localDirs[i]);
                 dfList.add(new DF(new File(localDirs[i]), 30000));
               } catch (DiskErrorException de) {
-                LOG.warn( localDirs[i] + "is not writable\n", de);
+                LOG.warn( localDirs[i] + " is not writable\n", de);
               }
             } else {
               LOG.warn( "Failed to create " + localDirs[i]);

Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/Trash.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/Trash.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/Trash.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/Trash.java Tue Jul 26 01:53:10 2011
@@ -17,60 +17,26 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
-
-import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.text.DateFormat;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.util.StringUtils;
-
-/** Provides a <i>trash</i> feature.  Files are moved to a user's trash
- * directory, a subdirectory of their home directory named ".Trash".  Files are
- * initially moved to a <i>current</i> sub-directory of the trash directory.
- * Within that sub-directory their original path is preserved.  Periodically
- * one may checkpoint the current trash and remove older checkpoints.  (This
- * design permits trash management without enumeration of the full trash
- * content, without date support in the filesystem, and without clock
- * synchronization.)
+
+/** 
+ * Provides a trash facility which supports pluggable Trash policies. 
+ *
+ * See the implementation of the configured TrashPolicy for more
+ * details.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class Trash extends Configured {
-  private static final Log LOG =
-    LogFactory.getLog(Trash.class);
-
-  private static final Path CURRENT = new Path("Current");
-  private static final Path TRASH = new Path(".Trash/");
-  
-
-  private static final FsPermission PERMISSION =
-    new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
-
-  private static final DateFormat CHECKPOINT = new SimpleDateFormat("yyMMddHHmmss");
-  private static final int MSECS_PER_MINUTE = 60*1000;
+  private TrashPolicy trashPolicy; // configured trash policy instance
 
-  private final FileSystem fs;
-  private final Path trash;
-  private final Path current;
-  private final long deletionInterval;
-  private final Path homesParent;
-
-  /** Construct a trash can accessor.
+  /** 
+   * Construct a trash can accessor.
    * @param conf a Configuration
    */
   public Trash(Configuration conf) throws IOException {
@@ -79,22 +45,18 @@ public class Trash extends Configured {
 
   /**
    * Construct a trash can accessor for the FileSystem provided.
+   * @param fs the FileSystem
+   * @param conf a Configuration
    */
   public Trash(FileSystem fs, Configuration conf) throws IOException {
     super(conf);
-    this.fs = fs;
-    this.trash = new Path(fs.getHomeDirectory(), TRASH);
-    this.homesParent = fs.getHomeDirectory().getParent();
-    this.current = new Path(trash, CURRENT);
-    this.deletionInterval = (long) (conf.getFloat(FS_TRASH_INTERVAL_KEY,
-                                         FS_TRASH_INTERVAL_DEFAULT) *
-                                MSECS_PER_MINUTE);
+    trashPolicy = TrashPolicy.getInstance(conf, fs, fs.getHomeDirectory());
   }
-  
+
   /**
    * In case of the symlinks or mount points, one has to move the appropriate
    * trashbin in the actual volume of the path p being deleted.
-   * 
+   *
    * Hence we get the file system of the fully-qualified resolved-path and
    * then move the path p to the trashbin in that volume,
    * @param fs - the filesystem of path p
@@ -115,240 +77,49 @@ public class Trash extends Configured {
     return success;
   }
   
-  private Trash(Path home, Configuration conf) throws IOException {
-    super(conf);
-    this.fs = home.getFileSystem(conf);
-    this.trash = new Path(home, TRASH);
-    this.homesParent = home.getParent();
-    this.current = new Path(trash, CURRENT);
-    this.deletionInterval = (long) (conf.getFloat(FS_TRASH_INTERVAL_KEY,
-                                         FS_TRASH_INTERVAL_DEFAULT) *
-                                MSECS_PER_MINUTE);
-  }
-  
-  private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
-    return new Path(basePath + rmFilePath.toUri().getPath());
-  }
-
   /**
    * Returns whether the trash is enabled for this filesystem
    */
   public boolean isEnabled() {
-    return (deletionInterval != 0);
+    return trashPolicy.isEnabled();
   }
 
   /** Move a file or directory to the current trash directory.
    * @return false if the item is already in the trash or trash is disabled
    */ 
   public boolean moveToTrash(Path path) throws IOException {
-    if (!isEnabled())
-      return false;
-
-    if (!path.isAbsolute())                       // make path absolute
-      path = new Path(fs.getWorkingDirectory(), path);
-
-    if (!fs.exists(path))                         // check that path exists
-      throw new FileNotFoundException(path.toString());
-
-    String qpath = fs.makeQualified(path).toString();
-
-    if (qpath.startsWith(trash.toString())) {
-      return false;                               // already in trash
-    }
-
-    if (trash.getParent().toString().startsWith(qpath)) {
-      throw new IOException("Cannot move \"" + path +
-                            "\" to the trash, as it contains the trash");
-    }
-
-    Path trashPath = makeTrashRelativePath(current, path);
-    Path baseTrashPath = makeTrashRelativePath(current, path.getParent());
-    
-    IOException cause = null;
-
-    // try twice, in case checkpoint between the mkdirs() & rename()
-    for (int i = 0; i < 2; i++) {
-      try {
-        if (!fs.mkdirs(baseTrashPath, PERMISSION)) {      // create current
-          LOG.warn("Can't create(mkdir) trash directory: "+baseTrashPath);
-          return false;
-        }
-      } catch (IOException e) {
-        LOG.warn("Can't create trash directory: "+baseTrashPath);
-        cause = e;
-        break;
-      }
-      try {
-        //
-        // if the target path in Trash already exists, then append with 
-        // a current time in millisecs.
-        //
-        String orig = trashPath.toString();
-        
-        while(fs.exists(trashPath)) {
-          trashPath = new Path(orig + System.currentTimeMillis());
-        }
-        
-        if (fs.rename(path, trashPath))           // move to current trash
-          return true;
-      } catch (IOException e) {
-        cause = e;
-      }
-    }
-    throw (IOException)
-      new IOException("Failed to move to trash: "+path).initCause(cause);
+    return trashPolicy.moveToTrash(path);
   }
 
   /** Create a trash checkpoint. */
   public void checkpoint() throws IOException {
-    if (!fs.exists(current))                      // no trash, no checkpoint
-      return;
-
-    Path checkpoint;
-    synchronized (CHECKPOINT) {
-      checkpoint = new Path(trash, CHECKPOINT.format(new Date()));
-    }
-
-    if (fs.rename(current, checkpoint)) {
-      LOG.info("Created trash checkpoint: "+checkpoint.toUri().getPath());
-    } else {
-      throw new IOException("Failed to checkpoint trash: "+checkpoint);
-    }
+    trashPolicy.createCheckpoint();
   }
 
-  /** Delete old checkpoints. */
+  /** Delete old checkpoint(s). */
   public void expunge() throws IOException {
-    FileStatus[] dirs = null;
-    
-    try {
-      dirs = fs.listStatus(trash);            // scan trash sub-directories
-    } catch (FileNotFoundException fnfe) {
-      return;
-    }
-
-    long now = System.currentTimeMillis();
-    for (int i = 0; i < dirs.length; i++) {
-      Path path = dirs[i].getPath();
-      String dir = path.toUri().getPath();
-      String name = path.getName();
-      if (name.equals(CURRENT.getName()))         // skip current
-        continue;
-
-      long time;
-      try {
-        synchronized (CHECKPOINT) {
-          time = CHECKPOINT.parse(name).getTime();
-        }
-      } catch (ParseException e) {
-        LOG.warn("Unexpected item in trash: "+dir+". Ignoring.");
-        continue;
-      }
-
-      if ((now - deletionInterval) > time) {
-        if (fs.delete(path, true)) {
-          LOG.info("Deleted trash checkpoint: "+dir);
-        } else {
-          LOG.warn("Couldn't delete checkpoint: "+dir+" Ignoring.");
-        }
-      }
-    }
+    trashPolicy.deleteCheckpoint();
   }
 
-  //
-  // get the current working directory
-  //
+  /** get the current working directory */
   Path getCurrentTrashDir() {
-    return current;
+    return trashPolicy.getCurrentTrashDir();
+  }
+
+  /** get the configured trash policy */
+  TrashPolicy getTrashPolicy() {
+    return trashPolicy;
   }
 
   /** Return a {@link Runnable} that periodically empties the trash of all
-   * users, intended to be run by the superuser.  Only one checkpoint is kept
-   * at a time.
+   * users, intended to be run by the superuser.
    */
   public Runnable getEmptier() throws IOException {
-    return new Emptier(getConf());
-  }
-
-  private class Emptier implements Runnable {
-
-    private Configuration conf;
-    private long emptierInterval;
-
-    Emptier(Configuration conf) throws IOException {
-      this.conf = conf;
-      this.emptierInterval = (long) (conf.getFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY,
-                                     FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT) *
-                                     MSECS_PER_MINUTE);
-      if (this.emptierInterval > deletionInterval ||
-          this.emptierInterval == 0) {
-        LOG.warn("The configured interval for checkpoint is " +
-                 this.emptierInterval + " minutes." +
-                 " Using interval of " + deletionInterval +
-                 " minutes that is used for deletion instead");
-        this.emptierInterval = deletionInterval;
-      }
-    }
-
-    public void run() {
-      if (emptierInterval == 0)
-        return;                                   // trash disabled
-      long now = System.currentTimeMillis();
-      long end;
-      while (true) {
-        end = ceiling(now, emptierInterval);
-        try {                                     // sleep for interval
-          Thread.sleep(end - now);
-        } catch (InterruptedException e) {
-          break;                                  // exit on interrupt
-        }
-
-        try {
-          now = System.currentTimeMillis();
-          if (now >= end) {
-
-            FileStatus[] homes = null;
-            try {
-              homes = fs.listStatus(homesParent);         // list all home dirs
-            } catch (IOException e) {
-              LOG.warn("Trash can't list homes: "+e+" Sleeping.");
-              continue;
-            }
-
-            for (FileStatus home : homes) {         // dump each trash
-              if (!home.isDirectory())
-                continue;
-              try {
-                Trash trash = new Trash(home.getPath(), conf);
-                trash.expunge();
-                trash.checkpoint();
-              } catch (IOException e) {
-                LOG.warn("Trash caught: "+e+". Skipping "+home.getPath()+".");
-              } 
-            }
-          }
-        } catch (Exception e) {
-          LOG.warn("RuntimeException during Trash.Emptier.run(): ", e); 
-        }
-      }
-      try {
-        fs.close();
-      } catch(IOException e) {
-        LOG.warn("Trash cannot close FileSystem: ", e);
-      }
-    }
-
-    private long ceiling(long time, long interval) {
-      return floor(time, interval) + interval;
-    }
-    private long floor(long time, long interval) {
-      return (time / interval) * interval;
-    }
-
+    return trashPolicy.getEmptier();
   }
 
   /** Run an emptier.*/
   public static void main(String[] args) throws Exception {
     new Trash(new Configuration()).getEmptier().run();
   }
-
 }

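After this rewrite (HADOOP-7460), Trash is a thin wrapper around the TrashPolicy obtained from TrashPolicy.getInstance(conf, fs, fs.getHomeDirectory()); the operations a policy must supply can be read off the delegations above: isEnabled(), moveToTrash(Path), createCheckpoint(), deleteCheckpoint(), getCurrentTrashDir(), and getEmptier(). The body of TrashPolicy.java is not included in this message, so the following sketch of a custom policy assumes it is an abstract class exposing exactly those operations (any initialization hook it defines is omitted here):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.TrashPolicy;

    // Hypothetical policy that retains nothing: moveToTrash always declines,
    // so callers fall back to plain deletion.
    public class NoopTrashPolicy extends TrashPolicy {
      public boolean isEnabled() { return false; }
      public boolean moveToTrash(Path path) throws IOException { return false; }
      public void createCheckpoint() throws IOException { }
      public void deleteCheckpoint() throws IOException { }
      public Path getCurrentTrashDir() { return null; }
      public Runnable getEmptier() throws IOException {
        return new Runnable() { public void run() { } };
      }
    }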
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/CommandWithDestination.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/CommandWithDestination.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/CommandWithDestination.java Tue Jul 26 01:53:10 2011
@@ -38,7 +38,12 @@ abstract class CommandWithDestination ex
   protected PathData dst;
   protected boolean overwrite = false;
   
-  // TODO: commands should implement a -f to enable this
+  /**
+   * 
+   * This method is used to enable the force(-f)  option while copying the files.
+   * 
+   * @param flag true/false
+   */
   protected void setOverwrite(boolean flag) {
     overwrite = flag;
   }

Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/CopyCommands.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/CopyCommands.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/CopyCommands.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/CopyCommands.java Tue Jul 26 01:53:10 2011
@@ -94,15 +94,16 @@ class CopyCommands {  
     
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
-      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE);
+      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "f");
       cf.parse(args);
+      setOverwrite(cf.getOpt("f"));
       getRemoteDestination(args);
     }
 
     @Override
     protected void processPath(PathData src, PathData target)
     throws IOException {
-      if (!FileUtil.copy(src.fs, src.path, target.fs, target.path, false, getConf())) {
+      if (!FileUtil.copy(src.fs, src.path, target.fs, target.path, false, overwrite, getConf())) {
         // we have no idea what the error is...  FileUtils masks it and in
         // some cases won't even report an error
         throw new PathIOException(src.toString());
@@ -216,8 +217,9 @@ class CopyCommands {  
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
-      CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE);
+      CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "f");
       cf.parse(args);
+      setOverwrite(cf.getOpt("f"));
       getRemoteDestination(args);
     }
 
@@ -246,7 +248,7 @@ class CopyCommands {  
     @Override
     protected void processPath(PathData src, PathData target)
     throws IOException {
-      target.fs.copyFromLocalFile(false, false, src.path, target.path);
+      target.fs.copyFromLocalFile(false, overwrite, src.path, target.path);
     }
 
     /** Copies from stdin to the destination file. */

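The two processOptions hunks above wire the new -f flag (HADOOP-7361) through setOverwrite() and into the copy calls, so `hadoop fs -put -f <src> <dst>` now replaces an existing destination. The overloaded FileUtil.copy invoked above can be used the same way from application code; the paths below are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileUtil;
    import org.apache.hadoop.fs.Path;

    public class OverwriteCopy {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path src = new Path("/tmp/staging/part-00000");  // placeholder paths
        Path dst = new Path("/user/example/part-00000");
        // deleteSource=false, overwrite=true: mirrors `hadoop fs -put -f`
        boolean copied = FileUtil.copy(fs, src, fs, dst, false, true, conf);
        System.out.println("copied: " + copied);
      }
    }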
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/Delete.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/Delete.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/Delete.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/fs/shell/Delete.java Tue Jul 26 01:53:10 2011
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs.shell;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.LinkedList;
 
@@ -85,7 +86,13 @@ class Delete extends FsCommand {
     private boolean moveToTrash(PathData item) throws IOException {
       boolean success = false;
       if (!skipTrash) {
-        success = Trash.moveToAppropriateTrash(item.fs, item.path, getConf());
+        try {
+          success = Trash.moveToAppropriateTrash(item.fs, item.path, getConf());
+        } catch(FileNotFoundException fnfe) {
+          throw fnfe;
+        } catch (IOException ioe) {
+            throw new IOException(ioe.getMessage() + ". Consider using -skipTrash option", ioe);
+        }
       }
       return success;
     }

Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/http/HttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/http/HttpServer.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/http/HttpServer.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/http/HttpServer.java Tue Jul 26 01:53:10 2011
@@ -411,7 +411,7 @@ public class HttpServer implements Filte
     for (Context ctx : defaultContexts.keySet()) {
       defineFilter(ctx, name, classname, parameters, ALL_URLS);
     }
-    LOG.info("Added global filter" + name + " (class=" + classname + ")");
+    LOG.info("Added global filter '" + name + "' (class=" + classname + ")");
   }
 
   /**
@@ -737,13 +737,12 @@ public class HttpServer implements Filte
     @Override
     public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
-
+      response.setContentType("text/plain; charset=UTF-8");
       // Do the authorization
       if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
           response)) {
         return;
       }
-
       PrintWriter out = response.getWriter();
       ReflectionUtils.printThreadInfo(out, "");
       out.close();

Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java Tue Jul 26 01:53:10 2011
@@ -25,25 +25,30 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 
 class RetryInvocationHandler implements InvocationHandler {
   public static final Log LOG = LogFactory.getLog(RetryInvocationHandler.class);
-  private Object implementation;
+  private FailoverProxyProvider proxyProvider;
   
   private RetryPolicy defaultPolicy;
   private Map<String,RetryPolicy> methodNameToPolicyMap;
+  private Object currentProxy;
   
-  public RetryInvocationHandler(Object implementation, RetryPolicy retryPolicy) {
-    this.implementation = implementation;
+  public RetryInvocationHandler(FailoverProxyProvider proxyProvider,
+      RetryPolicy retryPolicy) {
+    this.proxyProvider = proxyProvider;
     this.defaultPolicy = retryPolicy;
     this.methodNameToPolicyMap = Collections.emptyMap();
+    this.currentProxy = proxyProvider.getProxy();
   }
   
-  public RetryInvocationHandler(Object implementation, Map<String, RetryPolicy> methodNameToPolicyMap) {
-    this.implementation = implementation;
+  public RetryInvocationHandler(FailoverProxyProvider proxyProvider,
+      Map<String, RetryPolicy> methodNameToPolicyMap) {
+    this.proxyProvider = proxyProvider;
     this.defaultPolicy = RetryPolicies.TRY_ONCE_THEN_FAIL;
     this.methodNameToPolicyMap = methodNameToPolicyMap;
+    this.currentProxy = proxyProvider.getProxy();
   }
 
   public Object invoke(Object proxy, Method method, Object[] args)
@@ -53,24 +58,35 @@ class RetryInvocationHandler implements 
       policy = defaultPolicy;
     }
     
+    int failovers = 0;
     int retries = 0;
     while (true) {
       try {
         return invokeMethod(method, args);
       } catch (Exception e) {
-        if (!policy.shouldRetry(e, retries++)) {
-          LOG.info("Exception while invoking " + method.getName()
-                   + " of " + implementation.getClass() + ". Not retrying."
-                   , e);
+        boolean isMethodIdempotent = proxyProvider.getInterface()
+            .getMethod(method.getName(), method.getParameterTypes())
+            .isAnnotationPresent(Idempotent.class);
+        RetryAction action = policy.shouldRetry(e, retries++, failovers,
+            isMethodIdempotent);
+        if (action == RetryAction.FAIL) {
+          LOG.warn("Exception while invoking " + method.getName()
+                   + " of " + currentProxy.getClass() + ". Not retrying.", e);
           if (!method.getReturnType().equals(Void.TYPE)) {
             throw e; // non-void methods can't fail without an exception
           }
           return null;
+        } else if (action == RetryAction.FAILOVER_AND_RETRY) {
+          LOG.warn("Exception while invoking " + method.getName()
+              + " of " + currentProxy.getClass()
+              + ". Trying to fail over.", e);
+          failovers++;
+          proxyProvider.performFailover(currentProxy);
+          currentProxy = proxyProvider.getProxy();
         }
         if(LOG.isDebugEnabled()) {
           LOG.debug("Exception while invoking " + method.getName()
-              + " of " + implementation.getClass() + ". Retrying."
-              , e);
+              + " of " + currentProxy.getClass() + ". Retrying.", e);
         }
       }
     }
@@ -81,7 +97,7 @@ class RetryInvocationHandler implements 
       if (!method.isAccessible()) {
         method.setAccessible(true);
       }
-      return method.invoke(implementation, args);
+      return method.invoke(currentProxy, args);
     } catch (InvocationTargetException e) {
       throw e.getCause();
     }

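The rewritten invoke() loop above checks the protocol interface's method for the new @Idempotent annotation (added in this merge; see the file list at the top) to decide whether failing over and retrying is safe. A minimal sketch of an annotated interface, assuming @Idempotent is a runtime-retained method annotation as its use with isAnnotationPresent() implies; the protocol itself is hypothetical:

    import org.apache.hadoop.io.retry.Idempotent;

    public interface ExampleProtocol {
      @Idempotent
      String getStatus() throws Exception;  // safe to retry after a failover

      void increment() throws Exception;    // not idempotent: network failures
                                            // after connect will not be retried
    }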
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryPolicies.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryPolicies.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryPolicies.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryPolicies.java Tue Jul 26 01:53:10 2011
@@ -17,14 +17,21 @@
  */
 package org.apache.hadoop.io.retry;
 
+import java.io.IOException;
+import java.net.ConnectException;
+import java.net.NoRouteToHostException;
+import java.net.SocketException;
+import java.net.UnknownHostException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
-import java.util.Set;
 import java.util.Map.Entry;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
 
 /**
  * <p>
@@ -33,6 +40,8 @@ import org.apache.hadoop.ipc.RemoteExcep
  */
 public class RetryPolicies {
   
+  public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
+  
   /**
    * <p>
    * Try once, and fail by re-throwing the exception.
@@ -122,20 +131,32 @@ public class RetryPolicies {
     return new RemoteExceptionDependentRetry(defaultPolicy, exceptionToPolicyMap);
   }
   
+  public static final RetryPolicy failoverOnNetworkException(int maxFailovers) {
+    return failoverOnNetworkException(TRY_ONCE_THEN_FAIL, maxFailovers);
+  }
+  
+  public static final RetryPolicy failoverOnNetworkException(
+      RetryPolicy fallbackPolicy, int maxFailovers) {
+    return new FailoverOnNetworkExceptionRetry(fallbackPolicy, maxFailovers);
+  }
+  
   static class TryOnceThenFail implements RetryPolicy {
-    public boolean shouldRetry(Exception e, int retries) throws Exception {
+    public RetryAction shouldRetry(Exception e, int retries, int failovers,
+        boolean isMethodIdempotent) throws Exception {
       throw e;
     }
   }
   static class TryOnceDontFail implements RetryPolicy {
-    public boolean shouldRetry(Exception e, int retries) throws Exception {
-      return false;
+    public RetryAction shouldRetry(Exception e, int retries, int failovers,
+        boolean isMethodIdempotent) throws Exception {
+      return RetryAction.FAIL;
     }
   }
   
   static class RetryForever implements RetryPolicy {
-    public boolean shouldRetry(Exception e, int retries) throws Exception {
-      return true;
+    public RetryAction shouldRetry(Exception e, int retries, int failovers,
+        boolean isMethodIdempotent) throws Exception {
+      return RetryAction.RETRY;
     }
   }
   
@@ -150,7 +171,8 @@ public class RetryPolicies {
       this.timeUnit = timeUnit;
     }
 
-    public boolean shouldRetry(Exception e, int retries) throws Exception {
+    public RetryAction shouldRetry(Exception e, int retries, int failovers,
+        boolean isMethodIdempotent) throws Exception {
       if (retries >= maxRetries) {
         throw e;
       }
@@ -159,7 +181,7 @@ public class RetryPolicies {
       } catch (InterruptedException ie) {
         // retry
       }
-      return true;
+      return RetryAction.RETRY;
     }
     
     protected abstract long calculateSleepTime(int retries);
@@ -204,12 +226,13 @@ public class RetryPolicies {
       this.exceptionToPolicyMap = exceptionToPolicyMap;
     }
 
-    public boolean shouldRetry(Exception e, int retries) throws Exception {
+    public RetryAction shouldRetry(Exception e, int retries, int failovers,
+        boolean isMethodIdempotent) throws Exception {
       RetryPolicy policy = exceptionToPolicyMap.get(e.getClass());
       if (policy == null) {
         policy = defaultPolicy;
       }
-      return policy.shouldRetry(e, retries);
+      return policy.shouldRetry(e, retries, failovers, isMethodIdempotent);
     }
     
   }
@@ -230,7 +253,8 @@ public class RetryPolicies {
       }
     }
 
-    public boolean shouldRetry(Exception e, int retries) throws Exception {
+    public RetryAction shouldRetry(Exception e, int retries, int failovers,
+        boolean isMethodIdempotent) throws Exception {
       RetryPolicy policy = null;
       if (e instanceof RemoteException) {
         policy = exceptionNameToPolicyMap.get(
@@ -239,7 +263,7 @@ public class RetryPolicies {
       if (policy == null) {
         policy = defaultPolicy;
       }
-      return policy.shouldRetry(e, retries);
+      return policy.shouldRetry(e, retries, failovers, isMethodIdempotent);
     }
   }
   
@@ -255,4 +279,55 @@ public class RetryPolicies {
       return sleepTime*r.nextInt(1<<(retries+1));
     }
   }
+  
+  /*
+   * Fail over and retry in the case of:
+   *   Remote StandbyException (server is up, but is not the active server)
+   *   Immediate socket exceptions (e.g. no route to host, ECONNREFUSED)
+   *   Socket exceptions after initial connection when operation is idempotent
+   * 
+   * Fail immediately in the case of:
+   *   Socket exceptions after initial connection when operation is not idempotent
+   * 
+   * Fall back on underlying retry policy otherwise.
+   */
+  static class FailoverOnNetworkExceptionRetry implements RetryPolicy {
+    
+    private RetryPolicy fallbackPolicy;
+    private int maxFailovers;
+    
+    public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
+        int maxFailovers) {
+      this.fallbackPolicy = fallbackPolicy;
+      this.maxFailovers = maxFailovers;
+    }
+
+    @Override
+    public RetryAction shouldRetry(Exception e, int retries,
+        int failovers, boolean isMethodIdempotent) throws Exception {
+      if (failovers >= maxFailovers) {
+        LOG.info("Failovers (" + failovers + ") exceeded maximum allowed ("
+            + maxFailovers + ")");
+        return RetryAction.FAIL;
+      }
+      
+      if (e instanceof ConnectException ||
+          e instanceof NoRouteToHostException ||
+          e instanceof UnknownHostException ||
+          e instanceof StandbyException) {
+        return RetryAction.FAILOVER_AND_RETRY;
+      } else if (e instanceof SocketException ||
+                 e instanceof IOException) {
+        if (isMethodIdempotent) {
+          return RetryAction.FAILOVER_AND_RETRY;
+        } else {
+          return RetryAction.FAIL;
+        }
+      } else {
+        return fallbackPolicy.shouldRetry(e, retries, failovers,
+            isMethodIdempotent);
+      }
+    }
+    
+  }
 }

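For illustration, a minimal sketch of how the new failover policy composes with a retry proxy. The ClusterService interface and its in-line implementation are hypothetical, and DefaultFailoverProxyProvider (added in this merge) wraps a single instance, so a real deployment would substitute a provider that rotates between active and standby servers:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    public class FailoverRetryExample {
      // Hypothetical service interface, standing in for an RPC protocol.
      public interface ClusterService {
        String getStatus();
      }

      public static void main(String[] args) {
        // Fail over at most 3 times on connect/route/standby errors; for
        // other exceptions, fall back to 5 retries with a one-second sleep.
        RetryPolicy policy = RetryPolicies.failoverOnNetworkException(
            RetryPolicies.retryUpToMaximumCountWithFixedSleep(
                5, 1, TimeUnit.SECONDS),
            3);

        ClusterService backend = new ClusterService() {
          public String getStatus() { return "active"; }
        };

        ClusterService proxy = (ClusterService) RetryProxy.create(
            ClusterService.class,
            new DefaultFailoverProxyProvider(ClusterService.class, backend),
            policy);

        System.out.println(proxy.getStatus());
      }
    }
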
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryPolicy.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryPolicy.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryPolicy.java Tue Jul 26 01:53:10 2011
@@ -17,13 +17,28 @@
  */
 package org.apache.hadoop.io.retry;
 
+import org.apache.hadoop.classification.InterfaceStability;
+
+
 /**
  * <p>
  * Specifies a policy for retrying method failures.
  * Implementations of this interface should be immutable.
  * </p>
  */
+@InterfaceStability.Evolving
 public interface RetryPolicy {
+  
+  /**
+   * Returned by {@link RetryPolicy#shouldRetry(Exception, int, int, boolean)}.
+   */
+  @InterfaceStability.Evolving
+  public enum RetryAction {
+    FAIL,
+    RETRY,
+    FAILOVER_AND_RETRY
+  }
+  
   /**
    * <p>
    * Determines whether the framework should retry a
@@ -31,13 +46,19 @@ public interface RetryPolicy {
    * of retries that have been made for that operation
    * so far.
    * </p>
-   * @param e The exception that caused the method to fail.
-   * @param retries The number of times the method has been retried.
+   * @param e The exception that caused the method to fail
+   * @param retries The number of times the method has been retried
+   * @param failovers The number of times the method has failed over to a
+   *   different backend implementation
+   * @param isMethodIdempotent <code>true</code> if the method is idempotent
+   *   and so can reasonably be retried on failover when we don't know if the
+   *   previous attempt reached the server or not
-   * @return <code>true</code> if the method should be retried,
-   *   <code>false</code> if the method should not be retried
-   *   but shouldn't fail with an exception (only for void methods).
+   * @return a {@link RetryAction} indicating whether the method should be
+   *   retried as-is, failed over and then retried, or failed outright
    * @throws Exception The re-thrown exception <code>e</code> indicating
-   *   that the method failed and should not be retried further. 
+   *   that the method failed and should not be retried further
    */
-  public boolean shouldRetry(Exception e, int retries) throws Exception;
+  public RetryAction shouldRetry(Exception e, int retries, int failovers,
+      boolean isMethodIdempotent) throws Exception;
 }

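As a sketch of the widened contract, an implementation now picks one of the three RetryAction values instead of returning a boolean; the retry cap of 3 below is arbitrary:

    import org.apache.hadoop.io.retry.RetryPolicy;

    // Retries idempotent methods a bounded number of times and fails
    // everything else immediately.
    public class IdempotentOnlyRetry implements RetryPolicy {
      public RetryAction shouldRetry(Exception e, int retries, int failovers,
          boolean isMethodIdempotent) throws Exception {
        if (isMethodIdempotent && retries < 3) {
          return RetryAction.RETRY;
        }
        return RetryAction.FAIL;
      }
    }
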
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryProxy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryProxy.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryProxy.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/io/retry/RetryProxy.java Tue Jul 26 01:53:10 2011
@@ -33,25 +33,41 @@ public class RetryProxy {
    * </p>
    * @param iface the interface that the retry will implement
    * @param implementation the instance whose methods should be retried
-   * @param retryPolicy the policy for retirying method call failures
+   * @param retryPolicy the policy for retrying method call failures
    * @return the retry proxy
    */
   public static Object create(Class<?> iface, Object implementation,
                               RetryPolicy retryPolicy) {
+    return RetryProxy.create(iface,
+        new DefaultFailoverProxyProvider(iface, implementation),
+        retryPolicy);
+  }
+
+  /**
+   * Create a proxy for an interface of implementations of that interface using
+   * the given {@link FailoverProxyProvider} and the same retry policy for each
+   * method in the interface.
+   * 
+   * @param iface the interface that the retry will implement
+   * @param proxyProvider provides implementation instances whose methods should be retried
+   * @param retryPolicy the policy for retrying or failing over method call failures
+   * @return the retry proxy
+   */
+  public static Object create(Class<?> iface, FailoverProxyProvider proxyProvider,
+      RetryPolicy retryPolicy) {
     return Proxy.newProxyInstance(
-                                  implementation.getClass().getClassLoader(),
-                                  new Class<?>[] { iface },
-                                  new RetryInvocationHandler(implementation, retryPolicy)
-                                  );
-  }  
+        proxyProvider.getInterface().getClassLoader(),
+        new Class<?>[] { iface },
+        new RetryInvocationHandler(proxyProvider, retryPolicy)
+        );
+  }
   
   /**
-   * <p>
    * Create a proxy for an interface of an implementation class
    * using a set of retry policies specified by method name.
    * If no retry policy is defined for a method then a default of
    * {@link RetryPolicies#TRY_ONCE_THEN_FAIL} is used.
-   * </p>
+   * 
    * @param iface the interface that the retry will implement
    * @param implementation the instance whose methods should be retried
    * @param methodNameToPolicyMap a map of method names to retry policies
@@ -59,10 +75,28 @@ public class RetryProxy {
    */
   public static Object create(Class<?> iface, Object implementation,
                               Map<String,RetryPolicy> methodNameToPolicyMap) {
+    return RetryProxy.create(iface,
+        new DefaultFailoverProxyProvider(iface, implementation),
+        methodNameToPolicyMap);
+  }
+
+  /**
+   * Create a proxy for an interface of implementations of that interface using
+   * the given {@link FailoverProxyProvider} and a set of retry policies
+   * specified by method name. If no retry policy is defined for a method then a
+   * default of {@link RetryPolicies#TRY_ONCE_THEN_FAIL} is used.
+   * 
+   * @param iface the interface that the retry will implement
+   * @param proxyProvider provides implementation instances whose methods should be retried
+   * @param methodNameToPolicyMap a map of method names to retry policies
+   * @return the retry proxy
+   */
+  public static Object create(Class<?> iface, FailoverProxyProvider proxyProvider,
+      Map<String,RetryPolicy> methodNameToPolicyMap) {
     return Proxy.newProxyInstance(
-                                  implementation.getClass().getClassLoader(),
-                                  new Class<?>[] { iface },
-                                  new RetryInvocationHandler(implementation, methodNameToPolicyMap)
-                                  );
+        proxyProvider.getInterface().getClassLoader(),
+        new Class<?>[] { iface },
+        new RetryInvocationHandler(proxyProvider, methodNameToPolicyMap)
+        );
   }
 }

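The per-method-name overload keeps its old shape on top of the new provider plumbing. A minimal sketch; FileOps and its anonymous implementation are invented for the example:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    public class MethodPolicyExample {
      public interface FileOps {
        boolean rename(String src, String dst);
      }

      public static void main(String[] args) {
        // Only rename() gets an explicit policy; any unmapped method
        // defaults to RetryPolicies.TRY_ONCE_THEN_FAIL.
        Map<String, RetryPolicy> policies = new HashMap<String, RetryPolicy>();
        policies.put("rename", RetryPolicies.RETRY_FOREVER);

        FileOps ops = (FileOps) RetryProxy.create(FileOps.class,
            new FileOps() {
              public boolean rename(String src, String dst) { return true; }
            },
            policies);
        ops.rename("/a", "/b");
      }
    }
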
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/ipc/Client.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/ipc/Client.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/ipc/Client.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/ipc/Client.java Tue Jul 26 01:53:10 2011
@@ -245,14 +245,14 @@ public class Client {
       this.doPing = remoteId.getDoPing();
       this.pingInterval = remoteId.getPingInterval();
       if (LOG.isDebugEnabled()) {
-        LOG.debug("The ping interval is" + this.pingInterval + "ms.");
+        LOG.debug("The ping interval is " + this.pingInterval + " ms.");
       }
 
       UserGroupInformation ticket = remoteId.getTicket();
       Class<?> protocol = remoteId.getProtocol();
       this.useSasl = UserGroupInformation.isSecurityEnabled();
       if (useSasl && protocol != null) {
-        TokenInfo tokenInfo = SecurityUtil.getTokenInfo(protocol);
+        TokenInfo tokenInfo = SecurityUtil.getTokenInfo(protocol, conf);
         if (tokenInfo != null) {
           TokenSelector<? extends TokenIdentifier> tokenSelector = null;
           try {
@@ -267,7 +267,7 @@ public class Client {
               .getHostAddress() + ":" + addr.getPort()), 
               ticket.getTokens());
         }
-        KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol);
+        KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf);
         if (krbInfo != null) {
           serverPrincipal = remoteId.getServerPrincipal();
           if (LOG.isDebugEnabled()) {
@@ -1285,7 +1285,7 @@ public class Client {
       if (!UserGroupInformation.isSecurityEnabled() || protocol == null) {
         return null;
       }
-      KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol);
+      KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf);
       if (krbInfo != null) {
         String serverKey = krbInfo.serverPrincipal();
         if (serverKey == null) {

Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/log/LogLevel.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/log/LogLevel.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/log/LogLevel.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/log/LogLevel.java Tue Jul 26 01:53:10 2011
@@ -142,8 +142,12 @@ public class LogLevel {
     private static void process(org.apache.log4j.Logger log, String level,
         PrintWriter out) throws IOException {
       if (level != null) {
-        log.setLevel(org.apache.log4j.Level.toLevel(level));
-        out.println(MARKER + "Setting Level to " + level + " ...<br />");
+        if (!level.equals(org.apache.log4j.Level.toLevel(level).toString())) {
+          out.println(MARKER + "Bad level : <b>" + level + "</b><br />");
+        } else {
+          log.setLevel(org.apache.log4j.Level.toLevel(level));
+          out.println(MARKER + "Setting Level to " + level + " ...<br />");
+        }
       }
       out.println(MARKER
           + "Effective level: <b>" + log.getEffectiveLevel() + "</b><br />");

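The validation above relies on log4j's Level.toLevel falling back to a default level (DEBUG) for unrecognized names rather than throwing, so a round-trip comparison is what exposes bad input. A standalone sketch, assuming log4j 1.2 on the classpath:

    import org.apache.log4j.Level;

    public class LevelRoundTrip {
      public static void main(String[] args) {
        // "WARN" parses to Level.WARN, whose toString() is "WARN" again.
        System.out.println("WARN".equals(
            Level.toLevel("WARN").toString()));      // true
        // An unknown name silently parses to DEBUG, failing the round trip.
        System.out.println("VERBOSE".equals(
            Level.toLevel("VERBOSE").toString()));   // false
      }
    }
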
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/metrics2/util/MetricsCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/metrics2/util/MetricsCache.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/metrics2/util/MetricsCache.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/metrics2/util/MetricsCache.java Tue Jul 26 01:53:10 2011
@@ -23,9 +23,6 @@ import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
 
-import com.google.common.base.Objects;
-import com.google.common.collect.Maps;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -34,6 +31,9 @@ import org.apache.hadoop.metrics2.Abstra
 import org.apache.hadoop.metrics2.MetricsRecord;
 import org.apache.hadoop.metrics2.MetricsTag;
 
+import com.google.common.base.Objects;
+import com.google.common.collect.Maps;
+
 /**
  * A metrics cache for sinks that don't support sparse updates.
  */
@@ -68,7 +68,7 @@ public class MetricsCache {
    */
   public static class Record {
     final Map<String, String> tags = Maps.newHashMap();
-    final Map<String, Number> metrics = Maps.newHashMap();
+    final Map<String, AbstractMetric> metrics = Maps.newHashMap();
 
     /**
      * Lookup a tag value
@@ -85,6 +85,16 @@ public class MetricsCache {
      * @return the metric value
      */
     public Number getMetric(String key) {
+      AbstractMetric metric = metrics.get(key);
+      return metric != null ? metric.value() : null;
+    }
+
+    /**
+     * Lookup a metric instance
+     * @param key name of the metric
+     * @return the metric instance
+     */
+    public AbstractMetric getMetricInstance(String key) {
       return metrics.get(key);
     }
 
@@ -96,9 +106,23 @@ public class MetricsCache {
     }
 
     /**
-     * @return entry set of the metrics of the record
+     * @deprecated use metricsEntrySet() instead
+     * @return entry set of metrics
      */
+    @Deprecated
     public Set<Map.Entry<String, Number>> metrics() {
+      Map<String, Number> map = new LinkedHashMap<String, Number>(
+          metrics.size());
+      for (Map.Entry<String, AbstractMetric> mapEntry : metrics.entrySet()) {
+        map.put(mapEntry.getKey(), mapEntry.getValue().value());
+      }
+      return map.entrySet();
+    }
+
+    /**
+     * @return entry set of metrics
+     */
+    public Set<Map.Entry<String, AbstractMetric>> metricsEntrySet() {
       return metrics.entrySet();
     }
 
@@ -141,7 +165,7 @@ public class MetricsCache {
       recordCache.put(tags, record);
     }
     for (AbstractMetric m : mr.metrics()) {
-      record.metrics.put(m.name(), m.value());
+      record.metrics.put(m.name(), m);
     }
     if (includingTags) {
       // mostly for some sinks that include tags as part of a dense schema

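A sketch of the consumption pattern this enables on the sink side: after merging an incoming (possibly sparse) record into the cache, a sink can now walk the full metric set with the AbstractMetric objects themselves in hand rather than bare numbers. The printing is illustrative:

    import java.util.Map;

    import org.apache.hadoop.metrics2.AbstractMetric;
    import org.apache.hadoop.metrics2.MetricsRecord;
    import org.apache.hadoop.metrics2.util.MetricsCache;

    public class CacheConsumer {
      private final MetricsCache cache = new MetricsCache();

      // Merge one record into the cache, then emit the dense view.
      public void consume(MetricsRecord incoming) {
        MetricsCache.Record r = cache.update(incoming);
        for (Map.Entry<String, AbstractMetric> e : r.metricsEntrySet()) {
          System.out.println(e.getKey() + " = " + e.getValue().value());
        }
      }
    }
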
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java Tue Jul 26 01:53:10 2011
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.net;
 
+import java.net.InetAddress;
+import java.net.SocketException;
+import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -42,16 +45,11 @@ public class CachedDNSToSwitchMapping im
     this.rawMapping = rawMapping;
   }
   
-  public List<String> resolve(List<String> names) {
-    // normalize all input names to be in the form of IP addresses
-    names = NetUtils.normalizeHostNames(names);
-    
-    List <String> result = new ArrayList<String>(names.size());
-    if (names.isEmpty()) {
-      return result;
-    }
-
 
+  /**
+   * Returns the hosts from 'names' that have not been cached previously
+   */
+  private List<String> getUncachedHosts(List<String> names) {
     // find out all names without cached resolved location
     List<String> unCachedHosts = new ArrayList<String>(names.size());
     for (String name : names) {
@@ -59,27 +57,81 @@ public class CachedDNSToSwitchMapping im
         unCachedHosts.add(name);
       } 
     }
-    
-    // Resolve those names
-    List<String> rNames = rawMapping.resolve(unCachedHosts);
-    
+    return unCachedHosts;
+  }
+
+  /**
+   * Caches the resolved hosts
+   */
+  private void cacheResolvedHosts(List<String> uncachedHosts, 
+      List<String> resolvedHosts) {
     // Cache the result
-    if (rNames != null) {
-      for (int i=0; i<unCachedHosts.size(); i++) {
-        cache.put(unCachedHosts.get(i), rNames.get(i));
+    if (resolvedHosts != null) {
+      for (int i=0; i<uncachedHosts.size(); i++) {
+        cache.put(uncachedHosts.get(i), resolvedHosts.get(i));
       }
     }
-    
+  }
+
+  /**
+   * Returns the cached resolution of the list of hostnames/addresses.
+   * Returns null if any of the names are not currently in the cache
+   */
+  private List<String> getCachedHosts(List<String> names) {
+    List<String> result = new ArrayList<String>(names.size());
     // Construct the result
     for (String name : names) {
-      //now everything is in the cache
       String networkLocation = cache.get(name);
       if (networkLocation != null) {
         result.add(networkLocation);
-      } else { //resolve all or nothing
+      } else {
         return null;
       }
     }
     return result;
   }
+
+  /**
+   * Resolves host names and adds them to the cache.
+   * Unlike the 'resolve' method, this won't hide UnknownHostExceptions
+   * 
+   * @param names to resolve
+   * @return List of resolved names
+   * @throws UnknownHostException if any hosts cannot be resolved
+   */  
+  public List<String> resolveValidHosts(List<String> names) 
+    throws UnknownHostException {
+    if (names.isEmpty()) {
+      return new ArrayList<String>();
+    }
+    List<String> addresses = new ArrayList<String>(names.size());
+    for (String name : names) {
+      addresses.add(InetAddress.getByName(name).getHostAddress());
+    }
+
+    List<String> uncachedHosts = this.getUncachedHosts(names);
+
+    // Resolve the uncached hosts
+    List<String> resolvedHosts = rawMapping.resolveValidHosts(uncachedHosts);
+    this.cacheResolvedHosts(uncachedHosts, resolvedHosts);
+    return this.getCachedHosts(addresses);
+  }
+
+  public List<String> resolve(List<String> names) {
+    // normalize all input names to be in the form of IP addresses
+    names = NetUtils.normalizeHostNames(names);
+
+    List <String> result = new ArrayList<String>(names.size());
+    if (names.isEmpty()) {
+      return result;
+    }
+
+    List<String> uncachedHosts = this.getUncachedHosts(names);
+
+    // Resolve the uncached hosts
+    List<String> resolvedHosts = rawMapping.resolve(uncachedHosts);
+    this.cacheResolvedHosts(uncachedHosts, resolvedHosts);
+    return this.getCachedHosts(names);
+
+  }
 }

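From a caller's perspective the two entry points now differ mainly in how failure is reported: resolve() may return null, while resolveValidHosts() raises a checked exception. A small sketch with a placeholder host name:

    import java.net.UnknownHostException;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.net.DNSToSwitchMapping;

    public class TopologyLookup {
      // Returns the rack path for one host, or null if it cannot be
      // resolved; unlike resolve(), the cause is surfaced for logging.
      static String rackOf(DNSToSwitchMapping mapping, String host) {
        try {
          List<String> racks = mapping.resolveValidHosts(Arrays.asList(host));
          return racks.get(0);
        } catch (UnknownHostException e) {
          System.err.println("Cannot place " + host + ": " + e.getMessage());
          return null;
        }
      }
    }
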
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/DNSToSwitchMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/DNSToSwitchMapping.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/DNSToSwitchMapping.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/DNSToSwitchMapping.java Tue Jul 26 01:53:10 2011
@@ -18,6 +18,7 @@
 package org.apache.hadoop.net;
 
 import java.util.List;
+import java.net.UnknownHostException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -44,4 +45,23 @@ public interface DNSToSwitchMapping {
    * @return list of resolved network paths
    */
   public List<String> resolve(List<String> names);
+
+  /**
+   * Resolves a list of DNS-names/IP-addresses and returns a list of
+   * switch information (network paths). One-to-one correspondence must be 
+   * maintained between the elements in the lists. 
+   * Consider an element in the argument list - x.y.com. The switch information
+   * that is returned must be a network path of the form /foo/rack, 
+   * where / is the root, and 'foo' is the switch where 'rack' is connected.
+   * Note the hostname/ip-address is not part of the returned path.
+   * The network topology of the cluster would determine the number of
+   * components in the network path.  Unlike 'resolve', names must be 
+   * resolvable
+   * @param names the list of DNS-names/IP-addresses to resolve
+   * @return list of resolved network paths
+   * @throws UnknownHostException if any hosts are not resolvable
+   */
+  public List<String> resolveValidHosts(List<String> names)
+    throws UnknownHostException;
+
 }

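A minimal sketch of an implementation honoring the extended interface: every node lands in a single made-up rack, and resolveValidHosts enforces resolvability before delegating:

    import java.net.InetAddress;
    import java.net.UnknownHostException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.net.DNSToSwitchMapping;

    public class SingleRackMapping implements DNSToSwitchMapping {
      public List<String> resolve(List<String> names) {
        List<String> result = new ArrayList<String>(names.size());
        for (int i = 0; i < names.size(); i++) {
          result.add("/default-rack");
        }
        return result;
      }

      public List<String> resolveValidHosts(List<String> names)
          throws UnknownHostException {
        // Fail fast if any name is unresolvable, per the contract.
        for (String name : names) {
          InetAddress.getByName(name);
        }
        return resolve(names);
      }
    }
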
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/ScriptBasedMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/ScriptBasedMapping.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/ScriptBasedMapping.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/net/ScriptBasedMapping.java Tue Jul 26 01:53:10 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.net;
 
 import java.util.*;
 import java.io.*;
+import java.net.UnknownHostException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -123,6 +124,17 @@ implements Configurable
     return m;
   }
   
+  public List<String> resolveValidHosts(List<String> names) 
+    throws UnknownHostException {
+    List<String> result = this.resolve(names);
+    if (result != null) {
+      return result;
+    } else {
+      throw new UnknownHostException(
+          "Unknown host(s) returned from ScriptBasedMapping");
+    }
+  }
+
   private String runResolveCommand(List<String> args) {
     int loopCount = 0;
     if (args.size() == 0) {

Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/AnnotatedSecurityInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/AnnotatedSecurityInfo.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/AnnotatedSecurityInfo.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/AnnotatedSecurityInfo.java Tue Jul 26 01:53:10 2011
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.security;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.TokenInfo;
 
 /**
@@ -26,12 +27,12 @@ import org.apache.hadoop.security.token.
 public class AnnotatedSecurityInfo extends SecurityInfo {
 
   @Override
-  public KerberosInfo getKerberosInfo(Class<?> protocol) {
+  public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
     return protocol.getAnnotation(KerberosInfo.class);
   }
 
   @Override
-  public TokenInfo getTokenInfo(Class<?> protocol) {
+  public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
     return protocol.getAnnotation(TokenInfo.class);
   }
 

Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/SecurityInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/SecurityInfo.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/SecurityInfo.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/SecurityInfo.java Tue Jul 26 01:53:10 2011
@@ -18,8 +18,13 @@
 
 package org.apache.hadoop.security;
 
+import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.TokenInfo;
 
+@Evolving
+@LimitedPrivate({"MapReduce", "HDFS"})
 /**
  * Interface used by RPC to get the Security information for a given 
  * protocol.
@@ -29,15 +34,17 @@ public abstract class SecurityInfo {
   /**
    * Get the KerberosInfo for a given protocol.
    * @param protocol interface class
+   * @param conf configuration
    * @return KerberosInfo
    */
-  public abstract KerberosInfo getKerberosInfo(Class<?> protocol);
+  public abstract KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf);
 
   /**
    * Get the TokenInfo for a given protocol.
    * @param protocol interface class
+   * @param conf configuration object.
    * @return TokenInfo instance
    */
-  public abstract TokenInfo getTokenInfo(Class<?> protocol);
+  public abstract TokenInfo getTokenInfo(Class<?> protocol, Configuration conf);
 
 }

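A sketch of a provider written against the new conf-aware signatures; the protocol name is hypothetical, and the Configuration argument is simply available to implementations that need per-deployment answers:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.KerberosInfo;
    import org.apache.hadoop.security.SecurityInfo;
    import org.apache.hadoop.security.token.TokenInfo;

    public class ExampleSecurityInfo extends SecurityInfo {
      @Override
      public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
        // Answer only for the one protocol this provider knows about.
        if ("org.example.MyProtocol".equals(protocol.getName())) {
          return protocol.getAnnotation(KerberosInfo.class);
        }
        return null;
      }

      @Override
      public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
        if ("org.example.MyProtocol".equals(protocol.getName())) {
          return protocol.getAnnotation(TokenInfo.class);
        }
        return null;
      }
    }
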
Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/SecurityUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/SecurityUtil.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/SecurityUtil.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/SecurityUtil.java Tue Jul 26 01:53:10 2011
@@ -310,17 +310,18 @@ public class SecurityUtil {
    * Look up the KerberosInfo for a given protocol. It searches all known
    * SecurityInfo providers.
    * @param protocol the protocol class to get the information for
+   * @param conf configuration object
    * @return the KerberosInfo or null if it has no KerberosInfo defined
    */
-  public static KerberosInfo getKerberosInfo(Class<?> protocol) {
+  public static KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
     for(SecurityInfo provider: testProviders) {
-      KerberosInfo result = provider.getKerberosInfo(protocol);
+      KerberosInfo result = provider.getKerberosInfo(protocol, conf);
       if (result != null) {
         return result;
       }
     }
     for(SecurityInfo provider: securityInfoProviders) {
-      KerberosInfo result = provider.getKerberosInfo(protocol);
+      KerberosInfo result = provider.getKerberosInfo(protocol, conf);
       if (result != null) {
         return result;
       }
@@ -332,17 +333,18 @@ public class SecurityUtil {
    * Look up the TokenInfo for a given protocol. It searches all known
    * SecurityInfo providers.
    * @param protocol The protocol class to get the information for.
+   * @param conf Configuration object
   * @return the TokenInfo or null if it has no TokenInfo defined
    */
-  public static TokenInfo getTokenInfo(Class<?> protocol) {
+  public static TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
     for(SecurityInfo provider: testProviders) {
-      TokenInfo result = provider.getTokenInfo(protocol);
+      TokenInfo result = provider.getTokenInfo(protocol, conf);
       if (result != null) {
         return result;
       }      
     }
     for(SecurityInfo provider: securityInfoProviders) {
-      TokenInfo result = provider.getTokenInfo(protocol);
+      TokenInfo result = provider.getTokenInfo(protocol, conf);
       if (result != null) {
         return result;
       }

Modified: hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java?rev=1150969&r1=1150968&r2=1150969&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java (original)
+++ hadoop/common/branches/HDFS-1073/common/src/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java Tue Jul 26 01:53:10 2011
@@ -84,7 +84,7 @@ public class ServiceAuthorizationManager
     }
     
     // get client principal key to verify (if available)
-    KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol);
+    KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf);
     String clientPrincipal = null; 
     if (krbInfo != null) {
       String clientKey = krbInfo.clientPrincipal();


