hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1124466 [1/3] - in /hadoop/hdfs/branches/HDFS-1073: ./ bin/ conf/ src/c++/libhdfs/ src/contrib/ src/contrib/hdfsproxy/ src/contrib/thriftfs/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/...
Date: Wed, 18 May 2011 23:44:25 GMT
Author: todd
Date: Wed May 18 23:44:23 2011
New Revision: 1124466

URL: http://svn.apache.org/viewvc?rev=1124466&view=rev
Log:
Merge trunk into HDFS-1073

Added:
    hadoop/hdfs/branches/HDFS-1073/conf/hadoop-metrics2.properties
      - copied unchanged from r1124460, hadoop/hdfs/trunk/conf/hadoop-metrics2.properties
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/GetGroups.java
      - copied unchanged from r1124460, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/GetGroups.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRenewer.java
      - copied unchanged from r1124460, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRenewer.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetGroups.java
      - copied unchanged from r1124460, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetGroups.java
Removed:
    hadoop/hdfs/branches/HDFS-1073/src/contrib/thriftfs/
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeActivityMBean.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/util/DaemonFactory.java
Modified:
    hadoop/hdfs/branches/HDFS-1073/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1073/bin/hdfs
    hadoop/hdfs/branches/HDFS-1073/build.xml   (contents, props changed)
    hadoop/hdfs/branches/HDFS-1073/ivy.xml
    hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/contrib/build.xml
    hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/java/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/JMXGet.java
    hadoop/hdfs/branches/HDFS-1073/src/test/aop/build/aop.xml
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java
    hadoop/hdfs/branches/HDFS-1073/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
    hadoop/hdfs/branches/HDFS-1073/src/webapps/datanode/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/webapps/secondary/   (props changed)

Propchange: hadoop/hdfs/branches/HDFS-1073/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed May 18 23:44:23 2011
@@ -2,4 +2,4 @@
 /hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463
 /hadoop/hdfs/branches/branch-0.21:820487
-/hadoop/hdfs/trunk:1086482-1102504
+/hadoop/hdfs/trunk:1086482-1124460

Modified: hadoop/hdfs/branches/HDFS-1073/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/CHANGES.txt?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1073/CHANGES.txt Wed May 18 23:44:23 2011
@@ -275,6 +275,12 @@ Trunk (unreleased changes)
 
     HDFS-1911. HDFS tests for the newly added viewfs
 
+    HDFS-1814. Add "hdfs groups" command to query the server-side groups
+    resolved for a user. (Aaron T. Myers via todd)
+
+    HDFS-1914. Federation: namenode storage directories must be configurable
+    specific to name service. (suresh)
+
 
   IMPROVEMENTS
 
@@ -283,7 +289,8 @@ Trunk (unreleased changes)
     HDFS-1628. Display full path in AccessControlException.  (John George
     via szetszwo)
 
-    HDFS-1707. Federation: Failure in browsing data on new namenodes. (jitendra)
+    HDFS-1707. Federation: Failure in browsing data on new namenodes. 
+    (jitendra)
 
     HDFS-1683. Test Balancer with multiple NameNodes.  (szetszwo)
 
@@ -404,6 +411,31 @@ Trunk (unreleased changes)
     HDFS-1378. Edit log replay should track and report file offsets in case of
     errors. (Aaron T. Myers and Todd Lipcon via todd)
 
+    HDFS-1917. Separate hdfs jars from common in ivy configuration.  (Eric Yang
+    via szetszwo)
+
+    HDFS-1899. GenericTestUtils.formatNamenode should be moved to DFSTestUtil
+    (Ted Yu via todd)
+
+    HDFS-1117. Metrics 2.0 HDFS instrumentation. (Luke Lu via suresh)
+
+    HDFS-1946. HDFS part of HADOOP-7291. (eli)
+
+    HDFS-1945. Removed the deprecated fields in DataTransferProtocol.
+    (szetszwo)
+
+    HDFS-1730. Use DaemonFactory from common and delete it from HDFS.
+    (Tanping via suresh)
+
+    HDFS-1573. Add useful tracing information to Lease Renewer thread names
+    (todd)
+
+    HDFS-1939.  In ivy.xml, test conf should not extend common conf.
+    (Eric Yang via szetszwo)
+
+    HDFS-1332.  Include more information in exceptions and debug messages
+    when BlockPlacementPolicy cannot be satisfied.  (Ted Yu via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -543,8 +575,33 @@ Trunk (unreleased changes)
 
     HDFS-1627. Fix NullPointerException in Secondary NameNode. (hairong)
 
+    HDFS-1928. Fix path display for touchz in TestHDFSCLI.
+    (Daryn Sharp via todd)
+
+    HDFS-1938. Fix ivy-retrieve-hdfs dependence in build.xml and aop.xml.
+    (Eric Yang via szetszwo)
+
+    HDFS-1929. TestEditLogFileOutputStream fails if running on same host as NN
+    (Aaron T. Myers via todd)
+
+    HDFS-1933. Update TestDFSShell for improved "test" shell command. (Daryn
+    Sharp via todd)
+    
+    HDFS-1931. Update TestDFSShell for improved "du" shell command. (Daryn
+    Sharp via todd)
+
+    HDFS-1881. Federation: after taking snapshot the current directory 
+    of datanode is empty. (Tanping Wang via suresh)
+
+    HDFS-1927. Fix a bug which causes ip=null in NameNode audit log.
+    (John George via szetszwo)
+
 Release 0.22.0 - Unreleased
 
+  INCOMPATIBLE CHANGES
+
+    HDFS-1825. Remove thriftfs contrib. (nigel via eli)
+
   NEW FEATURES
 
     HDFS-992. Re-factor block access token implementation to conform to the 
@@ -772,6 +829,11 @@ Release 0.22.0 - Unreleased
     HDFS-1052. HDFS Federation - Merge of umbrella jira changes from
     HDFS-1052 branch into trunk.
 
+    HDFS-1835. DataNode should not depend on SHA1PRNG secure random generator
+    to generate a storage ID. (John Carrino via todd)
+
+    HDFS-1947. DFSClient should use mapreduce.task.attempt.id. (eli)
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)
@@ -947,6 +1009,15 @@ Release 0.22.0 - Unreleased
     HDFS-1621. Fix references to hadoop-common-${version} in build.xml
     (Jolly Chen via todd)
 
+    HDFS-1505. saveNamespace appears to succeed even if all directories fail
+    to save. (Aaron T. Myers via todd)
+
+    HDFS-1921. saveNamespace can cause NN to be unable to come up on restart
+    (Matt Foley via todd)
+
+    HDFS-1925. SafeModeInfo should use the correct constant instead of a
+    hard-coded value for its default. (Joey Echeverria via todd)
+
 Release 0.21.1 - Unreleased
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 

Modified: hadoop/hdfs/branches/HDFS-1073/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/hdfs?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/hdfs (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/hdfs Wed May 18 23:44:23 2011
@@ -35,6 +35,7 @@ function print_usage(){
   echo "  oev                  apply the offline edits viewer to an edits file"
   echo "  fetchdt              fetch a delegation token from the NameNode"
   echo "  getconf              get config values from configuration"
+  echo "  groups               get the groups which users belong to"
   echo "						Use -help to see options"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
@@ -97,6 +98,8 @@ elif [ "$COMMAND" = "fetchdt" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
 elif [ "$COMMAND" = "getconf" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.GetConf
+elif [ "$COMMAND" = "groups" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.GetGroups
 else
   echo $COMMAND - invalid command
   print_usage

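For illustration (not part of the committed diff): the new "groups" subcommand dispatches to GetGroups, which asks the NameNode to resolve a user's groups server-side through GetUserMappingsProtocol (registered in HDFSPolicyProvider below). A minimal sketch, assuming an already-obtained RPC proxy; the class name and exact output format are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.tools.GetUserMappingsProtocol;

    public class GroupsSketch {
      /** Print "user : group1 group2 ..." as resolved by the server. */
      static void printGroups(GetUserMappingsProtocol nn, String... users)
          throws IOException {
        for (String user : users) {
          StringBuilder line = new StringBuilder(user).append(" :");
          for (String group : nn.getGroupsForUser(user)) {
            line.append(' ').append(group);
          }
          System.out.println(line);
        }
      }
    }
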
Modified: hadoop/hdfs/branches/HDFS-1073/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/build.xml?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/build.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/build.xml Wed May 18 23:44:23 2011
@@ -92,7 +92,7 @@
   <property name="test.junit.fork.mode" value="perTest" />
   <property name="test.junit.printsummary" value="yes" />
   <property name="test.junit.haltonfailure" value="no" />
-  <property name="test.junit.maxmemory" value="512m" />
+  <property name="test.junit.maxmemory" value="1024m" />
   <property name="test.conf.dir" value="${build.dir}/test/conf" />
 
   <property name="test.hdfs.build.classes" value="${test.build.dir}/hdfs/classes"/>
@@ -165,6 +165,7 @@
   <property name="build.ivy.dir" location="${build.dir}/ivy" />
   <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib" />
   <property name="common.ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}/common"/>
+  <property name="hdfs.ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}/hdfs"/>
   <property name="test.ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}/test"/>
   <property name="build.ivy.report.dir" location="${build.ivy.dir}/report" />
   <property name="build.ivy.maven.dir" location="${build.ivy.dir}/maven" />
@@ -216,6 +217,7 @@
     <pathelement location="${build.classes}"/>
     <pathelement location="${conf.dir}"/>
     <path refid="ivy-common.classpath"/>
+    <path refid="ivy-hdfs.classpath"/>
   </path>
 
   <path id="test.classpath">
@@ -233,6 +235,7 @@
     <pathelement location="${build.classes}"/>
     <pathelement location="${test.conf.dir}"/>
     <path refid="ivy-common.classpath"/>
+    <path refid="ivy-hdfs.classpath"/>
   </path>
 
   <!-- the cluster test classpath: uses conf.dir for configuration -->
@@ -261,7 +264,7 @@
   <!-- ====================================================== -->
   <!-- Stuff needed by all targets                            -->
   <!-- ====================================================== -->
-  <target name="init" depends="ivy-retrieve-common">
+  <target name="init" depends="ivy-retrieve-common,ivy-retrieve-hdfs,ivy-retrieve-test">
     <mkdir dir="${build.dir}"/>
     <mkdir dir="${build.classes}"/>
     <mkdir dir="${build.src}"/>
@@ -404,7 +407,7 @@
     </jar>
   </target>
 
-  <target name="compile-hdfs-test" depends="compile-hdfs-classes, ivy-retrieve-test">
+  <target name="compile-hdfs-test" depends="compile-hdfs-classes">
     <macro-compile-hdfs-test
       target.dir="${test.hdfs.build.classes}"
       source.dir="${test.src.dir}/hdfs;${test.src.dir}/unit"
@@ -986,7 +989,7 @@
     <mkdir dir="${dist.dir}/docs/jdiff"/>
 
     <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true">
-      <fileset dir="${common.ivy.lib.dir}"/>
+      <fileset dir="${hdfs.ivy.lib.dir}"/>
     </copy>
 
     <copy todir="${dist.dir}/lib" includeEmptyDirs="false">
@@ -1084,7 +1087,7 @@
   	<mkdir dir="${dist.dir}/bin"/>
 
     <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true">
-      <fileset dir="${common.ivy.lib.dir}"/>
+      <fileset dir="${hdfs.ivy.lib.dir}"/>
     </copy>
 
     <copy todir="${dist.dir}/lib" includeEmptyDirs="false">
@@ -1202,7 +1205,6 @@
         <exclude name="CHANGES.txt"/>
         <exclude name="docs/"/>
         <exclude name="lib/jdiff/"/>
-        <exclude name="src/contrib/thriftfs/gen-*/" />
         <exclude name="**/conf/*" />
         <exclude name="webapps/**/WEB-INF/web.xml"/>
         <exclude name="src/docs/releasenotes.html" />
@@ -1457,7 +1459,6 @@
     <arg value="${findbugs.home}"/>
     <arg value="${forrest.home}"/>
     <arg value="${eclipse.home}"/>
-    <arg value="${python.home}"/>
     <arg value="${basedir}"/>
     <arg value="${jira.passwd}"/>
     <arg value="${curl.cmd}"/>
@@ -1484,7 +1485,7 @@
   </target>
   
   <target name="eclipse" 
-          depends="init,ant-eclipse-download,ivy-retrieve-common,ivy-retrieve-test"
+          depends="init,ant-eclipse-download,ivy-retrieve-hdfs,ivy-retrieve-common,ivy-retrieve-test"
           description="Create eclipse project files">
        <pathconvert property="eclipse.project">
          <path path="${basedir}"/>
@@ -1506,6 +1507,7 @@
                 output="${build.dir.eclipse-test-classes}" />
         <output path="${build.dir.eclipse-main-classes}" />
         <library pathref="ivy-common.classpath" exported="true" />
+        <library pathref="ivy-hdfs.classpath" exported="true" />
         <library pathref="ivy-test.classpath" exported="false" />
         <library path="${conf.dir}" exported="false" />
       </classpath>
@@ -1776,6 +1778,11 @@
       log="${ivyresolvelog}"/>
   </target>
 
+  <target name="ivy-resolve-hdfs" depends="ivy-init">
+    <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="hdfs"
+      log="${ivyresolvelog}"/>
+  </target>
+
   <target name="ivy-resolve-jdiff" depends="ivy-init">
     <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="jdiff"
       log="${ivyresolvelog}"/>
@@ -1838,6 +1845,14 @@
     <ivy:cachepath pathid="ivy-common.classpath" conf="common"/>
   </target>
 
+  <target name="ivy-retrieve-hdfs" depends="ivy-resolve-hdfs"
+    description="Retrieve Ivy-managed artifacts for the hdfs configurations">
+    <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+      pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"
+        log="${ivyretrievelog}"/>
+    <ivy:cachepath pathid="ivy-hdfs.classpath" conf="hdfs"/>
+  </target>
+
   <target name="ivy-retrieve-releaseaudit" depends="ivy-resolve-releaseaudit"
     description="Retrieve Ivy-managed artifacts for the compile configurations">
     <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"

Propchange: hadoop/hdfs/branches/HDFS-1073/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed May 18 23:44:23 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/build.xml:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/build.xml:796829-820463
 /hadoop/hdfs/branches/branch-0.21/build.xml:820487
-/hadoop/hdfs/trunk/build.xml:1086482-1102504
+/hadoop/hdfs/trunk/build.xml:1086482-1124460

Modified: hadoop/hdfs/branches/HDFS-1073/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/ivy.xml?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/ivy.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/ivy.xml Wed May 18 23:44:23 2011
@@ -36,9 +36,10 @@
     -->
     <!--Private configurations. -->
 
-    <conf name="common" visibility="private" extends="compile,runtime" description="common artifacts"/>
+    <conf name="common" visibility="private" extends="compile" description="Hadoop common artifacts"/>
+    <conf name="hdfs" visibility="private" extends="compile,runtime" description="HDFS dependent artifacts"/>
     <conf name="javadoc" visibility="private" description="artiracts required while performing doc generation" extends="common"/>
-    <conf name="test" extends="common" visibility="private" description="the classpath needed to run tests"/>
+    <conf name="test" extends="master" visibility="private" description="the classpath needed to run tests"/>
     <conf name="system" extends="test" visibility="private" description="the classpath needed to run system tests"/>
 
     <conf name="test-hdfswithmr" extends="test, common" visibility="private" description="the classpath needed to run tests"/>
@@ -56,10 +57,14 @@
   </publications>
   <dependencies>
     
-    <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="common->default"/>
-    <dependency org="org.apache.hadoop" name="hadoop-common-instrumented" rev="${hadoop-common.version}" conf="system->default"/>
+    <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="common->default">
+      <exclude module="ant"/>
+    </dependency>
+    <dependency org="org.apache.hadoop" name="hadoop-common-instrumented" rev="${hadoop-common.version}" conf="system->default">
+      <exclude module="ant"/>
+    </dependency>
     <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->master"/>
-    <dependency org="commons-daemon" name="commons-daemon" rev="${commons-daemon.version}" conf="common->default" />
+    <dependency org="commons-daemon" name="commons-daemon" rev="${commons-daemon.version}" conf="hdfs->default" />
     <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
     <dependency org="com.google.guava" name="guava" rev="${guava.version}" conf="common->default" />
     <dependency org="org.apache.hadoop" name="avro" rev="${avro.version}" conf="common->default">
@@ -72,7 +77,7 @@
 
     <dependency org="org.slf4j" name="slf4j-api" rev="${slf4j-api.version}" conf="test->master"/>
     <dependency org="org.slf4j" name="slf4j-log4j12" rev="${slf4j-log4j12.version}" conf="test->master"/>
-    <dependency org="org.apache.hadoop" name="hadoop-common-test" rev="${hadoop-common.version}" conf="test->default"/>
+    <dependency org="org.apache.hadoop" name="hadoop-common-test" rev="${hadoop-common.version}" conf="test->master"/>
 
     <dependency org="checkstyle" name="checkstyle" rev="${checkstyle.version}" conf="checkstyle->default"/>
 

Propchange: hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed May 18 23:44:23 2011
@@ -1,4 +1,4 @@
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs:987665-1095512
-/hadoop/hdfs/trunk/src/c++/libhdfs:1086482-1102504
+/hadoop/hdfs/trunk/src/c++/libhdfs:1086482-1124460

Modified: hadoop/hdfs/branches/HDFS-1073/src/contrib/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/contrib/build.xml?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/contrib/build.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/contrib/build.xml Wed May 18 23:44:23 2011
@@ -28,7 +28,6 @@
   <!-- ====================================================== -->
   <target name="compile">
     <subant target="compile">
-      <fileset dir="." includes="thriftfs/build.xml"/>
       <fileset dir="." includes="hdfsproxy/build.xml"/>
     </subant>
   </target>

Propchange: hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed May 18 23:44:23 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy:820487
-/hadoop/hdfs/trunk/src/contrib/hdfsproxy:1086482-1102504
+/hadoop/hdfs/trunk/src/contrib/hdfsproxy:1086482-1124460

Propchange: hadoop/hdfs/branches/HDFS-1073/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed May 18 23:44:23 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java:820487
-/hadoop/hdfs/trunk/src/java:1086482-1102504
+/hadoop/hdfs/trunk/src/java:1086482-1124460

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSClient.java Wed May 18 23:44:23 2011
@@ -259,7 +259,7 @@ public class DFSClient implements FSCons
         nameNodeAddr.getHostName() + ":" + nameNodeAddr.getPort();
     this.leaserenewer = LeaseRenewer.getInstance(authority, ugi, this);
     
-    String taskId = conf.get("mapred.task.id", "NONMAPREDUCE");
+    String taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
     this.clientName = leaserenewer.getClientName(taskId);
 
     defaultBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
@@ -307,6 +307,14 @@ public class DFSClient implements FSCons
         (HdfsConstants.READ_TIMEOUT_EXTENSION * numNodes +
         socketTimeout) : 0;
   }
+  
+  int getHdfsTimeout() {
+    return hdfsTimeout;
+  }
+  
+  String getClientName() {
+    return clientName;
+  }
 
   void checkOpen() throws IOException {
     if (!clientRunning) {
@@ -335,6 +343,11 @@ public class DFSClient implements FSCons
       return filesBeingWritten.isEmpty();
     }
   }
+  
+  /** @return true if the client is running */
+  boolean isClientRunning() {
+    return clientRunning;
+  }
 
   /** Renew leases */
   void renewLease() throws IOException {

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSUtil.java Wed May 18 23:44:23 2011
@@ -576,4 +576,13 @@ public class DFSUtil {
     return new InetSocketAddress(address.substring(0, colon), 
         Integer.parseInt(address.substring(colon + 1)));
   }
+
+  /**
+   * Round bytes to GiB (gibibyte)
+   * @param bytes number of bytes
+   * @return number of GiB
+   */
+  public static int roundBytesToGB(long bytes) {
+    return Math.round((float)bytes / 1024 / 1024 / 1024);
+  }
 }

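A quick illustration (editor's sketch, not from the commit) of the new DFSUtil helper's rounding behavior; the values are arbitrary:

    import org.apache.hadoop.hdfs.DFSUtil;

    public class RoundToGBSketch {
      public static void main(String[] args) {
        // 5 GiB + 600 MiB is about 5.586 GiB; Math.round takes it to 6.
        long bytes = (5L << 30) + (600L << 20);
        System.out.println(DFSUtil.roundBytesToGB(bytes)); // prints 6
      }
    }
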
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java Wed May 18 23:44:23 2011
@@ -27,6 +27,7 @@ import org.apache.hadoop.security.Refres
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.tools.GetUserMappingsProtocol;
 
 /**
  * {@link PolicyProvider} for HDFS protocols.
@@ -46,6 +47,8 @@ public class HDFSPolicyProvider extends 
                 RefreshAuthorizationPolicyProtocol.class),
     new Service("security.refresh.user.mappings.protocol.acl", 
                 RefreshUserMappingsProtocol.class),
+    new Service("security.get.user.mappings.protocol.acl",
+                GetUserMappingsProtocol.class)
   };
   
   @Override

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java Wed May 18 23:44:23 2011
@@ -187,9 +187,23 @@ class LeaseRenewer {
   /** A list of clients corresponding to this renewer. */
   private final List<DFSClient> dfsclients = new ArrayList<DFSClient>();
 
+  /**
+   * A stringified stack trace of the call stack when the Lease Renewer
+   * was instantiated. This is only generated if trace-level logging is
+   * enabled on this class.
+   */
+  private final String instantiationTrace;
+
   private LeaseRenewer(Factory.Key factorykey) {
     this.factorykey = factorykey;
     setGraceSleepPeriod(LEASE_RENEWER_GRACE_DEFAULT);
+    
+    if (LOG.isTraceEnabled()) {
+      instantiationTrace = StringUtils.stringifyException(
+        new Throwable("TRACE"));
+    } else {
+      instantiationTrace = null;
+    }
   }
 
   /** @return the renewal time in milliseconds. */
@@ -214,8 +228,8 @@ class LeaseRenewer {
     dfsclients.add(dfsc);
 
     //update renewal time
-    if (dfsc.hdfsTimeout > 0) {
-      final long half = dfsc.hdfsTimeout/2;
+    if (dfsc.getHdfsTimeout() > 0) {
+      final long half = dfsc.getHdfsTimeout()/2;
       if (half < renewal) {
         this.renewal = half;
       }
@@ -224,7 +238,7 @@ class LeaseRenewer {
 
   private synchronized boolean clientsRunning() {
     for(Iterator<DFSClient> i = dfsclients.iterator(); i.hasNext(); ) {
-      if (!i.next().clientRunning) {
+      if (!i.next().isClientRunning()) {
         i.remove();
       }
     }
@@ -251,6 +265,11 @@ class LeaseRenewer {
   synchronized boolean isRunning() {
     return daemon != null && daemon.isAlive();
   }
+  
+  /** Used only by tests */
+  synchronized String getDaemonName() {
+    return daemon.getName();
+  }
 
   /** Is the empty period longer than the grace period? */  
   private synchronized boolean isRenewerExpired() {
@@ -260,7 +279,7 @@ class LeaseRenewer {
 
   synchronized void put(final String src, final DFSOutputStream out,
       final DFSClient dfsc) {
-    if (dfsc.clientRunning) {
+    if (dfsc.isClientRunning()) {
       if (!isRunning() || isRenewerExpired()) {
        //start a new daemon with a new id.
         final int id = ++currentId;
@@ -280,6 +299,11 @@ class LeaseRenewer {
               }
             }
           }
+          
+          @Override
+          public String toString() {
+            return String.valueOf(LeaseRenewer.this);
+          }
         });
         daemon.start();
       }
@@ -322,13 +346,13 @@ class LeaseRenewer {
     }
 
     //update renewal time
-    if (renewal == dfsc.hdfsTimeout/2) {
+    if (renewal == dfsc.getHdfsTimeout()/2) {
       long min = FSConstants.LEASE_SOFTLIMIT_PERIOD;
       for(DFSClient c : dfsclients) {
-        if (c.hdfsTimeout > 0) {
-          final long half = c.hdfsTimeout;
-          if (half < min) {
-            min = half;
+        if (c.getHdfsTimeout() > 0) {
+          final long timeout = c.getHdfsTimeout();
+          if (timeout < min) {
+            min = timeout;
           }
         }
       }
@@ -362,16 +386,16 @@ class LeaseRenewer {
     Collections.sort(copies, new Comparator<DFSClient>() {
       @Override
       public int compare(final DFSClient left, final DFSClient right) {
-        return left.clientName.compareTo(right.clientName);
+        return left.getClientName().compareTo(right.getClientName());
       }
     });
     String previousName = "";
     for(int i = 0; i < copies.size(); i++) {
       final DFSClient c = copies.get(i);
       //skip if current client name is the same as the previous name.
-      if (!c.clientName.equals(previousName)) {
+      if (!c.getClientName().equals(previousName)) {
         c.renewLease();
-        previousName = c.clientName;
+        previousName = c.getClientName();
       }
     }
   }
@@ -417,8 +441,8 @@ class LeaseRenewer {
   public String toString() {
     String s = getClass().getSimpleName() + ":" + factorykey;
     if (LOG.isTraceEnabled()) {
-      return s + ", clients=" +  clientsString() + ", "
-             + StringUtils.stringifyException(new Throwable("for testing"));
+      return s + ", clients=" +  clientsString()
+        + ", created at " + instantiationTrace;
     }
     return s;
   }
@@ -429,9 +453,9 @@ class LeaseRenewer {
       return "[]";
     } else {
       final StringBuilder b = new StringBuilder("[").append(
-          dfsclients.get(0).clientName);
+          dfsclients.get(0).getClientName());
       for(int i = 1; i < dfsclients.size(); i++) {
-        b.append(", ").append(dfsclients.get(i).clientName);
+        b.append(", ").append(dfsclients.get(i).getClientName());
       }
       return b.append("]").toString();
     }

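The instantiationTrace field above is a general debugging idiom: capture a stringified stack trace at construction time, but only when trace logging is enabled, so toString() can later report where a long-lived object was created without imposing a cost in normal operation. A standalone sketch of the same idiom (the class name is illustrative):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.util.StringUtils;

    class TracedResource {
      private static final Log LOG = LogFactory.getLog(TracedResource.class);
      // Non-null only when TRACE logging was enabled at construction time.
      private final String instantiationTrace;

      TracedResource() {
        instantiationTrace = LOG.isTraceEnabled()
            ? StringUtils.stringifyException(new Throwable("TRACE"))
            : null;
      }

      @Override
      public String toString() {
        String s = getClass().getSimpleName();
        return instantiationTrace == null ? s : s + ", created at " + instantiationTrace;
      }
    }
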
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java Wed May 18 23:44:23 2011
@@ -51,11 +51,10 @@ public interface DataTransferProtocol {
    * when protocol changes. It is not very obvious. 
    */
   /*
-   * Version 23:
-   *    Changed the protocol methods to use ExtendedBlock instead
-   *    of Block.
+   * Version 24:
+   *    Remove deprecated fields.
    */
-  public static final int DATA_TRANSFER_VERSION = 23;
+  public static final int DATA_TRANSFER_VERSION = 24;
 
   /** Operation */
   public enum Op {
@@ -185,49 +184,6 @@ public interface DataTransferProtocol {
     }
   }    
 
-  /** @deprecated Deprecated at 0.21.  Use Op.WRITE_BLOCK instead. */
-  @Deprecated
-  public static final byte OP_WRITE_BLOCK = Op.WRITE_BLOCK.code;
-  /** @deprecated Deprecated at 0.21.  Use Op.READ_BLOCK instead. */
-  @Deprecated
-  public static final byte OP_READ_BLOCK = Op.READ_BLOCK.code;
-  /** @deprecated As of version 15, OP_READ_METADATA is no longer supported. */
-  @Deprecated
-  public static final byte OP_READ_METADATA = Op.READ_METADATA.code;
-  /** @deprecated Deprecated at 0.21.  Use Op.REPLACE_BLOCK instead. */
-  @Deprecated
-  public static final byte OP_REPLACE_BLOCK = Op.REPLACE_BLOCK.code;
-  /** @deprecated Deprecated at 0.21.  Use Op.COPY_BLOCK instead. */
-  @Deprecated
-  public static final byte OP_COPY_BLOCK = Op.COPY_BLOCK.code;
-  /** @deprecated Deprecated at 0.21.  Use Op.BLOCK_CHECKSUM instead. */
-  @Deprecated
-  public static final byte OP_BLOCK_CHECKSUM = Op.BLOCK_CHECKSUM.code;
-
-
-  /** @deprecated Deprecated at 0.21.  Use Status.SUCCESS instead. */
-  @Deprecated
-  public static final int OP_STATUS_SUCCESS = Status.SUCCESS.code;  
-  /** @deprecated Deprecated at 0.21.  Use Status.ERROR instead. */
-  @Deprecated
-  public static final int OP_STATUS_ERROR = Status.ERROR.code;
-  /** @deprecated Deprecated at 0.21.  Use Status.ERROR_CHECKSUM instead. */
-  @Deprecated
-  public static final int OP_STATUS_ERROR_CHECKSUM = Status.ERROR_CHECKSUM.code;
-  /** @deprecated Deprecated at 0.21.  Use Status.ERROR_INVALID instead. */
-  @Deprecated
-  public static final int OP_STATUS_ERROR_INVALID = Status.ERROR_INVALID.code;
-  /** @deprecated Deprecated at 0.21.  Use Status.ERROR_EXISTS instead. */
-  @Deprecated
-  public static final int OP_STATUS_ERROR_EXISTS = Status.ERROR_EXISTS.code;
-  /** @deprecated Deprecated at 0.21.  Use Status.ERROR_ACCESS_TOKEN instead.*/
-  @Deprecated
-  public static final int OP_STATUS_ERROR_ACCESS_TOKEN = Status.ERROR_ACCESS_TOKEN.code;
-  /** @deprecated Deprecated at 0.21.  Use Status.CHECKSUM_OK instead. */
-  @Deprecated
-  public static final int OP_STATUS_CHECKSUM_OK = Status.CHECKSUM_OK.code;
-
-
   /** Sender */
   @InterfaceAudience.Private
   @InterfaceStability.Evolving

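Bumping DATA_TRANSFER_VERSION to 24 turns the removal of the deprecated fields into an explicit wire-protocol change: peers on mismatched versions fail fast instead of misparsing the stream. A hedged sketch of how such a constant is typically enforced on the receiving side (the real receiver's logic may differ in detail):

    import java.io.DataInputStream;
    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;

    class VersionCheckSketch {
      static void checkVersion(DataInputStream in) throws IOException {
        short remoteVersion = in.readShort();
        if (remoteVersion != DataTransferProtocol.DATA_TRANSFER_VERSION) {
          throw new IOException("Version mismatch: expected "
              + DataTransferProtocol.DATA_TRANSFER_VERSION
              + " but received " + remoteVersion);
        }
      }
    }
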
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Wed May 18 23:44:23 2011
@@ -440,13 +440,13 @@ class BlockPoolSliceScanner {
         
         if (second) {
           totalScanErrors++;
-          datanode.getMetrics().blockVerificationFailures.inc(); 
+          datanode.getMetrics().incrBlockVerificationFailures();
           handleScanFailure(block);
           return;
         } 
       } finally {
         IOUtils.closeStream(blockSender);
-        datanode.getMetrics().blocksVerified.inc();
+        datanode.getMetrics().incrBlocksVerified();
         totalScans++;
       }
     }

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java Wed May 18 23:44:23 2011
@@ -465,8 +465,11 @@ public class BlockPoolSliceStorage exten
     int diskLayoutVersion = this.getLayoutVersion();
     // hardlink finalized blocks in tmpDir
     HardLink hardLink = new HardLink();
-    DataStorage.linkBlocks(fromDir, new File(toDir,
-        DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
+    DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED), 
+      new File(toDir,DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
+    DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_RBW), 
+        new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
+    LOG.info( hardLink.linkStats.report() );
   }
 
   private void verifyDistributedUpgradeProgress(NamespaceInfo nsInfo)

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Wed May 18 23:44:23 2011
@@ -628,7 +628,7 @@ class BlockReceiver implements Closeable
             offsetInBlock, lastChunkChecksum
           );
           
-          datanode.myMetrics.bytesWritten.inc(len);
+          datanode.metrics.incrBytesWritten(len);
         }
       } catch (IOException iex) {
         datanode.checkDiskError(iex);
@@ -696,7 +696,7 @@ class BlockReceiver implements Closeable
           // Finalize the block. Does this fsync()?
           datanode.data.finalizeBlock(block);
         }
-        datanode.myMetrics.blocksWritten.inc();
+        datanode.metrics.incrBlocksWritten();
       }
 
     } catch (IOException ioe) {

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed May 18 23:44:23 2011
@@ -29,7 +29,6 @@ import java.io.DataOutputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.lang.management.ManagementFactory;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
@@ -54,9 +53,6 @@ import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -118,6 +114,8 @@ import org.apache.hadoop.ipc.ProtocolSig
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -353,7 +351,7 @@ public class DataNode extends Configured
   long heartBeatInterval;
   private DataStorage storage = null;
   private HttpServer infoServer = null;
-  DataNodeMetrics myMetrics;
+  DataNodeMetrics metrics;
   private InetSocketAddress selfAddr;
   
   private static volatile DataNode datanodeObject = null;
@@ -925,7 +923,7 @@ public class DataNode extends Configured
         cmd = bpNamenode.blockReport(bpRegistration, blockPoolId, bReport
             .getBlockListAsLongs());
         long brTime = now() - brStartTime;
-        myMetrics.blockReports.inc(brTime);
+        metrics.addBlockReport(brTime);
         LOG.info("BlockReport of " + bReport.getNumberOfBlocks() +
             " blocks got processed in " + brTime + " msecs");
         //
@@ -1036,7 +1034,7 @@ public class DataNode extends Configured
             //
             lastHeartbeat = startTime;
             DatanodeCommand[] cmds = sendHeartBeat();
-            myMetrics.heartbeats.inc(now() - startTime);
+            metrics.addHeartbeat(now() - startTime);
             if (!processCommand(cmds))
               continue;
           }
@@ -1258,7 +1256,7 @@ public class DataNode extends Configured
       case DatanodeProtocol.DNA_TRANSFER:
         // Send a copy of a block to another datanode
         transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(), bcmd.getTargets());
-        myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
+        metrics.incrBlocksReplicated(bcmd.getBlocks().length);
         break;
       case DatanodeProtocol.DNA_INVALIDATE:
         //
@@ -1276,7 +1274,7 @@ public class DataNode extends Configured
           checkDiskError();
           throw e;
         }
-        myMetrics.blocksRemoved.inc(toDelete.length);
+        metrics.incrBlocksRemoved(toDelete.length);
         break;
       case DatanodeProtocol.DNA_SHUTDOWN:
         // shut down the data node
@@ -1377,7 +1375,7 @@ public class DataNode extends Configured
     this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
     initIpcServer(conf);
 
-    myMetrics = new DataNodeMetrics(conf, getMachineName());
+    metrics = DataNodeMetrics.create(conf, getMachineName());
 
     blockPoolManager = new BlockPoolManager(conf);
   }
@@ -1427,17 +1425,7 @@ public class DataNode extends Configured
   }
   
   private void registerMXBean() {
-    // register MXBean
-    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
-    try {
-      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
-      mbs.registerMBean(this, mxbeanName);
-    } catch ( javax.management.InstanceAlreadyExistsException iaee ) {
-      // in unit tests, we may have multiple datanodes in the same JVM
-      LOG.info("DataNode MXBean already registered");
-    } catch ( javax.management.JMException e ) {
-      LOG.warn("Failed to register DataNode MXBean", e);
-    }
+    MBeans.register("DataNode", "DataNodeInfo", this);
   }
   
   int getPort() {
@@ -1551,7 +1539,7 @@ public class DataNode extends Configured
   }
     
   DataNodeMetrics getMetrics() {
-    return myMetrics;
+    return metrics;
   }
   
   public static void setNewStorageID(DatanodeID dnId) {
@@ -1583,13 +1571,7 @@ public class DataNode extends Configured
       LOG.warn("Could not find ip address of \"default\" inteface.");
     }
     
-    int rand = 0;
-    try {
-      rand = SecureRandom.getInstance("SHA1PRNG").nextInt(Integer.MAX_VALUE);
-    } catch (NoSuchAlgorithmException e) {
-      LOG.warn("Could not use SecureRandom");
-      rand = R.nextInt(Integer.MAX_VALUE);
-    }
+    int rand = new SecureRandom().nextInt(Integer.MAX_VALUE);
     return "DS-" + rand + "-" + ip + "-" + port + "-"
         + System.currentTimeMillis();
   }
@@ -1674,8 +1656,8 @@ public class DataNode extends Configured
     if (data != null) {
       data.shutdown();
     }
-    if (myMetrics != null) {
-      myMetrics.shutdown();
+    if (metrics != null) {
+      metrics.shutdown();
     }
   }
   
@@ -1715,7 +1697,7 @@ public class DataNode extends Configured
     // shutdown the DN completely.
     int dpError = hasEnoughResources ? DatanodeProtocol.DISK_ERROR  
                                      : DatanodeProtocol.FATAL_DISK_ERROR;  
-    myMetrics.volumeFailures.inc(1);
+    metrics.incrVolumeFailures();
 
     //inform NameNodes
     for(BPOfferService bpos: blockPoolManager.getAllNamenodeThreads()) {
@@ -2009,7 +1991,7 @@ public class DataNode extends Configured
    * @param delHint
    */
   void closeBlock(ExtendedBlock block, String delHint) {
-    myMetrics.blocksWritten.inc();
+    metrics.incrBlocksWritten();
     BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
     if(bpos != null) {
       bpos.notifyNamenodeReceivedBlock(block, delHint);
@@ -2144,6 +2126,7 @@ public class DataNode extends Configured
         conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
                  DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
     ArrayList<File> dirs = getDataDirsFromURIs(dataDirs, localFS, permission);
+    DefaultMetricsSystem.initialize("DataNode");
 
     assert dirs.size() > 0 : "number of data directories should be > 0";
     return new DataNode(conf, dirs, resources);

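The registerMXBean() rewrite above replaces roughly ten lines of hand-rolled JMX code with the metrics2 MBeans helper, which handles naming and logs (rather than throws) when an instance is already registered, e.g. multiple DataNodes in one test JVM. A minimal sketch of the register/unregister lifecycle (class and method names are illustrative):

    import javax.management.ObjectName;
    import org.apache.hadoop.metrics2.util.MBeans;

    class MBeanLifecycleSketch {
      private ObjectName beanName;

      void start(Object mbean) {
        // Registers the bean under the Hadoop JMX naming convention.
        beanName = MBeans.register("DataNode", "DataNodeInfo", mbean);
      }

      void stop() {
        if (beanName != null) {
          MBeans.unregister(beanName);  // mirrors FSDataset.shutdown() below
        }
      }
    }
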
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Wed May 18 23:44:23 2011
@@ -48,8 +48,8 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
@@ -183,11 +183,11 @@ class DataXceiver extends DataTransferPr
       SUCCESS.write(out); // send op status
       long read = blockSender.sendBlock(out, baseStream, null); // send data
       
-      datanode.myMetrics.bytesRead.inc((int) read);
-      datanode.myMetrics.blocksRead.inc();
+      datanode.metrics.incrBytesRead((int) read);
+      datanode.metrics.incrBlocksRead();
     } catch ( SocketException ignored ) {
       // Its ok for remote side to close the connection anytime.
-      datanode.myMetrics.blocksRead.inc();
+      datanode.metrics.incrBlocksRead();
     } catch ( IOException ioe ) {
       /* What exactly should we do here?
        * Earlier version shutdown() datanode if there is disk error.
@@ -203,9 +203,8 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.readBlockOp);
-    updateCounter(datanode.myMetrics.readsFromLocalClient,
-                  datanode.myMetrics.readsFromRemoteClient);
+    datanode.metrics.addReadBlockOp(elapsed());
+    datanode.metrics.incrReadsFromClient(isLocal);
   }
 
   /**
@@ -409,9 +408,8 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.writeBlockOp);
-    updateCounter(datanode.myMetrics.writesFromLocalClient,
-                  datanode.myMetrics.writesFromRemoteClient);
+    datanode.metrics.addWriteBlockOp(elapsed());
+    datanode.metrics.incrWritesFromClient(isLocal);
   }
 
   @Override
@@ -482,7 +480,7 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.blockChecksumOp);
+    datanode.metrics.addBlockChecksumOp(elapsed());
   }
 
   /**
@@ -535,8 +533,8 @@ class DataXceiver extends DataTransferPr
       long read = blockSender.sendBlock(reply, baseStream, 
                                         dataXceiverServer.balanceThrottler);
 
-      datanode.myMetrics.bytesRead.inc((int) read);
-      datanode.myMetrics.blocksRead.inc();
+      datanode.metrics.incrBytesRead((int) read);
+      datanode.metrics.incrBlocksRead();
       
       LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
     } catch (IOException ioe) {
@@ -556,7 +554,7 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics    
-    updateDuration(datanode.myMetrics.copyBlockOp);
+    datanode.metrics.addCopyBlockOp(elapsed());
   }
 
   /**
@@ -670,16 +668,16 @@ class DataXceiver extends DataTransferPr
     }
 
     //update metrics
-    updateDuration(datanode.myMetrics.replaceBlockOp);
+    datanode.metrics.addReplaceBlockOp(elapsed());
   }
 
-  private void updateDuration(MetricsTimeVaryingRate mtvr) {
-    mtvr.inc(now() - opStartTime);
+  private long elapsed() {
+    return now() - opStartTime;
   }
 
-  private void updateCounter(MetricsTimeVaryingInt localCounter,
-      MetricsTimeVaryingInt remoteCounter) {
-    (isLocal? localCounter: remoteCounter).inc();
+  private void updateCounter(MutableCounterLong localCounter,
+      MutableCounterLong remoteCounter) {
+    (isLocal? localCounter: remoteCounter).incr();
   }
 
   /**

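The DataXceiver changes above, like the DataNodeMetrics rewrite below, follow the Metrics 2.0 shape introduced by HDFS-1117: public MetricsTimeVarying* fields become annotated mutable metrics, updated through small incr*/add* helpers instead of being poked directly from other classes. A hedged sketch of that shape (the metric names are illustrative, not this commit's exact set):

    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;
    import org.apache.hadoop.metrics2.lib.MutableRate;

    @Metrics(about = "Example DataNode metrics", context = "dfs")
    class ExampleMetricsSketch {
      @Metric MutableCounterLong blocksRead;  // monotonically increasing count
      @Metric MutableRate readBlockOp;        // tracks operation count and average latency

      void incrBlocksRead() { blocksRead.incr(); }
      void addReadBlockOp(long latencyMillis) { readBlockOp.add(latencyMillis); }
    }
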
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java Wed May 18 23:44:23 2011
@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
-import org.apache.hadoop.hdfs.util.DaemonFactory;
+import org.apache.hadoop.util.Daemon;
 
 /**
  * Periodically scans the data directories for block and block metadata files.
@@ -228,8 +228,9 @@ public class DirectoryScanner implements
                     DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);
 
     reportCompileThreadPool = Executors.newFixedThreadPool(threads, 
-        new DaemonFactory());
-    masterThread = new ScheduledThreadPoolExecutor(1, new DaemonFactory());
+        new Daemon.DaemonFactory());
+    masterThread = new ScheduledThreadPoolExecutor(1,
+        new Daemon.DaemonFactory());
   }
 
   void start() {

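Per HDFS-1730 (noted in CHANGES.txt above), the HDFS-local DaemonFactory is deleted in favor of Daemon.DaemonFactory from common: a ThreadFactory whose threads are daemon threads, so executor pools do not keep the JVM alive at shutdown. Usage sketch:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import org.apache.hadoop.util.Daemon;

    class DaemonPoolSketch {
      // Every thread this pool creates is marked as a daemon thread.
      static final ExecutorService POOL =
          Executors.newFixedThreadPool(4, new Daemon.DaemonFactory());
    }
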
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Wed May 18 23:44:23 2011
@@ -59,7 +59,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.StringUtils;
@@ -2186,18 +2186,17 @@ public class FSDataset implements FSCons
     }
     try {
       bean = new StandardMBean(this,FSDatasetMBean.class);
-      mbeanName = MBeanUtil.registerMBean("DataNode", "FSDatasetState-" + storageName, bean);
+      mbeanName = MBeans.register("DataNode", "FSDatasetState-" + storageName, bean);
     } catch (NotCompliantMBeanException e) {
-      e.printStackTrace();
+      DataNode.LOG.warn("Error registering FSDatasetState MBean", e);
     }
- 
-    DataNode.LOG.info("Registered FSDatasetStatusMBean");
+    DataNode.LOG.info("Registered FSDatasetState MBean");
   }
 
   @Override // FSDatasetInterface
   public void shutdown() {
     if (mbeanName != null)
-      MBeanUtil.unregisterMBean(mbeanName);
+      MBeans.unregister(mbeanName);
     
     if (asyncDiskService != null) {
       asyncDiskService.shutdown();

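[Note on the FSDataset change: the hunks above move MBean registration from the old metrics framework's MBeanUtil to the metrics2 MBeans helper, and route the NotCompliantMBeanException to the DataNode log instead of stderr. A sketch of the register/unregister lifecycle, using a hypothetical one-method MBean interface rather than the real FSDatasetMBean:

    import javax.management.NotCompliantMBeanException;
    import javax.management.ObjectName;
    import javax.management.StandardMBean;
    import org.apache.hadoop.metrics2.util.MBeans;

    // Hypothetical MBean interface; the real FSDatasetMBean is larger.
    interface ScratchStateMBean {
      long getCapacity();
    }

    class ScratchState implements ScratchStateMBean {
      private ObjectName mbeanName;

      public long getCapacity() { return 0L; }

      void registerMBean(String storageName) {
        try {
          StandardMBean bean =
              new StandardMBean(this, ScratchStateMBean.class);
          // Same call shape as the patch: service name, bean name, bean.
          mbeanName = MBeans.register("DataNode",
              "ScratchState-" + storageName, bean);
        } catch (NotCompliantMBeanException e) {
          // Registration failure is non-fatal; log and continue.
        }
      }

      void shutdown() {
        if (mbeanName != null) {
          MBeans.unregister(mbeanName);
        }
      }
    }
]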
Propchange: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed May 18 23:44:23 2011
@@ -5,4 +5,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:820487
-/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1086482-1102504
+/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1086482-1124460

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java Wed May 18 23:44:23 2011
@@ -17,23 +17,22 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.metrics;
 
+import java.util.Random;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
-import org.apache.hadoop.metrics.util.MetricsBase;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+import static org.apache.hadoop.metrics2.impl.MsInfo.*;
 
 /**
- * 
+ *
  * This class is for maintaining  the various DataNode statistics
  * and publishing them through the metrics interfaces.
  * This also registers the JMX MBean for RPC.
@@ -45,97 +44,125 @@ import org.apache.hadoop.hdfs.DFSConfigK
  *
  */
 @InterfaceAudience.Private
-public class DataNodeMetrics implements Updater {
-  private final MetricsRecord metricsRecord;
-  private DataNodeActivityMBean datanodeActivityMBean;
-  public MetricsRegistry registry = new MetricsRegistry();
-  
-  
-  public MetricsTimeVaryingLong bytesWritten = 
-                      new MetricsTimeVaryingLong("bytes_written", registry);
-  public MetricsTimeVaryingLong bytesRead = 
-                      new MetricsTimeVaryingLong("bytes_read", registry);
-  public MetricsTimeVaryingInt blocksWritten = 
-                      new MetricsTimeVaryingInt("blocks_written", registry);
-  public MetricsTimeVaryingInt blocksRead = 
-                      new MetricsTimeVaryingInt("blocks_read", registry);
-  public MetricsTimeVaryingInt blocksReplicated =
-                      new MetricsTimeVaryingInt("blocks_replicated", registry);
-  public MetricsTimeVaryingInt blocksRemoved =
-                       new MetricsTimeVaryingInt("blocks_removed", registry);
-  public MetricsTimeVaryingInt blocksVerified = 
-                        new MetricsTimeVaryingInt("blocks_verified", registry);
-  public MetricsTimeVaryingInt blockVerificationFailures =
-                       new MetricsTimeVaryingInt("block_verification_failures", registry);
-  
-  public MetricsTimeVaryingInt readsFromLocalClient = 
-                new MetricsTimeVaryingInt("reads_from_local_client", registry);
-  public MetricsTimeVaryingInt readsFromRemoteClient = 
-                new MetricsTimeVaryingInt("reads_from_remote_client", registry);
-  public MetricsTimeVaryingInt writesFromLocalClient = 
-              new MetricsTimeVaryingInt("writes_from_local_client", registry);
-  public MetricsTimeVaryingInt writesFromRemoteClient = 
-              new MetricsTimeVaryingInt("writes_from_remote_client", registry);
+@Metrics(about="DataNode metrics", context="dfs")
+public class DataNodeMetrics {
 
-  public MetricsTimeVaryingInt volumeFailures =
-    new MetricsTimeVaryingInt("volumeFailures", registry);
+  @Metric MutableCounterLong bytesWritten;
+  @Metric MutableCounterLong bytesRead;
+  @Metric MutableCounterLong blocksWritten;
+  @Metric MutableCounterLong blocksRead;
+  @Metric MutableCounterLong blocksReplicated;
+  @Metric MutableCounterLong blocksRemoved;
+  @Metric MutableCounterLong blocksVerified;
+  @Metric MutableCounterLong blockVerificationFailures;
+  @Metric MutableCounterLong readsFromLocalClient;
+  @Metric MutableCounterLong readsFromRemoteClient;
+  @Metric MutableCounterLong writesFromLocalClient;
+  @Metric MutableCounterLong writesFromRemoteClient;
   
-  public MetricsTimeVaryingRate readBlockOp = 
-                new MetricsTimeVaryingRate("readBlockOp", registry);
-  public MetricsTimeVaryingRate writeBlockOp = 
-                new MetricsTimeVaryingRate("writeBlockOp", registry);
-  public MetricsTimeVaryingRate blockChecksumOp = 
-                new MetricsTimeVaryingRate("blockChecksumOp", registry);
-  public MetricsTimeVaryingRate copyBlockOp = 
-                new MetricsTimeVaryingRate("copyBlockOp", registry);
-  public MetricsTimeVaryingRate replaceBlockOp = 
-                new MetricsTimeVaryingRate("replaceBlockOp", registry);
-  public MetricsTimeVaryingRate heartbeats = 
-                    new MetricsTimeVaryingRate("heartBeats", registry);
-  public MetricsTimeVaryingRate blockReports = 
-                    new MetricsTimeVaryingRate("blockReports", registry);
-
-    
-  public DataNodeMetrics(Configuration conf, String datanodeName) {
-    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY); 
-    // Initiate reporting of Java VM metrics
-    JvmMetrics.init("DataNode", sessionId);
-    
-
-    // Now the MBean for the data node
-    datanodeActivityMBean = new DataNodeActivityMBean(registry, datanodeName);
-    
-    // Create record for DataNode metrics
-    MetricsContext context = MetricsUtil.getContext("dfs");
-    metricsRecord = MetricsUtil.createRecord(context, "datanode");
-    metricsRecord.setTag("sessionId", sessionId);
-    context.registerUpdater(this);
+  @Metric MutableCounterLong volumeFailures;
+
+  @Metric MutableRate readBlockOp;
+  @Metric MutableRate writeBlockOp;
+  @Metric MutableRate blockChecksumOp;
+  @Metric MutableRate copyBlockOp;
+  @Metric MutableRate replaceBlockOp;
+  @Metric MutableRate heartbeats;
+  @Metric MutableRate blockReports;
+
+  final MetricsRegistry registry = new MetricsRegistry("datanode");
+  final String name;
+  static final Random rng = new Random();
+
+  public DataNodeMetrics(String name, String sessionId) {
+    this.name = name;
+    registry.tag(SessionId, sessionId);
   }
-  
+
+  public static DataNodeMetrics create(Configuration conf, String dnName) {
+    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    JvmMetrics.create("DataNode", sessionId, ms);
+    String name = "DataNodeActivity-"+ (dnName.isEmpty()
+        ? "UndefinedDataNodeName"+ rng.nextInt() : dnName.replace(':', '-'));
+    return ms.register(name, null, new DataNodeMetrics(name, sessionId));
+  }
+
+  public String name() { return name; }
+
+  public void addHeartbeat(long latency) {
+    heartbeats.add(latency);
+  }
+
+  public void addBlockReport(long latency) {
+    blockReports.add(latency);
+  }
+
+  public void incrBlocksReplicated(int delta) {
+    blocksReplicated.incr(delta);
+  }
+
+  public void incrBlocksWritten() {
+    blocksWritten.incr();
+  }
+
+  public void incrBlocksRemoved(int delta) {
+    blocksRemoved.incr(delta);
+  }
+
+  public void incrBytesWritten(int delta) {
+    bytesWritten.incr(delta);
+  }
+
+  public void incrBlockVerificationFailures() {
+    blockVerificationFailures.incr();
+  }
+
+  public void incrBlocksVerified() {
+    blocksVerified.incr();
+  }
+
+  public void addReadBlockOp(long latency) {
+    readBlockOp.add(latency);
+  }
+
+  public void addWriteBlockOp(long latency) {
+    writeBlockOp.add(latency);
+  }
+
+  public void addReplaceBlockOp(long latency) {
+    replaceBlockOp.add(latency);
+  }
+
+  public void addCopyBlockOp(long latency) {
+    copyBlockOp.add(latency);
+  }
+
+  public void addBlockChecksumOp(long latency) {
+    blockChecksumOp.add(latency);
+  }
+
+  public void incrBytesRead(int delta) {
+    bytesRead.incr(delta);
+  }
+
+  public void incrBlocksRead() {
+    blocksRead.incr();
+  }
+
   public void shutdown() {
-    if (datanodeActivityMBean != null) 
-      datanodeActivityMBean.shutdown();
+    DefaultMetricsSystem.shutdown();
   }
-    
-  /**
-   * Since this object is a registered updater, this method will be called
-   * periodically, e.g. every 5 seconds.
-   */
-  public void doUpdates(MetricsContext unused) {
-    synchronized (this) {
-      for (MetricsBase m : registry.getMetricsList()) {
-        m.pushMetric(metricsRecord);
-      }
-    }
-    metricsRecord.update();
-  }
-  public void resetAllMinMax() {
-    readBlockOp.resetMinMax();
-    writeBlockOp.resetMinMax();
-    blockChecksumOp.resetMinMax();
-    copyBlockOp.resetMinMax();
-    replaceBlockOp.resetMinMax();
-    heartbeats.resetMinMax();
-    blockReports.resetMinMax();
+
+  public void incrWritesFromClient(boolean local) {
+    (local ? writesFromLocalClient : writesFromRemoteClient).incr();
+  }
+
+  public void incrReadsFromClient(boolean local) {
+    (local ? readsFromLocalClient : readsFromRemoteClient).incr();
+  }
+  
+  public void incrVolumeFailures() {
+    volumeFailures.incr();
   }
 }

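[Note on the DataNodeMetrics rewrite: this is the core of the commit's metrics migration. The Updater-based class, which pushed every MetricsBase into a MetricsRecord from doUpdates(), becomes an annotated metrics2 source whose @Metric fields the framework instantiates and snapshots on its own. Increments are also wrapped in named methods (incrBytesRead, addHeartbeat, and so on) so callers no longer touch public metric fields, and the local/remote counter pairs are picked with a single ternary dispatch. A minimal annotated source in the same style -- the class and metric names here are illustrative:

    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;
    import org.apache.hadoop.metrics2.lib.MutableRate;

    @Metrics(about="Example metrics source", context="dfs")
    public class ExampleMetrics {
      // The framework injects these fields; no manual construction.
      @Metric MutableCounterLong requests;    // monotonic count
      @Metric MutableRate requestLatency;     // ops count plus avg time

      public static ExampleMetrics create() {
        MetricsSystem ms = DefaultMetricsSystem.instance();
        return ms.register("ExampleActivity", null, new ExampleMetrics());
      }

      // Narrow accessor in the style of the new DataNodeMetrics methods.
      public void addRequest(long latencyMillis) {
        requests.incr();
        requestLatency.add(latencyMillis);
      }
    }

Note there is no doUpdates() or resetAllMinMax() equivalent in the new style; periodic snapshotting and rate bookkeeping happen inside the mutable metric classes.]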
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java Wed May 18 23:44:23 2011
@@ -33,7 +33,7 @@ import org.apache.hadoop.classification.
  * 
  * <p>
  * Data Node runtime statistic  info is report in another MBean
- * @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean
+ * @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics
  *
  */
 @InterfaceAudience.Private

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java Wed May 18 23:44:23 2011
@@ -49,6 +49,9 @@ public class BlockPlacementPolicyDefault
   private boolean considerLoad; 
   private NetworkTopology clusterMap;
   private FSClusterStats stats;
+  static final String enableDebugLogging = "For more information, please enable"
+    + " DEBUG level logging on the "
+    + "org.apache.hadoop.hdfs.server.namenode.FSNamesystem logger.";
 
   BlockPlacementPolicyDefault(Configuration conf,  FSClusterStats stats,
                            NetworkTopology clusterMap) {
@@ -66,6 +69,14 @@ public class BlockPlacementPolicyDefault
     this.clusterMap = clusterMap;
   }
 
+  private ThreadLocal<StringBuilder> threadLocalBuilder =
+    new ThreadLocal<StringBuilder>() {
+    @Override
+    protected StringBuilder initialValue() {
+      return new StringBuilder();
+    }
+  };
+
   /** {@inheritDoc} */
   public DatanodeDescriptor[] chooseTarget(String srcPath,
                                     int numOfReplicas,
@@ -157,6 +168,7 @@ public class BlockPlacementPolicyDefault
     if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
       return writer;
     }
+    int totalReplicasExpected = numOfReplicas;
       
     int numOfResults = results.size();
     boolean newBlock = (numOfResults==0);
@@ -198,7 +210,8 @@ public class BlockPlacementPolicyDefault
                    blocksize, maxNodesPerRack, results);
     } catch (NotEnoughReplicasException e) {
       FSNamesystem.LOG.warn("Not able to place enough replicas, still in need of "
-               + numOfReplicas);
+               + numOfReplicas + " to reach " + totalReplicasExpected + "\n"
+               + e.getMessage());
     }
     return writer;
   }
@@ -326,6 +339,13 @@ public class BlockPlacementPolicyDefault
     throws NotEnoughReplicasException {
     int numOfAvailableNodes =
       clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet());
+    StringBuilder builder = null;
+    if (FSNamesystem.LOG.isDebugEnabled()) {
+      builder = threadLocalBuilder.get();
+      builder.setLength(0);
+      builder.append("[");
+    }
+    boolean badTarget = false;
     while(numOfAvailableNodes > 0) {
       DatanodeDescriptor chosenNode = 
         (DatanodeDescriptor)(clusterMap.chooseRandom(nodes));
@@ -336,12 +356,20 @@ public class BlockPlacementPolicyDefault
         if (isGoodTarget(chosenNode, blocksize, maxNodesPerRack, results)) {
           results.add(chosenNode);
           return chosenNode;
+        } else {
+          badTarget = true;
         }
       }
     }
 
-    throw new NotEnoughReplicasException(
-        "Not able to place enough replicas");
+    String detail = enableDebugLogging;
+    if (FSNamesystem.LOG.isDebugEnabled()) {
+      if (badTarget && builder != null) {
+        detail = builder.append("]").toString();
+        builder.setLength(0);
+      } else detail = "";
+    }
+    throw new NotEnoughReplicasException(detail);
   }
     
   /* Randomly choose <i>numOfReplicas</i> targets from <i>nodes</i>.
@@ -356,6 +384,13 @@ public class BlockPlacementPolicyDefault
       
     int numOfAvailableNodes =
       clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet());
+    StringBuilder builder = null;
+    if (FSNamesystem.LOG.isDebugEnabled()) {
+      builder = threadLocalBuilder.get();
+      builder.setLength(0);
+      builder.append("[");
+    }
+    boolean badTarget = false;
     while(numOfReplicas > 0 && numOfAvailableNodes > 0) {
       DatanodeDescriptor chosenNode = 
         (DatanodeDescriptor)(clusterMap.chooseRandom(nodes));
@@ -366,13 +401,21 @@ public class BlockPlacementPolicyDefault
         if (isGoodTarget(chosenNode, blocksize, maxNodesPerRack, results)) {
           numOfReplicas--;
           results.add(chosenNode);
+        } else {
+          badTarget = true;
         }
       }
     }
       
     if (numOfReplicas>0) {
-      throw new NotEnoughReplicasException(
-                                           "Not able to place enough replicas");
+      String detail = enableDebugLogging;
+      if (FSNamesystem.LOG.isDebugEnabled()) {
+        if (badTarget && builder != null) {
+          detail = builder.append("]").toString();
+          builder.setLength(0);
+        } else detail = "";
+      }
+      throw new NotEnoughReplicasException(detail);
     }
   }
     
@@ -394,8 +437,9 @@ public class BlockPlacementPolicyDefault
     // check if the node is (being) decommissed
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
       if(FSNamesystem.LOG.isDebugEnabled()) {
-        FSNamesystem.LOG.debug("Node "+NodeBase.getPath(node)+
-            " is not chosen because the node is (being) decommissioned");
+        threadLocalBuilder.get().append(node.toString()).append(": ")
+          .append("Node ").append(NodeBase.getPath(node))
+          .append(" is not chosen because the node is (being) decommissioned ");
       }
       return false;
     }
@@ -405,8 +449,9 @@ public class BlockPlacementPolicyDefault
     // check the remaining capacity of the target machine
     if (blockSize* FSConstants.MIN_BLOCKS_FOR_WRITE>remaining) {
       if(FSNamesystem.LOG.isDebugEnabled()) {
-        FSNamesystem.LOG.debug("Node "+NodeBase.getPath(node)+
-            " is not chosen because the node does not have enough space");
+        threadLocalBuilder.get().append(node.toString()).append(": ")
+          .append("Node ").append(NodeBase.getPath(node))
+          .append(" is not chosen because the node does not have enough space ");
       }
       return false;
     }
@@ -420,8 +465,9 @@ public class BlockPlacementPolicyDefault
       }
       if (node.getXceiverCount() > (2.0 * avgLoad)) {
         if(FSNamesystem.LOG.isDebugEnabled()) {
-          FSNamesystem.LOG.debug("Node "+NodeBase.getPath(node)+
-              " is not chosen because the node is too busy");
+          threadLocalBuilder.get().append(node.toString()).append(": ")
+            .append("Node ").append(NodeBase.getPath(node))
+            .append(" is not chosen because the node is too busy ");
         }
         return false;
       }
@@ -439,8 +485,9 @@ public class BlockPlacementPolicyDefault
     }
     if (counter>maxTargetPerLoc) {
       if(FSNamesystem.LOG.isDebugEnabled()) {
-        FSNamesystem.LOG.debug("Node "+NodeBase.getPath(node)+
-            " is not chosen because the rack has too many chosen nodes");
+        threadLocalBuilder.get().append(node.toString()).append(": ")
+          .append("Node ").append(NodeBase.getPath(node))
+          .append(" is not chosen because the rack has too many chosen nodes ");
       }
       return false;
     }

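[Note on the block placement change: the hunks above gate all the "is not chosen because" detail behind isDebugEnabled() and accumulate it in a per-thread, reused StringBuilder, so NotEnoughReplicasException can carry the rejection reasons without per-call allocation or locking on the hot path. A standalone sketch of that buffer idiom -- the method names are illustrative:

    public class DebugDetail {
      // One StringBuilder per thread, created lazily and reused forever.
      private static final ThreadLocal<StringBuilder> BUF =
          new ThreadLocal<StringBuilder>() {
        @Override
        protected StringBuilder initialValue() {
          return new StringBuilder();
        }
      };

      static String describe(String node, String reason) {
        StringBuilder b = BUF.get();
        b.setLength(0);          // reset old contents, keep the capacity
        return b.append(node).append(": ").append(reason).toString();
      }

      public static void main(String[] args) {
        System.out.println(describe("dn1", "node is too busy"));
      }
    }
]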
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed May 18 23:44:23 2011
@@ -187,7 +187,7 @@ class FSDirectory implements Closeable {
 
   private void incrDeletedFileCount(int count) {
     if (getFSNamesystem() != null)
-      NameNode.getNameNodeMetrics().numFilesDeleted.inc(count);
+      NameNode.getNameNodeMetrics().incrFilesDeleted(count);
   }
     
   /**
@@ -1486,7 +1486,7 @@ class FSDirectory implements Closeable {
         // Directory creation also count towards FilesCreated
         // to match count of FilesDeleted metric.
         if (getFSNamesystem() != null)
-          NameNode.getNameNodeMetrics().numFilesCreated.inc();
+          NameNode.getNameNodeMetrics().incrFilesCreated();
         fsImage.getEditLog().logMkDir(cur, inodes[i]);
         if(NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug(

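[Note on the FSDirectory change: here, and in the FSEditLog hunks that follow, direct pokes at public metric fields (numFilesDeleted.inc(count)) become calls to named methods (incrFilesDeleted(count)). That keeps every call site compiling when the underlying metric type changes, as it just did in this commit. The shape of the pattern, sketched with a plain JDK counter standing in for the metrics type:

    import java.util.concurrent.atomic.AtomicLong;

    // The counter type is a private detail; callers see only the named
    // increment method, so swapping metrics libraries stays local.
    class NameNodeActivity {
      private final AtomicLong filesDeleted = new AtomicLong();

      void incrFilesDeleted(int count) {
        filesDeleted.addAndGet(count);
      }

      long getFilesDeleted() {
        return filesDeleted.get();
      }
    }
]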
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Wed May 18 23:44:23 2011
@@ -340,7 +340,7 @@ public class FSEditLog implements NNStor
     numTransactions++;
     totalTimeTransactions += (end-start);
     if (metrics != null) // Metrics is non-null only when used inside name node
-      metrics.transactions.inc((end-start));
+      metrics.addTransaction(end-start);
   }
 
   /**
@@ -444,7 +444,7 @@ public class FSEditLog implements NNStor
         if (mytxid <= synctxid) {
           numTransactionsBatchedInSync++;
           if (metrics != null) // Metrics is non-null only when used inside name node
-            metrics.transactionsBatchedInSync.inc();
+            metrics.incrTransactionsBatchedInSync();
           return;
         }
      
@@ -490,7 +490,7 @@ public class FSEditLog implements NNStor
       disableAndReportErrorOnJournals(badJournals);
   
       if (metrics != null) // Metrics non-null only when used inside name node
-        metrics.syncs.inc(elapsed);
+        metrics.addSync(elapsed);
     } finally {
       // Prevent RuntimeException from blocking other log edit sync 
       synchronized (this) {

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1124466&r1=1124465&r2=1124466&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Wed May 18 23:44:23 2011
@@ -792,10 +792,19 @@ public class FSImage implements NNStorag
     for (Iterator<StorageDirectory> it
            = storage.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
       StorageDirectory sd = it.next();
-      FSImageSaver saver = new FSImageSaver(sd, errorSDs);
-      Thread saveThread = new Thread(saver, saver.toString());
-      saveThreads.add(saveThread);
-      saveThread.start();
+      if (errorSDs.contains(sd)) {
+        continue;
+      }
+      try {
+        FSImageSaver saver = new FSImageSaver(sd, errorSDs);
+        Thread saveThread = new Thread(saver, saver.toString());
+        saveThreads.add(saveThread);
+        saveThread.start();
+      } catch (Exception e) {
+        LOG.error("Failed save to image directory " + sd.getRoot(), e);
+        errorSDs.add(sd);
+        continue;
+      }
     }
     waitForThreads(saveThreads);
     saveThreads.clear();
@@ -813,32 +822,58 @@ public class FSImage implements NNStorag
     for (Iterator<StorageDirectory> it
            = storage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
       StorageDirectory sd = it.next();
+      if (errorSDs.contains(sd)) {
+        continue;
+      }
+
       // if this directory already stores the image and edits, then it was
       // already processed in the earlier loop.
       if (sd.getStorageDirType() == NameNodeDirType.IMAGE_AND_EDITS) {
         continue;
       }
 
-      FSImageSaver saver = new FSImageSaver(sd, errorSDs);
-      Thread saveThread = new Thread(saver, saver.toString());
-      saveThreads.add(saveThread);
-      saveThread.start();
+      try {
+        FSImageSaver saver = new FSImageSaver(sd, errorSDs);
+        Thread saveThread = new Thread(saver, saver.toString());
+        saveThreads.add(saveThread);
+        saveThread.start();
+      } catch (Exception e) {
+        LOG.error("Failed save to edits directory " + sd.getRoot(), e);
+        errorSDs.add(sd);
+        continue;
+      }
     }
     waitForThreads(saveThreads);
 
     // mv lastcheckpoint.tmp -> previous.checkpoint
     for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
+      if (errorSDs.contains(sd)) {
+        continue;
+      }
       try {
         storage.moveLastCheckpoint(sd);
       } catch(IOException ie) {
         LOG.error("Unable to move last checkpoint for " + sd.getRoot(), ie);
         errorSDs.add(sd);
+        continue;
       }
     }
-    storage.reportErrorsOnDirectories(errorSDs);
-    if(!editLog.isOpen()) editLog.open();
-    ckptState = CheckpointStates.UPLOAD_DONE;
+    
+    try {
+      storage.reportErrorsOnDirectories(errorSDs);
+      
+      // If there was an error in every storage dir, each one will have been
+      // removed from the list of storage directories.
+      if (storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0 ||
+          storage.getNumStorageDirs(NameNodeDirType.EDITS) == 0) {
+        throw new IOException("Failed to save any storage directories while saving namespace");
+      }
+      
+      if(!editLog.isOpen()) editLog.open();
+    } finally {
+      ckptState = CheckpointStates.UPLOAD_DONE;
+    }
   }
 
   /**

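[Note on the FSImage change: the saveNamespace() rework above applies one consistent error policy across all three loops -- skip a directory that already failed, catch per-directory failures and record them instead of aborting, refuse to finish only when no image or edits directory succeeded, and advance the checkpoint state in a finally block either way. A compressed sketch of that policy; the class and method names are illustrative:

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class MultiDirSaver {
      void saveAll(List<String> dirs) throws IOException {
        Set<String> errorDirs = new HashSet<String>();
        for (String dir : dirs) {
          if (errorDirs.contains(dir)) {
            continue;                  // already failed; do not retry
          }
          try {
            saveOne(dir);
          } catch (Exception e) {
            errorDirs.add(dir);        // record the failure, keep going
          }
        }
        if (errorDirs.size() == dirs.size()) {
          throw new IOException(
              "Failed to save any storage directories while saving namespace");
        }
      }

      private void saveOne(String dir) throws IOException {
        // write one directory's copy of the image (elided)
      }
    }
]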

