hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1158072 [1/7] - in /hadoop/common/branches/HDFS-1623/hdfs: ./ ivy/ src/c++/libhdfs/ src/contrib/ src/contrib/fuse-dfs/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/ser...
Date: Tue, 16 Aug 2011 00:37:25 GMT
Author: todd
Date: Tue Aug 16 00:37:15 2011
New Revision: 1158072

URL: http://svn.apache.org/viewvc?rev=1158072&view=rev
Log:
Merge trunk into HDFS-1623 branch.

Added:
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
      - copied unchanged from r1158071, hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
      - copied unchanged from r1158071, hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/util/CyclicIteration.java
      - copied unchanged from r1158071, hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/util/CyclicIteration.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/util/Holder.java
      - copied unchanged from r1158071, hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/util/Holder.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/util/RwLock.java
      - copied unchanged from r1158071, hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/util/RwLock.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
      - copied unchanged from r1158071, hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
      - copied unchanged from r1158071, hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
      - copied unchanged from r1158071, hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
      - copied unchanged from r1158071, hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/util/TestCyclicIteration.java
      - copied unchanged from r1158071, hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/util/TestCyclicIteration.java
Removed:
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
Modified:
    hadoop/common/branches/HDFS-1623/hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-1623/hdfs/build.xml
    hadoop/common/branches/HDFS-1623/hdfs/ivy.xml
    hadoop/common/branches/HDFS-1623/hdfs/ivy/ivysettings.xml
    hadoop/common/branches/HDFS-1623/hdfs/ivy/libraries.properties
    hadoop/common/branches/HDFS-1623/hdfs/src/c++/libhdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hdfs/src/contrib/build-contrib.xml
    hadoop/common/branches/HDFS-1623/hdfs/src/contrib/fuse-dfs/ivy.xml
    hadoop/common/branches/HDFS-1623/hdfs/src/java/   (props changed)
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryServlet.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
    hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/namenode/FileDataServletAspects.aj
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestHftpFileSystem.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
    hadoop/common/branches/HDFS-1623/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
    hadoop/common/branches/HDFS-1623/hdfs/src/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-1623/hdfs/src/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hdfs/src/webapps/secondary/   (props changed)

Propchange: hadoop/common/branches/HDFS-1623/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs:1152502-1153927
+/hadoop/common/trunk/hdfs:1152502-1158071
 /hadoop/core/branches/branch-0.19/hdfs:713112
 /hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463

Modified: hadoop/common/branches/HDFS-1623/hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/CHANGES.txt?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/CHANGES.txt Tue Aug 16 00:37:15 2011
@@ -629,6 +629,42 @@ Trunk (unreleased changes)
     HDFS-2199. Move blockTokenSecretManager from FSNamesystem to BlockManager.
     (Uma Maheswara Rao G via szetszwo)
 
+    HDFS-2187. Make EditLogInputStream act like an iterator over FSEditLogOps
+    (Ivan Kelly and todd via todd)
+
+    HDFS-2225. Refactor edit log file management so it's not in classes
+    which should be generic to the type of edit log storage. (Ivan Kelly
+    via todd)
+
+    HDFS-2108. Move datanode heartbeat handling from namenode package to
+    blockmanagement package.  (szetszwo)
+
+    HDFS-2226. Clean up counting of operations in FSEditLogLoader (todd)
+
+    HDFS-2228. Move block and datanode code from FSNamesystem to
+    BlockManager and DatanodeManager.  (szetszwo)
+
+    HDFS-2238. In NamenodeFsck.toString(), uses StringBuilder.(..) instead of
+    string concatenation.  (Uma Maheswara Rao G via szetszwo)
+   
+    HDFS-2230. ivy to resolve/retrieve latest common-tests jar published by 
+    hadoop common maven build. (gkesavan)
+
+    HDFS-2227. getRemoteEditLogManifest should pull its information from
+    FileJournalManager during checkpoint process (Ivan Kelly and Todd Lipcon
+    via todd)
+
+    HDFS-2239. Reduce access levels of the fields and methods in FSNamesystem.
+    (szetszwo)
+
+    HDFS-2241. Remove implementing FSConstants interface to just get the
+    constants from the interface. (suresh)
+
+    HDFS-2237. Change UnderReplicatedBlocks from public to package private.
+    (szetszwo)
+
+    HDFS-2233. Add WebUI tests with URI reserved chars. (eli)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -917,6 +953,22 @@ Trunk (unreleased changes)
     HDFS-2196. Make ant build system work with hadoop-common JAR generated
     by Maven. (Alejandro Abdelnur via tomwhite)
 
+    HDFS-2245. Fix a NullPointerException in BlockManager.chooseTarget(..).
+    (szetszwo)
+
+    HDFS-2229. Fix a deadlock in namenode by enforcing lock acquisition
+    ordering.  (szetszwo)
+
+    HDFS-2235. Encode servlet paths. (eli)
+
+    HDFS-2186. DN volume failures on startup are not counted. (eli)
+
+    HDFS-2240. Fix a deadlock in LeaseRenewer by enforcing lock acquisition
+    ordering.  (szetszwo)
+
+    HDFS-73. DFSOutputStream does not close all the sockets.
+    (Uma Maheswara Rao G via eli)
+
   BREAKDOWN OF HDFS-1073 SUBTASKS
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.

Modified: hadoop/common/branches/HDFS-1623/hdfs/build.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/build.xml?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/build.xml (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/build.xml Tue Aug 16 00:37:15 2011
@@ -180,7 +180,7 @@
   <property name="hadoop-hdfs-test.pom" location="${ivy.dir}/hadoop-hdfs-test.xml"/>
 
   <!--this is the naming policy for artifacts we want pulled down-->
-  <property name="ivy.artifact.retrieve.pattern" value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/>
+  <property name="ivy.artifact.retrieve.pattern" value="${ant.project.name}/[conf]/[artifact]-[revision](-[classifier]).[ext]"/>
 
   <!--this is how artifacts that get built are named-->
   <property name="ivy.publish.pattern" value="hadoop-hdfs-[revision].[ext]"/>
@@ -247,8 +247,8 @@
     <pathelement location="${build.tools}"/>
     <pathelement path="${clover.jar}"/>
     <path refid="ivy-test.classpath"/>
-    <fileset dir="${lib.dir}">
-      <include name="hadoop-common-test-${hadoop-common.version}.jar" />
+    <fileset dir="${test.ivy.lib.dir}">
+      <include name="hadoop-common-${hadoop-common.version}-tests.jar" />
       <exclude name="**/excluded/" />
     </fileset>
     <pathelement location="${build.classes}"/>

Modified: hadoop/common/branches/HDFS-1623/hdfs/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/ivy.xml?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/ivy.xml (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/ivy.xml Tue Aug 16 00:37:15 2011
@@ -15,7 +15,7 @@
    limitations under the License.
 -->
 
-<ivy-module version="1.0">
+<ivy-module version="1.0" xmlns:m="http://ant.apache.org/ivy/maven">
   <info organisation="org.apache.hadoop" module="${ant.project.name}" revision="${version}">
     <license name="Apache 2.0"/>
     <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
@@ -87,7 +87,9 @@
 
     <dependency org="org.slf4j" name="slf4j-api" rev="${slf4j-api.version}" conf="test->master"/>
     <dependency org="org.slf4j" name="slf4j-log4j12" rev="${slf4j-log4j12.version}" conf="test->master"/>
-    <dependency org="org.apache.hadoop" name="hadoop-common-test" rev="${hadoop-common.version}" conf="test->master"/>
+    <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="test->master">
+      <artifact name="hadoop-common" type="tests" ext="jar" m:classifier="tests"/>
+    </dependency> 
 
     <dependency org="checkstyle" name="checkstyle" rev="${checkstyle.version}" conf="checkstyle->default"/>
 

Modified: hadoop/common/branches/HDFS-1623/hdfs/ivy/ivysettings.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/ivy/ivysettings.xml?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/ivy/ivysettings.xml (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/ivy/ivysettings.xml Tue Aug 16 00:37:15 2011
@@ -31,7 +31,8 @@
   -->
   <property name="repo.maven.org" value="http://repo1.maven.org/maven2/" override="false"/>
   <property name="snapshot.apache.org" value="https://repository.apache.org/content/repositories/snapshots/" override="false"/>
-  <property name="maven2.pattern" value="[organisation]/[module]/[revision]/[module]-[revision]"/>
+  <property name="maven2.pattern" value="[organisation]/[module]/[revision]/[module]-[revision](-[classifier])"/>
+
   <property name="repo.dir" value="${user.home}/.m2/repository"/>
   <property name="maven2.pattern.ext"  value="${maven2.pattern}.[ext]"/>
   <property name="resolvers" value="default" override="false"/>
@@ -41,10 +42,11 @@
   <resolvers>
     <ibiblio name="maven2" root="${repo.maven.org}" pattern="${maven2.pattern.ext}" m2compatible="true"/>
     <ibiblio name="apache-snapshot" root="${snapshot.apache.org}" m2compatible="true"
-        checkmodified="true" changingPattern=".*SNAPSHOT"/>
+        checkmodified="true" changingPattern=".*SNAPSHOT" />
+      
 
     <filesystem name="fs" m2compatible="true" force="${force-resolve}">
-       <artifact pattern="${repo.dir}/[organisation]/[module]/[revision]/[module]-[revision].[ext]"/>
+       <artifact pattern="${repo.dir}/${maven2.pattern.ext}"/>
        <ivy pattern="${repo.dir}/[organisation]/[module]/[revision]/[module]-[revision].pom"/>
     </filesystem>
 

Modified: hadoop/common/branches/HDFS-1623/hdfs/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/ivy/libraries.properties?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/ivy/libraries.properties (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/ivy/libraries.properties Tue Aug 16 00:37:15 2011
@@ -41,7 +41,7 @@ hadoop-hdfs.version=0.23.0-SNAPSHOT
 
 hsqldb.version=1.8.0.10
 
-ivy.version=2.1.0
+ivy.version=2.2.0-rc1
 
 jasper.version=5.5.12
 jdeb.version=0.8

Propchange: hadoop/common/branches/HDFS-1623/hdfs/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/c++/libhdfs:1152502-1153927
+/hadoop/common/trunk/hdfs/src/c++/libhdfs:1152502-1158071
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs:987665-1095512

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/contrib/build-contrib.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/contrib/build-contrib.xml?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/contrib/build-contrib.xml (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/contrib/build-contrib.xml Tue Aug 16 00:37:15 2011
@@ -82,7 +82,7 @@
 
   <!--this is the naming policy for artifacts we want pulled down-->
   <property name="ivy.artifact.retrieve.pattern"
-    			value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/>
+    			value="${ant.project.name}/[conf]/[artifact]-[revision](-[classifier]).[ext]"/>
 
   <!-- the normal classpath -->
   <path id="contrib-classpath">

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/contrib/fuse-dfs/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/contrib/fuse-dfs/ivy.xml?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/contrib/fuse-dfs/ivy.xml (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/contrib/fuse-dfs/ivy.xml Tue Aug 16 00:37:15 2011
@@ -15,7 +15,7 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<ivy-module version="1.0">
+<ivy-module version="1.0" xmlns:m="http://ant.apache.org/ivy/maven">
   <info organisation="org.apache.hadoop" module="${ant.project.name}">
     <license name="Apache 2.0"/>
     <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
@@ -45,13 +45,20 @@
       rev="${hadoop-common.version}"
       conf="common->default"/>
     <dependency org="org.apache.hadoop"
-      name="hadoop-common-test"
+      name="hadoop-common"
       rev="${hadoop-common.version}"
-      conf="common->default"/>
+      conf="common->default">
+      <artifact name="hadoop-common" type="tests" ext="jar" m:classifier="tests"/>
+    </dependency>
     <dependency org="log4j"
       name="log4j"
       rev="${log4j.version}"
-      conf="common->master"/>
+      conf="common->master">
+      <exclude org="com.sun.jdmk"/>
+      <exclude org="com.sun.jmx"/>
+      <exclude org="javax.jms"/> 
+    </dependency>
+    
     <dependency org="commons-logging"
       name="commons-logging"
       rev="${commons-logging.version}"

Propchange: hadoop/common/branches/HDFS-1623/hdfs/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/java:1152502-1153927
+/hadoop/common/trunk/hdfs/src/java:1152502-1158071
 /hadoop/core/branches/branch-0.19/hdfs/src/java:713112
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
 /hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java Tue Aug 16 00:37:15 2011
@@ -61,6 +61,9 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -106,7 +109,7 @@ import org.apache.hadoop.util.Progressab
  *
  ********************************************************/
 @InterfaceAudience.Private
-public class DFSClient implements FSConstants, java.io.Closeable {
+public class DFSClient implements java.io.Closeable {
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
   public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
   static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
@@ -165,7 +168,7 @@ public class DFSClient implements FSCons
       writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
           DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
       defaultBlockSize = conf.getLong(DFS_BLOCK_SIZE_KEY,
-          DEFAULT_BLOCK_SIZE);
+          DFS_BLOCK_SIZE_DEFAULT);
       defaultReplication = (short) conf.getInt(
           DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
       taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
@@ -1043,7 +1046,7 @@ public class DFSClient implements FSCons
 
           out = new DataOutputStream(
               new BufferedOutputStream(NetUtils.getOutputStream(sock), 
-                                       DataNode.SMALL_BUFFER_SIZE));
+                                       FSConstants.SMALL_BUFFER_SIZE));
           in = new DataInputStream(NetUtils.getInputStream(sock));
 
           if (LOG.isDebugEnabled()) {

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Tue Aug 16 00:37:15 2011
@@ -234,7 +234,7 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_DF_INTERVAL_KEY = "dfs.df.interval";
   public static final int     DFS_DF_INTERVAL_DEFAULT = 60000;
   public static final String  DFS_BLOCKREPORT_INTERVAL_MSEC_KEY = "dfs.blockreport.intervalMsec";
-  public static final long    DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT = 21600000;
+  public static final long    DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT = 60 * 60 * 1000;
   public static final String  DFS_BLOCKREPORT_INITIAL_DELAY_KEY = "dfs.blockreport.initialDelay";
   public static final int     DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0;
   public static final String  DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit";

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Tue Aug 16 00:37:15 2011
@@ -36,7 +36,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -48,6 +47,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -167,7 +166,7 @@ class DFSOutputStream extends FSOutputSu
       this.seqno = HEART_BEAT_SEQNO;
       
       buffer = null;
-      int packetSize = PacketHeader.PKT_HEADER_LEN + DFSClient.SIZE_OF_INTEGER; // TODO(todd) strange
+      int packetSize = PacketHeader.PKT_HEADER_LEN + FSConstants.BYTES_IN_INTEGER;
       buf = new byte[packetSize];
       
       checksumStart = dataStart = packetSize;
@@ -235,12 +234,12 @@ class DFSOutputStream extends FSOutputSu
                          dataStart - checksumLen , checksumLen); 
       }
       
-      int pktLen = DFSClient.SIZE_OF_INTEGER + dataLen + checksumLen;
+      int pktLen = FSConstants.BYTES_IN_INTEGER + dataLen + checksumLen;
       
       //normally dataStart == checksumPos, i.e., offset is zero.
       buffer = ByteBuffer.wrap(
         buf, dataStart - checksumPos,
-        PacketHeader.PKT_HEADER_LEN + pktLen - DFSClient.SIZE_OF_INTEGER);
+        PacketHeader.PKT_HEADER_LEN + pktLen - FSConstants.BYTES_IN_INTEGER);
       buf = null;
       buffer.mark();
 
@@ -605,6 +604,7 @@ class DFSOutputStream extends FSOutputSu
         try {
           blockStream.close();
         } catch (IOException e) {
+          setLastException(e);
         } finally {
           blockStream = null;
         }
@@ -613,10 +613,20 @@ class DFSOutputStream extends FSOutputSu
         try {
           blockReplyStream.close();
         } catch (IOException e) {
+          setLastException(e);
         } finally {
           blockReplyStream = null;
         }
       }
+      if (null != s) {
+        try {
+          s.close();
+        } catch (IOException e) {
+          setLastException(e);
+        } finally {
+          s = null;
+        }
+      }
     }
 
     //
@@ -839,7 +849,7 @@ class DFSOutputStream extends FSOutputSu
         final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);
         out = new DataOutputStream(new BufferedOutputStream(
             NetUtils.getOutputStream(sock, writeTimeout),
-            DataNode.SMALL_BUFFER_SIZE));
+            FSConstants.SMALL_BUFFER_SIZE));
 
         //send the TRANSFER_BLOCK request
         new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
@@ -1002,16 +1012,20 @@ class DFSOutputStream extends FSOutputSu
       persistBlocks.set(true);
 
       boolean result = false;
+      DataOutputStream out = null;
       try {
+        assert null == s : "Previous socket unclosed";
         s = createSocketForPipeline(nodes[0], nodes.length, dfsClient);
         long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
 
         //
         // Xmit header info to datanode
         //
-        DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
+        out = new DataOutputStream(new BufferedOutputStream(
             NetUtils.getOutputStream(s, writeTimeout),
-            DataNode.SMALL_BUFFER_SIZE));
+            FSConstants.SMALL_BUFFER_SIZE));
+        
+        assert null == blockReplyStream : "Previous blockReplyStream unclosed";
         blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
 
         // send the request
@@ -1037,7 +1051,7 @@ class DFSOutputStream extends FSOutputSu
                 + firstBadLink);
           }
         }
-
+        assert null == blockStream : "Previous blockStream unclosed";
         blockStream = out;
         result =  true; // success
 
@@ -1058,12 +1072,15 @@ class DFSOutputStream extends FSOutputSu
         }
         hasError = true;
         setLastException(ie);
-        blockReplyStream = null;
         result =  false;  // error
       } finally {
         if (!result) {
           IOUtils.closeSocket(s);
           s = null;
+          IOUtils.closeStream(out);
+          out = null;
+          IOUtils.closeStream(blockReplyStream);
+          blockReplyStream = null;
         }
       }
       return result;
@@ -1156,7 +1173,7 @@ class DFSOutputStream extends FSOutputSu
     final int timeout = client.getDatanodeReadTimeout(length);
     NetUtils.connect(sock, isa, timeout);
     sock.setSoTimeout(timeout);
-    sock.setSendBufferSize(DFSClient.DEFAULT_DATA_SOCKET_SIZE);
+    sock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
     if(DFSClient.LOG.isDebugEnabled()) {
       DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
     }

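[Editorial note on the hunks above: the HDFS-73 changes to DFSOutputStream follow one cleanup pattern throughout. Each stream and the socket are closed in their own try/catch, the failure is recorded via setLastException(..) instead of being swallowed, and the field is nulled in finally so a retry never touches a half-closed resource. A minimal self-contained sketch of that pattern, using illustrative names (lastException, closeAndClear) rather than the actual HDFS fields:]

import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.util.concurrent.atomic.AtomicReference;

class PipelineCleanupSketch {
  private OutputStream blockStream;      // stands in for the stream to the datanode pipeline
  private InputStream blockReplyStream;  // stands in for the ack stream from the pipeline
  private Socket s;                      // the pipeline socket
  private final AtomicReference<IOException> lastException = new AtomicReference<IOException>();

  /** Close every pipeline resource, recording (not swallowing) failures. */
  void closeResources() {
    blockStream = closeAndClear(blockStream);
    blockReplyStream = closeAndClear(blockReplyStream);
    if (s != null) {
      try {
        s.close();
      } catch (IOException e) {
        lastException.set(e);
      } finally {
        s = null; // never retry over a half-closed socket
      }
    }
  }

  private <T extends Closeable> T closeAndClear(T c) {
    if (c != null) {
      try {
        c.close();
      } catch (IOException e) {
        lastException.set(e); // surfaced to the caller on the next operation
      }
    }
    return null; // caller assigns the result back, clearing the field
  }
}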
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java Tue Aug 16 00:37:15 2011
@@ -61,6 +61,7 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.ServletUtil;
 import org.xml.sax.Attributes;
 import org.xml.sax.InputSource;
 import org.xml.sax.SAXException;
@@ -244,46 +245,31 @@ public class HftpFileSystem extends File
   /**
    * Return a URL pointing to given path on the namenode.
    *
-   * @param p path to obtain the URL for
-   * @return namenode URL referring to the given path
-   * @throws IOException on error constructing the URL
-   */
-  URL getNamenodeFileURL(Path p) throws IOException {
-    return getNamenodeURL("/data" + p.toUri().getPath(),
-                          "ugi=" + getUgiParameter());
-  }
-
-  /**
-   * Return a URL pointing to given path on the namenode.
-   *
    * @param path to obtain the URL for
    * @param query string to append to the path
    * @return namenode URL referring to the given path
    * @throws IOException on error constructing the URL
    */
   URL getNamenodeURL(String path, String query) throws IOException {
-    try {
-      final URL url = new URI("http", null, nnAddr.getHostName(),
-          nnAddr.getPort(), path, query, null).toURL();
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("url=" + url);
-      }
-      return url;
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
+    final URL url = new URL("http", nnAddr.getHostName(),
+          nnAddr.getPort(), path + '?' + query);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("url=" + url);
     }
+    return url;
   }
 
   /**
-   * ugi parameter for http connection
+   * Get encoded UGI parameter string for a URL.
    * 
    * @return user_shortname,group1,group2...
    */
-  private String getUgiParameter() {
-    StringBuilder ugiParamenter = new StringBuilder(ugi.getShortUserName());
+  private String getEncodedUgiParameter() {
+    StringBuilder ugiParamenter = new StringBuilder(
+        ServletUtil.encodeQueryValue(ugi.getShortUserName()));
     for(String g: ugi.getGroupNames()) {
       ugiParamenter.append(",");
-      ugiParamenter.append(g);
+      ugiParamenter.append(ServletUtil.encodeQueryValue(g));
     }
     return ugiParamenter.toString();
   }
@@ -304,7 +290,7 @@ public class HftpFileSystem extends File
    */
   protected HttpURLConnection openConnection(String path, String query)
       throws IOException {
-    query = updateQuery(query);
+    query = addDelegationTokenParam(query);
     final URL url = getNamenodeURL(path, query);
     final HttpURLConnection connection = (HttpURLConnection)url.openConnection();
     try {
@@ -316,14 +302,14 @@ public class HftpFileSystem extends File
     return connection;
   }
 
-  protected String updateQuery(String query) throws IOException {
+  protected String addDelegationTokenParam(String query) throws IOException {
     String tokenString = null;
     if (UserGroupInformation.isSecurityEnabled()) {
       synchronized (this) {
         if (delegationToken != null) {
           tokenString = delegationToken.encodeToUrlString();
           return (query + JspHelper.getDelegationTokenUrlParam(tokenString));
-        } // else we are talking to an insecure cluster
+        }
       }
     }
     return query;
@@ -331,9 +317,9 @@ public class HftpFileSystem extends File
 
   @Override
   public FSDataInputStream open(Path f, int buffersize) throws IOException {
-    String query = "ugi=" + getUgiParameter();
-    query = updateQuery(query);
-    URL u = getNamenodeURL("/data" + f.toUri().getPath(), query);
+    String path = "/data" + ServletUtil.encodePath(f.toUri().getPath());
+    String query = addDelegationTokenParam("ugi=" + getEncodedUgiParameter());
+    URL u = getNamenodeURL(path, query);    
     return new FSDataInputStream(new ByteRangeInputStream(u));
   }
 
@@ -382,9 +368,9 @@ public class HftpFileSystem extends File
       try {
         XMLReader xr = XMLReaderFactory.createXMLReader();
         xr.setContentHandler(this);
-        HttpURLConnection connection = openConnection("/listPaths" + path,
-            "ugi=" + getUgiParameter() + (recur? "&recursive=yes" : ""));
-
+        HttpURLConnection connection = openConnection(
+            "/listPaths" + ServletUtil.encodePath(path),
+            "ugi=" + getEncodedUgiParameter() + (recur ? "&recursive=yes" : ""));
         InputStream resp = connection.getInputStream();
         xr.parse(new InputSource(resp));
       } catch(SAXException e) {
@@ -447,7 +433,8 @@ public class HftpFileSystem extends File
 
     private FileChecksum getFileChecksum(String f) throws IOException {
       final HttpURLConnection connection = openConnection(
-          "/fileChecksum" + f, "ugi=" + getUgiParameter());
+          "/fileChecksum" + ServletUtil.encodePath(f), 
+          "ugi=" + getEncodedUgiParameter());
       try {
         final XMLReader xr = XMLReaderFactory.createXMLReader();
         xr.setContentHandler(this);
@@ -534,7 +521,8 @@ public class HftpFileSystem extends File
      */
     private ContentSummary getContentSummary(String path) throws IOException {
       final HttpURLConnection connection = openConnection(
-          "/contentSummary" + path, "ugi=" + getUgiParameter());
+          "/contentSummary" + ServletUtil.encodePath(path), 
+          "ugi=" + getEncodedUgiParameter());
       InputStream in = null;
       try {
         in = connection.getInputStream();        

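[Editorial note on the hunks above: the HDFS-2235 changes to HftpFileSystem all reduce to one rule: percent-encode every user-supplied fragment (the path and each query value) before assembling the servlet URL, while leaving structural separators such as the commas between group names unencoded. A minimal sketch of that rule, using java.net.URLEncoder as an assumed stand-in for Hadoop's ServletUtil helpers:]

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

class HftpUrlSketch {
  /** Encode one query value, e.g. a user or group name containing reserved chars. */
  static String encodeQueryValue(String v) {
    try {
      return URLEncoder.encode(v, "UTF-8");
    } catch (UnsupportedEncodingException e) {
      throw new AssertionError("UTF-8 is always supported", e);
    }
  }

  /** Build the ugi= parameter: user_shortname,group1,group2... with each piece encoded. */
  static String encodedUgiParameter(String user, String... groups) {
    StringBuilder ugi = new StringBuilder(encodeQueryValue(user));
    for (String g : groups) {
      ugi.append(',').append(encodeQueryValue(g)); // separator stays raw, value is encoded
    }
    return ugi.toString();
  }

  public static void main(String[] args) {
    // A name with reserved characters survives the round trip intact:
    System.out.println("/data?ugi=" + encodedUgiParameter("bob smith", "adm&ops"));
    // prints: /data?ugi=bob+smith,adm%26ops
  }
}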
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java Tue Aug 16 00:37:15 2011
@@ -123,42 +123,42 @@ public class HsftpFileSystem extends Hft
   @Override
   protected HttpURLConnection openConnection(String path, String query)
       throws IOException {
+    query = addDelegationTokenParam(query);
+    final URL url = new URL("https", nnAddr.getHostName(), 
+        nnAddr.getPort(), path + '?' + query);
+    HttpsURLConnection conn = (HttpsURLConnection)url.openConnection();
+    // bypass hostname verification
     try {
-      query = updateQuery(query);
-      final URL url = new URI("https", null, nnAddr.getHostName(), nnAddr
-          .getPort(), path, query, null).toURL();
-      HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
-      // bypass hostname verification
       conn.setHostnameVerifier(new DummyHostnameVerifier());
       conn.setRequestMethod("GET");
       conn.connect();
+    } catch (IOException ioe) {
+      throwIOExceptionFromConnection(conn, ioe);
+    }
 
-      // check cert expiration date
-      final int warnDays = ExpWarnDays;
-      if (warnDays > 0) { // make sure only check once
-        ExpWarnDays = 0;
-        long expTimeThreshold = warnDays * MM_SECONDS_PER_DAY
-            + System.currentTimeMillis();
-        X509Certificate[] clientCerts = (X509Certificate[]) conn
-            .getLocalCertificates();
-        if (clientCerts != null) {
-          for (X509Certificate cert : clientCerts) {
-            long expTime = cert.getNotAfter().getTime();
-            if (expTime < expTimeThreshold) {
-              StringBuilder sb = new StringBuilder();
-              sb.append("\n Client certificate "
-                  + cert.getSubjectX500Principal().getName());
-              int dayOffSet = (int) ((expTime - System.currentTimeMillis()) / MM_SECONDS_PER_DAY);
-              sb.append(" have " + dayOffSet + " days to expire");
-              LOG.warn(sb.toString());
-            }
+    // check cert expiration date
+    final int warnDays = ExpWarnDays;
+    if (warnDays > 0) { // make sure only check once
+      ExpWarnDays = 0;
+      long expTimeThreshold = warnDays * MM_SECONDS_PER_DAY
+          + System.currentTimeMillis();
+      X509Certificate[] clientCerts = (X509Certificate[]) conn
+          .getLocalCertificates();
+      if (clientCerts != null) {
+        for (X509Certificate cert : clientCerts) {
+          long expTime = cert.getNotAfter().getTime();
+          if (expTime < expTimeThreshold) {
+            StringBuilder sb = new StringBuilder();
+            sb.append("\n Client certificate "
+                + cert.getSubjectX500Principal().getName());
+            int dayOffSet = (int) ((expTime - System.currentTimeMillis()) / MM_SECONDS_PER_DAY);
+            sb.append(" have " + dayOffSet + " days to expire");
+            LOG.warn(sb.toString());
           }
         }
       }
-      return (HttpURLConnection) conn;
-    } catch (URISyntaxException e) {
-      throw (IOException) new IOException().initCause(e);
     }
+    return (HttpURLConnection) conn;
   }
 
   @Override

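[Editorial note on the hunk above: besides restructuring the URL construction, the rewritten openConnection keeps the one-time certificate-expiry warning: after the HTTPS connect it scans the local client certificates and warns when notAfter falls within ExpWarnDays of now. A minimal sketch of that arithmetic, with the certificate array passed in directly instead of read from an HttpsURLConnection, and MS_PER_DAY standing in for MM_SECONDS_PER_DAY:]

import java.security.cert.X509Certificate;

class CertExpiryCheckSketch {
  static final long MS_PER_DAY = 24L * 60 * 60 * 1000;

  static void warnOnExpiringCerts(X509Certificate[] clientCerts, int warnDays) {
    if (clientCerts == null || warnDays <= 0) {
      return;
    }
    long now = System.currentTimeMillis();
    long expTimeThreshold = now + warnDays * MS_PER_DAY;
    for (X509Certificate cert : clientCerts) {
      long expTime = cert.getNotAfter().getTime();
      if (expTime < expTimeThreshold) {
        // Integer division gives whole days remaining until expiration.
        int daysLeft = (int) ((expTime - now) / MS_PER_DAY);
        System.err.println("Client certificate "
            + cert.getSubjectX500Principal().getName()
            + " has " + daysLeft + " days to expire");
      }
    }
  }
}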
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java Tue Aug 16 00:37:15 2011
@@ -75,7 +75,9 @@ class LeaseRenewer {
   /** Get a {@link LeaseRenewer} instance */
   static LeaseRenewer getInstance(final String authority,
       final UserGroupInformation ugi, final DFSClient dfsc) throws IOException {
-    return Factory.INSTANCE.get(authority, ugi, dfsc);
+    final LeaseRenewer r = Factory.INSTANCE.get(authority, ugi);
+    r.addClient(dfsc);
+    return r;
   }
 
   /** 
@@ -132,14 +134,13 @@ class LeaseRenewer {
 
     /** Get a renewer. */
     private synchronized LeaseRenewer get(final String authority,
-        final UserGroupInformation ugi, final DFSClient dfsc) {
+        final UserGroupInformation ugi) {
       final Key k = new Key(authority, ugi);
       LeaseRenewer r = renewers.get(k);
       if (r == null) {
         r = new LeaseRenewer(k);
         renewers.put(k, r);
       }
-      r.addClient(dfsc);
       return r;
     }
 
@@ -196,7 +197,7 @@ class LeaseRenewer {
 
   private LeaseRenewer(Factory.Key factorykey) {
     this.factorykey = factorykey;
-    setGraceSleepPeriod(LEASE_RENEWER_GRACE_DEFAULT);
+    unsyncSetGraceSleepPeriod(LEASE_RENEWER_GRACE_DEFAULT);
     
     if (LOG.isTraceEnabled()) {
       instantiationTrace = StringUtils.stringifyException(
@@ -251,6 +252,10 @@ class LeaseRenewer {
 
   /** Set the grace period and adjust the sleep period accordingly. */
   synchronized void setGraceSleepPeriod(final long gracePeriod) {
+    unsyncSetGraceSleepPeriod(gracePeriod);
+  }
+
+  private void unsyncSetGraceSleepPeriod(final long gracePeriod) {
     if (gracePeriod < 100L) {
       throw new HadoopIllegalArgumentException(gracePeriod
           + " = gracePeriod < 100ms is too small.");

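[Editorial note on the hunks above: the HDFS-2240 changes fix the deadlock by enforcing one lock order. Factory.get(..) used to call r.addClient(dfsc), taking the renewer's lock while the factory's lock was held; now registration happens in getInstance(..) after get(..) returns. Likewise, since the constructor runs inside the factory's synchronized get(..), it calls an unsynchronized helper instead of the synchronized setGraceSleepPeriod(..). A simplified sketch of both moves, with stand-in class and field names:]

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class RenewerSketch {
  private final List<Object> clients = new ArrayList<Object>();
  private long gracePeriod;

  RenewerSketch() {
    // The constructor runs while the factory lock is held; calling the
    // synchronized setter here would acquire the renewer lock under the
    // factory lock, reintroducing the factory->renewer ordering.
    unsyncSetGracePeriod(60000L);
  }

  synchronized void addClient(Object client) {
    clients.add(client);
  }

  synchronized void setGracePeriod(long gracePeriod) {
    unsyncSetGracePeriod(gracePeriod);
  }

  private void unsyncSetGracePeriod(long gracePeriod) {
    if (gracePeriod < 100L) {
      throw new IllegalArgumentException(gracePeriod
          + " = gracePeriod < 100ms is too small.");
    }
    this.gracePeriod = gracePeriod;
  }

  static class Factory {
    private final Map<String, RenewerSketch> renewers =
        new HashMap<String, RenewerSketch>();

    /** Holds only the factory lock; never calls into a renewer. */
    private synchronized RenewerSketch get(String key) {
      RenewerSketch r = renewers.get(key);
      if (r == null) {
        r = new RenewerSketch();
        renewers.put(key, r);
      }
      return r;
    }

    /** Client registration happens after the factory lock is released. */
    RenewerSketch getInstance(String key, Object client) {
      RenewerSketch r = get(key);
      r.addClient(client); // takes only the renewer lock
      return r;
    }
  }
}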
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Tue Aug 16 00:37:15 2011
@@ -565,7 +565,6 @@ public interface ClientProtocol extends 
    * <li> [3] contains number of under replicated blocks in the system.</li>
    * <li> [4] contains number of blocks with a corrupt replica. </li>
    * <li> [5] contains number of blocks without any good replicas left. </li>
-   * <li> [5] contains number of blocks without any good replicas left. </li>
    * <li> [6] contains the total used space of the block pool. </li>
    * </ul>
    * Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of 

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java Tue Aug 16 00:37:15 2011
@@ -18,68 +18,71 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /************************************
  * Some handy constants
- *
+ * 
  ************************************/
 @InterfaceAudience.Private
-public interface FSConstants {
+public final class FSConstants {
+  /* Hidden constructor */
+  private FSConstants() {
+  }
+
   public static int MIN_BLOCKS_FOR_WRITE = 5;
 
   // Long that indicates "leave current quota unchanged"
   public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
   public static final long QUOTA_RESET = -1L;
-  
+
   //
   // Timeouts, constants
   //
-  public static long HEARTBEAT_INTERVAL = 3;
-  public static long BLOCKREPORT_INTERVAL = 60 * 60 * 1000;
-  public static long BLOCKREPORT_INITIAL_DELAY = 0;
   public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
   public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
-  public static final long LEASE_RECOVER_PERIOD = 10 * 1000; //in ms
-  
-  // We need to limit the length and depth of a path in the filesystem.  HADOOP-438
-  // Currently we set the maximum length to 8k characters and the maximum depth to 1k.  
+  public static final long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
+
+  // We need to limit the length and depth of a path in the filesystem.
+  // HADOOP-438
+  // Currently we set the maximum length to 8k characters and the maximum depth
+  // to 1k.
   public static int MAX_PATH_LENGTH = 8000;
   public static int MAX_PATH_DEPTH = 1000;
-    
-  public static final int BUFFER_SIZE = new HdfsConfiguration().getInt("io.file.buffer.size", 4096);
-  //Used for writing header etc.
-  public static final int SMALL_BUFFER_SIZE = Math.min(BUFFER_SIZE/2, 512);
-  //TODO mb@media-style.com: should be conf injected?
-  public static final long DEFAULT_BLOCK_SIZE = 64 * 1024 * 1024;
-  public static final int DEFAULT_BYTES_PER_CHECKSUM = 512;
-  public static final int DEFAULT_WRITE_PACKET_SIZE = 64 * 1024;
-  public static final short DEFAULT_REPLICATION_FACTOR = 3;
-  public static final int DEFAULT_FILE_BUFFER_SIZE = 4096;
+
+  // TODO mb@media-style.com: should be conf injected?
   public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
+  public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
+      DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
+      DFSConfigKeys.IO_FILE_BUFFER_SIZE_DEFAULT);
+  // Used for writing header etc.
+  public static final int SMALL_BUFFER_SIZE = Math.min(IO_FILE_BUFFER_SIZE / 2,
+      512);
 
-  public static final int SIZE_OF_INTEGER = Integer.SIZE / Byte.SIZE;
+  public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;
 
   // SafeMode actions
-  public enum SafeModeAction{ SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET; }
+  public enum SafeModeAction {
+    SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET;
+  }
 
   // type of the datanode report
-  public static enum DatanodeReportType {ALL, LIVE, DEAD }
-  
+  public static enum DatanodeReportType {
+    ALL, LIVE, DEAD
+  }
+
   // An invalid transaction ID that will never be seen in a real namesystem.
   public static final long INVALID_TXID = -12345;
 
   /**
    * Distributed upgrade actions:
    * 
-   * 1. Get upgrade status.
-   * 2. Get detailed upgrade status.
-   * 3. Proceed with the upgrade if it is stuck, no matter what the status is.
+   * 1. Get upgrade status. 2. Get detailed upgrade status. 3. Proceed with the
+   * upgrade if it is stuck, no matter what the status is.
    */
   public static enum UpgradeAction {
-    GET_STATUS,
-    DETAILED_STATUS,
-    FORCE_PROCEED;
+    GET_STATUS, DETAILED_STATUS, FORCE_PROCEED;
   }
 
   /**
@@ -90,6 +93,6 @@ public interface FSConstants {
   /**
    * Please see {@link LayoutVersion} on adding new layout version.
    */
-  public static final int LAYOUT_VERSION = 
-    LayoutVersion.getCurrentLayoutVersion();
+  public static final int LAYOUT_VERSION = LayoutVersion
+      .getCurrentLayoutVersion();
 }

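[Editorial note on the hunk above: the HDFS-2241 change retires the constant-interface antipattern. Classes like DFSClient no longer implement FSConstants just to inherit names; FSConstants becomes a final class with a private constructor, and callers qualify the constants explicitly, as the DFSClient and DFSOutputStream hunks earlier in this commit show. A minimal sketch of the before/after shape, with illustrative names:]

// After: a pure constant holder that cannot be implemented or instantiated.
final class ConstantsSketch {
  private ConstantsSketch() {} // hidden constructor, no instances

  static final int SMALL_BUFFER_SIZE = 512;
}

// Before, the interface version let "class ClientSketch implements ConstantsSketch"
// leak every constant into the client's own API. After, callers qualify the name:
class ClientSketch {
  int bufferSize() {
    return ConstantsSketch.SMALL_BUFFER_SIZE; // referenced explicitly, not inherited
  }
}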
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Tue Aug 16 00:37:15 2011
@@ -309,10 +309,10 @@ public class Balancer {
             target.datanode.getName()), HdfsConstants.READ_TIMEOUT);
         sock.setKeepAlive(true);
         out = new DataOutputStream( new BufferedOutputStream(
-            sock.getOutputStream(), FSConstants.BUFFER_SIZE));
+            sock.getOutputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
         sendRequest(out);
         in = new DataInputStream( new BufferedInputStream(
-            sock.getInputStream(), FSConstants.BUFFER_SIZE));
+            sock.getInputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
         receiveResponse(in);
         bytesMoved.inc(block.getNumBytes());
         LOG.info( "Moving block " + block.getBlock().getBlockId() +

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java Tue Aug 16 00:37:15 2011
@@ -259,26 +259,6 @@ public class BlockInfo extends Block imp
     return head;
   }
 
-  boolean listIsConsistent(DatanodeDescriptor dn) {
-    // going forward
-    int count = 0;
-    BlockInfo next, nextPrev;
-    BlockInfo cur = this;
-    while(cur != null) {
-      next = cur.getNext(cur.findDatanode(dn));
-      if(next != null) {
-        nextPrev = next.getPrevious(next.findDatanode(dn));
-        if(cur != nextPrev) {
-          System.out.println("Inconsistent list: cur->next->prev != cur");
-          return false;
-        }
-      }
-      cur = next;
-      count++;
-    }
-    return true;
-  }
-
   /**
    * BlockInfo represents a block that is not being constructed.
    * In order to start modifying the block, the BlockInfo should be converted


