hadoop-hdfs-commits mailing list archives

From t...@apache.org
Subject svn commit: r1099687 [1/15] - in /hadoop/hdfs/branches/HDFS-1073: ./ bin/ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/pro...
Date Thu, 05 May 2011 05:40:13 GMT
Author: todd
Date: Thu May  5 05:40:07 2011
New Revision: 1099687

URL: http://svn.apache.org/viewvc?rev=1099687&view=rev
Log:
Merge trunk (including federation) into HDFS-1073 branch
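
For context, a sync merge along the following lines would produce this revision. This is a hedged reconstruction (the exact command is not recorded in the commit); the revision range comes from the svn:mergeinfo change below, and the trunk URL from the repository layout:

    # from a working copy of the HDFS-1073 branch
    svn merge -r1097628:1099686 http://svn.apache.org/repos/asf/hadoop/hdfs/trunk .
    svn commit -m "Merge trunk (including federation) into HDFS-1073 branch"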

Added:
    hadoop/hdfs/branches/HDFS-1073/bin/distribute-exclude.sh
      - copied unchanged from r1099686, hadoop/hdfs/trunk/bin/distribute-exclude.sh
    hadoop/hdfs/branches/HDFS-1073/bin/refresh-namenodes.sh
      - copied unchanged from r1099686, hadoop/hdfs/trunk/bin/refresh-namenodes.sh
    hadoop/hdfs/branches/HDFS-1073/pom.xml
      - copied unchanged from r1099686, hadoop/hdfs/trunk/pom.xml
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/GetConf.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/tools/GetConf.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/util/DaemonFactory.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/util/DaemonFactory.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestStorageInfo.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java
      - copied unchanged from r1099686, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java
Removed:
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataXceiver.java
Modified:
    hadoop/hdfs/branches/HDFS-1073/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1073/bin/hdfs
    hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh
    hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh
    hadoop/hdfs/branches/HDFS-1073/build.xml   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
    hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
    hadoop/hdfs/branches/HDFS-1073/src/java/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/BlockReader.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/Block.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicasMap.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DFSck.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
    hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/fs/TestFiRename.java
    hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/TestFiHftp.java
    hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj
    hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataTransferProtocolAspects.aj
    hadoop/hdfs/branches/HDFS-1073/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsPermission.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsSymlink.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeConfig.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHftpFileSystem.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java
    hadoop/hdfs/branches/HDFS-1073/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
    hadoop/hdfs/branches/HDFS-1073/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
    hadoop/hdfs/branches/HDFS-1073/src/webapps/datanode/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/dfsnodelist.jsp
    hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/nn_browsedfscontent.jsp
    hadoop/hdfs/branches/HDFS-1073/src/webapps/secondary/   (props changed)

Propchange: hadoop/hdfs/branches/HDFS-1073/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu May  5 05:40:07 2011
@@ -1,4 +1,5 @@
 /hadoop/core/branches/branch-0.19/hdfs:713112
+/hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463
 /hadoop/hdfs/branches/branch-0.21:820487
-/hadoop/hdfs/trunk:1086482-1097628
+/hadoop/hdfs/trunk:1086482-1099686
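
The updated ranges can be read back from a working copy with the standard property command (plain svn usage, nothing specific to this commit):

    svn propget svn:mergeinfo .
    # /hadoop/core/branches/branch-0.19/hdfs:713112
    # /hadoop/hdfs/branches/HDFS-1052:987665-1095512
    # ...
    # /hadoop/hdfs/trunk:1086482-1099686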

Modified: hadoop/hdfs/branches/HDFS-1073/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/CHANGES.txt?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1073/CHANGES.txt Thu May  5 05:40:07 2011
@@ -11,20 +11,253 @@ Trunk (unreleased changes)
 
   NEW FEATURES
 
+    HDFS-1365. Federation: propose ClusterID and BlockPoolID format 
+    (Tanping via boryas)
+
+    HDFS-1394. Federation: modify -format option for namenode to generate a
+    new blockpool id and accept newcluster (boryas)
+
+    HDFS-1400. Federation: DataTransferProtocol uses ExtendedBlockPool to 
+    include BlockPoolID in the protocol. (suresh)
+
+    HDFS-1428. Federation: add cluster ID and block pool ID into
+    NameNode web UI. (Tanping via boryas)
+
+    HDFS-1450. Federation: Introduce block pool ID into FSDatasetInterface.
+    (suresh)
+
+    HDFS-1632. Federation: data node storage structure changes and
+    introduce block pool storage. (Tanping via suresh)
+
+    HDFS-1634. Federation: Convert single threaded DataNode into 
+    per BlockPool thread model. (boryas)
+
+    HDFS-1637. Federation: FSDataset in Datanode should be created after 
+    initial handshake with namenode. (boryas and jitendra)
+
+    HDFS-1653. Federation: Block received message from datanode sends invalid 
+    DatanodeRegistration. (Tanping via suresh)
+
+    HDFS-1645. Federation: DatanodeCommand.Finalize needs to include
+    BlockPoolId.  (suresh)
+
+    HDFS-1638. Federation: DataNode.handleDiskError needs to inform 
+    ALL namenodes if a disk failed (boryas)
+
+    HDFS-1647. Federation: Multiple namenode configuration. (jitendra)
+
+    HDFS-1639. Federation: Add block pool management to FSDataset. (suresh)
+
+    HDFS-1648. Federation: Only DataStorage must be locked using in_use.lock 
+    and no locks must be associated with BlockPoolStorage. (Tanping via suresh)
+
+    HDFS-1641. Federation: Datanode fields that are no longer used should 
+    be removed (boryas)
+
+    HDFS-1642. Federation: add Datanode.getDNRegistration(String bpid) 
+    method  (boryas)
+
+    HDFS-1643. Federation: remove namenode argument from DataNode 
+    constructor (boryas)
+
+    HDFS-1657. Federation: Tests that corrupt block files fail due to changed 
+    file path in federation. (suresh)
+
+    HDFS-1661. Federation: Remove unnecessary TODO:FEDERATION comments.
+    (jitendra)
+
+    HDFS-1660. Federation: Datanode doesn't start with two namenodes (boryas)
+
+    HDFS-1650. Federation: TestReplication fails. (Tanping via suresh)
+
+    HDFS-1651. Federation: Tests fail due to null pointer exception in 
+    Datanode#shutdown() method. (Tanping via suresh)
+
+    HDFS-1649. Federation: Datanode command to refresh namenode list at 
+    the datanode. (jitendra)
+
+    HDFS-1646. Federation: MiniDFSCluster#waitActive() waits forever
+    with the introduction of BPOfferService in datanode. (suresh)
+
+    HDFS-1659. Federation: BPOfferService exits after one iteration 
+    incorrectly.  (Tanping via suresh)
+
+    HDFS-1654. Federation: Fix TestDFSUpgrade and TestDFSRollback failures.
+    (suresh)
+    
+    HDFS-1668. Federation: Datanodes send block pool usage information
+    to the namenode in heartbeats. (suresh)
+
+    HDFS-1669. Federation: Fix TestHftpFileSystem failure. (suresh)
+
+    HDFS-1670. Federation: remove dnRegistration from Datanode (boryas)
+
+    HDFS-1662. Federation: fix unit test cases TestCheckpoint
+    and TestDataNodeMXBean (tanping via boryas)
+
+    HDFS-1671. Federation: shutdown in DataNode should be able to 
+    shutdown individual BP threads as well as the whole DN (boryas).
+
+    HDFS-1663. Federation: Rename getPoolId() everywhere to 
+    getBlockPoolId() (tanping via boryas)
+
+    HDFS-1652. Federation: Add support for multiple namenodes in MiniDFSCluster.
+    (suresh)
+
+    HDFS-1672. Federation: refactor stopDatanode(name) to work 
+    with multiple Block Pools (boryas)
+
+    HDFS-1687. Federation: DirectoryScanner changes for 
+    federation (Matt Foley via boryas)
+
+    HDFS-1626. Make BLOCK_INVALIDATE_LIMIT configurable. (szetszwo)
+
+    HDFS-1655. Federation: DataBlockScanner should scan blocks for 
+    all the block pools. (jitendra)
+
+    HDFS-1664. Federation: Add block pool storage usage to Namenode WebUI.
+    (Tanping via suresh)
+
+    HDFS-1674. Federation: Rename BlockPool class to BlockPoolSlice. 
+    (jghoman, Tanping via suresh)
+
+    HDFS-1673. Federation: Datanode changes to track block token secret per 
+    namenode. (suresh)
+
+    HDFS-1677. Federation: Fix TestFsck and TestListCorruptFileBlocks 
+    failures. (Tanping via suresh)
+
+    HDFS-1678. Federation: Remove unnecessary #getBlockpool() 
+    for NameNodeMXBean in FSNamesystem. (Tanping via Suresh)
+
+    HDFS-1688. Federation: Fix failures in fault injection tests,
+    TestDiskError, TestDatanodeRestart and TestDFSStartupVersions. (suresh)
+
+    HDFS-1696. Federation: when build version doesn't match,
+    datanode should wait (keep connecting) until NN comes up
+    with the right version (boryas)
+
+    HDFS-1681. Balancer: support per pool and per node policies. (szetszwo)
+
+    HDFS-1695. Federation: Fix testOIV and TestDatanodeUtils 
+    (jhoman and tanping via boryas)
+
+    HDFS-1699. Federation: Fix failure of TestBlockReport.
+    (Matt Foley via suresh)
+
+    HDFS-1698. Federation: TestOverReplicatedBlocks and TestWriteToReplica 
+    failing. (jhoman and jitendra)
+
+    HDFS-1701. Federation: Fix TestHeartbeatHandling.
+    (Erik Steffl and Tanping Wang via suresh)
+
+    HDFS-1693. Federation: Fix TestDFSStorageStateRecovery failure. (suresh)
+
+    HDFS-1694. Federation: SimulatedFSDataset changes to work with
+    federation and multiple block pools. (suresh)
+
+    HDFS-1689. Federation: Configuration for namenodes. (suresh and jitendra)
+
+    HDFS-1682. Change Balancer CLI for multiple namenodes and balancing
+    policy.  (szetszwo)
+
+    HDFS-1697. Federation: fix TestBlockRecovery (boryas)
+
+    HDFS-1702. Federation: fix TestBackupNode and TestRefreshNamenodes
+    failures. (suresh)
+
+    HDFS-1706. Federation: TestFileAppend2, TestFileAppend3 and 
+    TestBlockTokenWithDFS failing. (jitendra)
+
+    HDFS-1704. Federation: Add a tool that lists namenodes, secondary and
+    backup from configuration file. (suresh)
+
+    HDFS-1711. Federation: create method for updating machine name in 
+    DataNode.java (boryas)
+
+    HDFS-1712. Federation: when looking up datanode we should use machineName
+    (in testOverReplicatedBlocks) (boryas)
+
+    HDFS-1709. Federation: Error "nnaddr url param is null" when clicking on a 
+    node from NN Live Node Link. (jitendra)
+
+    HDFS-1714. Federation: refactor upgrade object in DataNode (boryas) 
+
+    HDFS-1715. Federation: warning/error not generated when datanode sees 
+    inconsistent/different Cluster ID between namenodes (boryas)
+
+    HDFS-1716. Federation: Add decommission tests for federated namenodes.
+    (suresh)
+
+    HDFS-1713. Federation: Prevent DataBlockScanner from running in tight loop.
+    (jitendra)
+
+    HDFS-1721. Federation: Configuration for principal names should not be 
+    namenode specific. (jitendra)
+
+    HDFS-1717. Federation: FSDataset volumeMap access is not synchronized
+    correctly. (suresh)
+
+    HDFS-1722. Federation: Add flag to MiniDFSCluster to differentiate between
+    federation and non-federation modes. (boryas via suresh)
+
+    HDFS-1718. Federation: MiniDFSCluster#waitActive() bug causes some tests
+    to fail. (suresh)
+
+    HDFS-1719. Federation: Fix TestDFSRemove that fails intermittently.
+    (suresh)
+
+    HDFS-1720. Federation: FSVolumeSet volumes is not synchronized correctly.
+    (suresh)
+
+    HDFS-1700. Federation: fsck needs to work with federation changes.
+    (Matt Foley via suresh)
+
     HDFS-1482. Add listCorruptFileBlocks to DistributedFileSystem.
     (Patrick Kling via hairong)
 
     HDFS-1448. Add a new tool Offline Edits Viewer (oev).  (Erik Steffl
     via szetszwo)
 
-    HDFS-1626. Make BLOCK_INVALIDATE_LIMIT configurable. (szetszwo)
+    HDFS-1735. Federation: merge FSImage change in federation to
+    FSImage+NNStorage refactoring in trunk. (suresh)
+
+    HDFS-1737. Federation: Update the layout version for federation
+    changes. (suresh)
+
+    HDFS-1744. Federation: Add new layout version to offline image viewer
+    and edits viewer. (suresh)
+
+    HDFS-1745. Federation: Fix fault injection test failures. (suresh)
+
+    HDFS-1746. Federation: TestFileAppend3 fails intermittently. (jitendra)
+
+    HDFS-1703. Improve start/stop scripts and add decommission tool for
+    federation. (Tanping Wang, Erik Steffl via suresh)
+
+    HDFS-1749. Federation: TestListCorruptFileBlocks failing in federation 
+    branch. (jitendra)
+
+    HDFS-1754. Federation: testFsck fails. (boryas)
+
+    HDFS-1755. Federation: The BPOfferService must always connect to namenode as 
+    the login user. (jitendra)
 
     HDFS-1675. Support transferring RBW between datanodes. (szetszwo)
 
+    HDFS-1791. Federation: Add command to delete block pool directories 
+    from a datanode. (jitendra)
+
     HDFS-1761. Add a new DataTransferProtocol operation, Op.TRANSFER_BLOCK,
     for transferring RBW/Finalized with acknowledgement and without using RPC.
     (szetszwo)
 
+    HDFS-1813. Federation: Authentication using BlockToken in RPC to datanode
+    fails. (jitendra)
+
     HDFS-1630. Support fsedits checksum. (hairong)
 
     HDFS-1606. Provide a stronger data guarantee in the write pipeline by
@@ -41,10 +274,19 @@ Trunk (unreleased changes)
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)
 
-    HDFS-1481. NameNode should validate fsimage before rolling. (hairong)
+    HDFS-1628. Display full path in AccessControlException.  (John George
+    via szetszwo)
+
+    HDFS-1707. Federation: Failure in browsing data on new namenodes. (jitendra)
 
-    HDFS-1518. Wrong description in FSNamesystem's javadoc. 
-    (Jingguo Yao via eli)
+    HDFS-1683. Test Balancer with multiple NameNodes.  (szetszwo)
+
+    HDFS-1547. Improve decommission mechanism. (suresh)
+
+    HDFS-1588. Remove hardcoded strings for configuration keys, "dfs.hosts"
+    and "dfs.hosts.exlude". (Erik Steffl via suresh)
+
+    HDFS-1481. NameNode should validate fsimage before rolling. (hairong)
 
     HDFS-1506. Refactor fsimage loading code. (hairong)
 
@@ -59,14 +301,6 @@ Trunk (unreleased changes)
     HDFS-1539. A config option for the datanode to fsycn a block file
     when block is completely written. (dhruba)
 
-    HDFS-1547. Improve decommission mechanism. (suresh)
-
-    HDFS-1586. Add InterfaceAudience and InterfaceStability annotations to 
-    MiniDFSCluster. (suresh)
-
-    HDFS-1588. Remove hardcoded strings for configuration keys, "dfs.hosts"
-    and "dfs.hosts.exlude". (Erik Steffl via suresh)
-
     HDFS-1335. HDFS side change of HADOOP-6904: RPC compatibility. (hairong)
 
     HDFS-1557. Separate Storage from FSImage. (Ivan Kelly via jitendra)
@@ -76,9 +310,6 @@ Trunk (unreleased changes)
     HDFS-1629. Add a method to BlockPlacementPolicy for keeping the chosen
     nodes in the output array.  (szetszwo)
 
-    HDFS-1628. Display full path in AccessControlException.  (John George
-    via szetszwo)
-
     HDFS-1731. Allow using a file to exclude certain tests from build (todd)
 
     HDFS-1736. Remove the dependency from DatanodeJspHelper to FsShell.
@@ -146,6 +377,12 @@ Trunk (unreleased changes)
     HDFS-1846. Preallocate edit log with OP_INVALID instead of zero bytes
     to ensure blocks are actually allocated. (Aaron T. Myers via todd)
 
+    HDFS-1741. Provide a minimal pom file to allow integration of HDFS into Sonar
+    analysis (cos)
+
+    HDFS-1870. Move and rename DFSClient.LeaseChecker to a separate class
+    LeaseRenewer.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -155,6 +392,21 @@ Trunk (unreleased changes)
 
   BUG FIXES
 
+    HDFS-1449. Fix test failures - ExtendedBlock must return 
+    block file name in #getBlockName(). (suresh)
+
+    HDFS-1680. Fix TestBalancer. (szetszwo)
+
+    HDFS-1705. Balancer command throws NullPointerException. (suresh via
+    szetszwo)
+
+    HDFS-1559. Add missing UGM overrides to TestRefreshUserMappings
+    (Todd Lipcon via eli)
+
+    HDFS-1585. Fix build after HDFS-1547 (todd)
+
+    HDFS-1684. Balancer cannot start with multiple namenodes.  (szetszwo)
+
     HDFS-1516. mvn-install is broken after 0.22 branch creation. (cos)
 
     HDFS-1360. TestBlockRecovery should bind ephemeral ports.
@@ -162,9 +414,6 @@ Trunk (unreleased changes)
 
     HDFS-1551. Fix pom templates dependency list (gkesavan)
 
-    HDFS-1559. Add missing UGM overrides to TestRefreshUserMappings
-    (Todd Lipcon via eli)
-
     HDFS-1509. A savenamespace command writes the fsimage and edits into
     all configured directories. (dhruba)
 
@@ -173,27 +422,25 @@ Trunk (unreleased changes)
 
     HDFS-1463. Accesstime of a file is not updated in safeMode. (dhruba)
 
-    HDFS-1585. Fix build after HDFS-1547 (todd)
-
     HDFS-863. Potential deadlock in TestOverReplicatedBlocks. 
     (Ken Goodhope via jghoman)
 
-    HDFS-1610. Fix TestClientProtocolWithDelegationToken and TestBlockToken
-    on trunk after HADOOP-6904 (todd)
-
     HDFS-1607. Fix references to misspelled method name getProtocolSigature
     (todd)
 
+    HDFS-1610. Fix TestClientProtocolWithDelegationToken and TestBlockToken
+    on trunk after HADOOP-6904 (todd)
+
     HDFS-1600. Fix release audit warnings on trunk. (todd)
 
     HDFS-1691. Remove a duplicated static initializer for reading default
     configurations in DFSck.  (Alexey Diomin via szetszwo)
 
+    HDFS-1748. Balancer utilization classification is incomplete.  (szetszwo)
+
     HDFS-1738. change hdfs jmxget to return an empty string instead of 
     null when an attribute value is not available (tanping via boryas)
 
-    HDFS-1748. Balancer utilization classification is incomplete.  (szetszwo)
-
     HDFS-1757. Don't compile fuse-dfs by default. (eli)
 
     HDFS-1770. TestFiRename fails due to invalid block size. (eli)
@@ -231,6 +478,27 @@ Trunk (unreleased changes)
     HDFS-1829. TestNodeCount waits forever, errs without giving information.
     (Matt Foley via eli)
 
+    HDFS-1860. when renewing/canceling DelegationToken over http we need to
+    pass exception information back to the caller. (boryas)
+
+    HDFS-1871. Mapreduce build fails due to MiniDFSCluster change from
+    HDFS-1052. (suresh)
+
+    HDFS-1876. One MiniDFSCluster constructor ignores numDataNodes parameter
+    (todd)
+
+    HDFS-1773. Do not show decommissioned datanodes, which are not in both
+    include and exclude lists, on web and JMX interfaces.
+    (Tanping Wang via szetszwo)
+
+    HDFS-1888. MiniDFSCluster#corruptBlockOnDatanodes() access must be
+    public. (suresh)
+
+    HDFS-1889. incorrect path in start/stop dfs script. (John George via eli)
+
+    HDFS-1890. Improve the name, class and value type of the map
+    LeaseRenewer.pendingCreates.  (szetszwo)
+
 Release 0.22.0 - Unreleased
 
   NEW FEATURES
@@ -264,6 +532,8 @@ Release 0.22.0 - Unreleased
     HDFS piggyback block locations to each file status when listing a
     directory.  (hairong)
 
+    HDFS-1359. Add BlockPoolID to Block. (suresh)
+
     HDFS-1361. Add -fileStatus operation to NNThroughputBenchmark. (shv)
 
     HDFS-1435. Provide an option to store fsimage compressed. (hairong)
@@ -392,9 +662,6 @@ Release 0.22.0 - Unreleased
 
     HDFS-1426. Remove unused method BlockInfo#listCount. (hairong)
 
-    HDFS-1456. Provide builder for constructing instances of MiniDFSCluster.
-    (jghoman)
-
     HDFS-1472. Allow programmatic access to fsck output.
     (Ramkumar Vadali via dhruba)
 
@@ -452,9 +719,15 @@ Release 0.22.0 - Unreleased
 
     HDFS-1582. Remove auto-generated native build files. (rvs via eli)
 
+    HDFS-1456. Provide builder for constructing instances of MiniDFSCluster.
+    (jghoman)
+
     HDFS-1861. Rename dfs.datanode.max.xcievers and bump its default value.
     (eli)
 
+    HDFS-1052. HDFS Federation - Merge of umbrella jira changes from
+    HDFS-1052 branch into trunk.
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)
@@ -591,6 +864,9 @@ Release 0.22.0 - Unreleased
     HDFS-1357. HFTP traffic served by DataNode shouldn't use service port 
     on NameNode. (Kan Zhang via jghoman)
 
+    HDFS-1419. HDFS Federation: Three test cases need minor modification after 
+    the new block id change (Tanping Wang via suresh)
+
     HDFS-96. HDFS supports blocks larger than 2 GB.
     (Patrick Kling via dhruba)
 
@@ -607,6 +883,12 @@ Release 0.22.0 - Unreleased
     HDFS-1498. FSDirectory#unprotectedConcat calls setModificationTime 
     on a file. (eli)
 
+    HDFS-1625. Ignore disk space values in TestDataNodeMXBean.  (szetszwo)
+
+    HDFS-1850. DN should transmit absolute failed volume count rather than 
+    increments to the NN. (eli)
+
+Release 0.21.1 - Unreleased
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 
     HDFS-874. TestHDFSFileContextMainOperations fails on weirdly 
@@ -617,15 +899,12 @@ Release 0.22.0 - Unreleased
     HDFS-1487. FSDirectory.removeBlock() should update diskspace count 
     of the block owner node (Zhong Wang via eli).
 
-    HDFS-1001. DataXceiver and BlockReader disagree on when to send/recv
-    CHECKSUM_OK. (bc Wong via eli)
+    HDFS-1467. Append pipeline never succeeds with more than one replica.
+    (Todd Lipcon via eli)
 
     HDFS-1167. New property for local conf directory in system-test-hdfs.xml
     file. (Vinay Thota via cos)
 
-    HDFS-1467. Append pipeline never succeeds with more than one replica.
-    (Todd Lipcon via eli)
-
     HDFS-1503. TestSaveNamespace fails. (Todd Lipcon via cos)
 
     HDFS-1524. Image loader should make sure to read every byte in image file.
@@ -663,11 +942,6 @@ Release 0.22.0 - Unreleased
     HDFS-884. DataNode throws IOException if all data directories are 
     unavailable. (Steve Loughran and shv)
 
-    HDFS-1572. Checkpointer should trigger checkpoint with specified period.
-    (jghoman)
-
-    HDFS-1561. BackupNode listens on the default host. (shv)
-
     HDFS-1591. HDFS part of HADOOP-6642. (Chris Douglas, Po Cheung via shv)
 
     HDFS-900. Corrupt replicas are not processed correctly in block report (shv)
@@ -685,7 +959,8 @@ Release 0.22.0 - Unreleased
 
     HDFS-981. test-contrib fails due to test-cactus failure (cos)
 
-    HDFS-1625. Ignore disk space values in TestDataNodeMXBean.  (szetszwo)
+    HDFS-1001. DataXceiver and BlockReader disagree on when to send/recv
+    CHECKSUM_OK. (bc Wong via eli)
 
     HDFS-1781. Fix the path for jsvc in bin/hdfs.  (John George via szetszwo)
 
@@ -708,8 +983,6 @@ Release 0.22.0 - Unreleased
 
 Release 0.21.1 - Unreleased
 
-  IMPROVEMENTS
-
     HDFS-1411. Correct backup node startup command in hdfs user guide.
     (Ching-Shen Chen via shv)
 
@@ -733,6 +1006,17 @@ Release 0.21.1 - Unreleased
     HDFS-1292. Allow artifacts to be published to the staging Apache Nexus
     Maven Repository.  (Giridharan Kesavan via tomwhite)
 
+    HDFS-1552. Remove java5 dependencies from build. (cos) 
+
+    HDFS-1189. Quota counts missed between clear quota and set quota.
+    (John George via szetszwo)
+
+    HDFS-1665. Balancer misuses dfs.heartbeat.interval as milliseconds.
+    (szetszwo)
+
+    HDFS-1728. SecondaryNameNode.checkpointSize is in bytes but not in MB.
+    (szetszwo)
+
     HDFS-1206. TestFiHFlush fails intermittently. (cos)
 
     HDFS-1548. Fault-injection tests are executed multiple times if invoked
@@ -746,14 +1030,6 @@ Release 0.21.1 - Unreleased
     block placement and checkpoint/backup node features.  (Joe Crobak
     via szetszwo)
 
-    HDFS-1189. Quota counts missed between clear quota and set quota.
-    (John George via szetszwo)
-
-    HDFS-1665. Balancer misuses dfs.heartbeat.interval as milliseconds.
-    (szetszwo)
-
-    HDFS-1728. SecondaryNameNode.checkpointSize is in bytes but not in MB.
-    (szetszwo)
 
     HDFS-1596. Replace fs.checkpoint.* with dfs.namenode.checkpoint.*
     in documentations.  (Harsh J Chouraria via szetszwo)
@@ -807,6 +1083,8 @@ Release 0.21.0 - 2010-08-13
     error message on the screen when cat a directory or a 
     non-existent file. (hairong)
 
+    HDFS-1439. HDFS Federation: Fix compilation error in TestFiHftp. (suresh)
+
   NEW FEATURES
 
     HDFS-1134. Large-scale Automated Framework. (cos)

Modified: hadoop/hdfs/branches/HDFS-1073/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/hdfs?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/hdfs (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/hdfs Thu May  5 05:40:07 2011
@@ -34,6 +34,7 @@ function print_usage(){
   echo "  oiv                  apply the offline fsimage viewer to an fsimage"
   echo "  oev                  apply the offline edits viewer to an edits file"
   echo "  fetchdt              fetch a delegation token from the NameNode"
+  echo "  getconf              get config values from configuration"
   echo "						Use -help to see options"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
@@ -94,6 +95,8 @@ elif [ "$COMMAND" = "oev" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
 elif [ "$COMMAND" = "fetchdt" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+elif [ "$COMMAND" = "getconf" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.GetConf
 else
   echo $COMMAND - invalid command
   print_usage
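
As a usage sketch of the new subcommand (the hostnames below are hypothetical; the flags are the ones the revised start/stop scripts invoke):

    $ bin/hdfs getconf -namenodes
    nn1.example.com nn2.example.com
    # prints 0.0.0.0 or nothing when no secondary namenodes are configured
    $ bin/hdfs getconf -secondarynamenodes
    0.0.0.0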

Modified: hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/start-dfs.sh Thu May  5 05:40:07 2011
@@ -25,17 +25,17 @@ usage="Usage: start-dfs.sh [-upgrade|-ro
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/hdfs-config.sh
+. "$bin/hdfs-config.sh"
 
 # get arguments
 if [ $# -ge 1 ]; then
-	nameStartOpt=$1
+	nameStartOpt="$1"
 	shift
-	case $nameStartOpt in
+	case "$nameStartOpt" in
 	  (-upgrade)
 	  	;;
 	  (-rollback) 
-	  	dataStartOpt=$nameStartOpt
+	  	dataStartOpt="$nameStartOpt"
 	  	;;
 	  (*)
 		  echo $usage
@@ -44,14 +44,50 @@ if [ $# -ge 1 ]; then
 	esac
 fi
 
-# start dfs daemons
-# start namenode after datanodes, to minimize time namenode is up w/o data
-# note: datanodes will log connection errors until namenode starts
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start namenode $nameStartOpt
-#
+#---------------------------------------------------------
+# namenodes
+
+NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -namenodes)
+
+echo "Starting namenodes on [$NAMENODES]"
+
+"$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+  --config "$HADOOP_CONF_DIR" \
+  --hostnames "$NAMENODES" \
+  --script "$bin/hdfs" start namenode $nameStartOpt
+
+#---------------------------------------------------------
+# datanodes (using default slaves file)
+
 if [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  echo "Attempting to start secure cluster, skipping datanodes. Run start-secure-dns.sh as root to complete startup."
+  echo \
+    "Attempting to start secure cluster, skipping datanodes. " \
+    "Run start-secure-dns.sh as root to complete startup."
+else
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --script "$bin/hdfs" start datanode $dataStartOpt
+fi
+
+#---------------------------------------------------------
+# secondary namenodes (if any)
+
+# if no secondary namenodes are configured, getconf returns
+# 0.0.0.0 or an empty string
+SECONDARY_NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -secondarynamenodes 2>&-)
+SECONDARY_NAMENODES=${SECONDARY_NAMENODES:='0.0.0.0'}
+
+if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
+  echo \
+    "Secondary namenodes are not configured. " \
+    "Cannot start secondary namenodes."
 else
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
+  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
+
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --hostnames "$SECONDARY_NAMENODES" \
+    --script "$bin/hdfs" start secondarynamenode
 fi
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters --script "$bin"/hdfs start secondarynamenode
+
+# eof

Modified: hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh (original)
+++ hadoop/hdfs/branches/HDFS-1073/bin/stop-dfs.sh Thu May  5 05:40:07 2011
@@ -15,18 +15,55 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-# Stop hadoop DFS daemons.  Run this on master node.
-
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
 . "$bin"/hdfs-config.sh
 
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop namenode
+#---------------------------------------------------------
+# namenodes
+
+NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -namenodes)
+
+echo "Stopping namenodes on [$NAMENODES]"
+
+"$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+  --config "$HADOOP_CONF_DIR" \
+  --hostnames "$NAMENODES" \
+  --script "$bin/hdfs" stop namenode
+
+#---------------------------------------------------------
+# datanodes (using default slaves file)
+
 if [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  echo "Attempting to stop secure cluster, skipping datanodes. Run stop-secure-dns.sh as root to complete shutdown."
+  echo \
+    "Attempting to stop secure cluster, skipping datanodes. " \
+    "Run stop-secure-dns.sh as root to complete shutdown."
 else
-  "$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --script "$bin/hdfs" stop datanode
 fi
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters --script "$bin"/hdfs stop secondarynamenode
\ No newline at end of file
+
+#---------------------------------------------------------
+# secondary namenodes (if any)
+
+# if no secondary namenodes are configured, getconf returns
+# 0.0.0.0 or an empty string
+SECONDARY_NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -secondarynamenodes 2>&-)
+SECONDARY_NAMENODES=${SECONDARY_NAMENODES:-'0.0.0.0'}
+
+if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
+  echo \
+    "Secondary namenodes are not configured. " \
+    "Cannot stop secondary namenodes."
+else
+  echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --hostnames "$SECONDARY_NAMENODES" \
+    --script "$bin/hdfs" stop secondarynamenode
+fi
+
+# eof
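
Note that start-dfs.sh defaults the variable with ${SECONDARY_NAMENODES:='0.0.0.0'} while stop-dfs.sh uses ${SECONDARY_NAMENODES:-'0.0.0.0'}; since the expansion is reassigned to the same variable either way, both behave identically here. A minimal sketch of the general difference:

    unset X
    echo "${X:-fallback}"   # prints "fallback"; X remains unset
    echo "${X:=fallback}"   # prints "fallback" and also assigns it to X
    echo "$X"               # now prints "fallback"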

Propchange: hadoop/hdfs/branches/HDFS-1073/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu May  5 05:40:07 2011
@@ -1,5 +1,6 @@
 /hadoop/core/branches/branch-0.19/hdfs/build.xml:713112
 /hadoop/core/trunk/build.xml:779102
+/hadoop/hdfs/branches/HDFS-1052/build.xml:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/build.xml:796829-820463
 /hadoop/hdfs/branches/branch-0.21/build.xml:820487
-/hadoop/hdfs/trunk/build.xml:1086482-1097628
+/hadoop/hdfs/trunk/build.xml:1086482-1099686

Propchange: hadoop/hdfs/branches/HDFS-1073/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu May  5 05:40:07 2011
@@ -1,3 +1,4 @@
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
-/hadoop/hdfs/trunk/src/c++/libhdfs:1086482-1097628
+/hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs:987665-1095512
+/hadoop/hdfs/trunk/src/c++/libhdfs:1086482-1099686

Propchange: hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu May  5 05:40:07 2011
@@ -1,5 +1,6 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/contrib/hdfsproxy:713112
 /hadoop/core/trunk/src/contrib/hdfsproxy:776175-784663
+/hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy:820487
-/hadoop/hdfs/trunk/src/contrib/hdfsproxy:1086482-1097628
+/hadoop/hdfs/trunk/src/contrib/hdfsproxy:1086482-1099686

Modified: hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java Thu May  5 05:40:07 2011
@@ -32,6 +32,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
 /**
  * A HTTPS/SSL proxy to HDFS, implementing certificate based access control.
@@ -69,7 +70,7 @@ public class HdfsProxy {
 
     this.server = new ProxyHttpServer(sslAddr, sslConf);
     this.server.setAttribute("proxy.https.port", server.getPort());
-    this.server.setAttribute("name.node.address", nnAddr);
+    this.server.setAttribute(NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY, nnAddr);
     this.server.setAttribute(JspHelper.CURRENT_CONF, new HdfsConfiguration());
     this.server.addGlobalFilter("ProxyFilter", ProxyFilter.class.getName(), null);
     this.server.addServlet("listPaths", "/listPaths/*", ProxyListPathsServlet.class);

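The functional change above is small but worth spelling out: the proxy used to publish the NameNode address under a hard-coded string, and every reader had to repeat the same literal. Keying the attribute off NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY means writer and readers compile against one definition. A minimal sketch of the pattern, with a plain map standing in for the servlet context and a local constant standing in for the HDFS one:

    import java.net.InetSocketAddress;
    import java.util.HashMap;
    import java.util.Map;

    public class SharedAttributeKeySketch {
      // One definition both sides compile against, replacing the duplicated
      // literal "name.node.address" (a stand-in for the NameNode constant).
      static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address";

      public static void main(String[] args) {
        Map<String, Object> servletContext = new HashMap<String, Object>();
        // Writer side, as HdfsProxy does with server.setAttribute(...):
        servletContext.put(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
            InetSocketAddress.createUnresolved("nn.example.com", 8020));
        // Reader side, as the proxy servlets do via getServletContext():
        InetSocketAddress nn =
            (InetSocketAddress) servletContext.get(NAMENODE_ADDRESS_ATTRIBUTE_KEY);
        System.out.println(nn); // host:port of the NameNode
      }
    }
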
Modified: hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java Thu May  5 05:40:07 2011
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfsproxy;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 
@@ -28,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.FileDataServlet;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /** {@inheritDoc} */
@@ -44,10 +46,15 @@ public class ProxyFileDataServlet extend
     if (dt != null) {
       dtParam=JspHelper.getDelegationTokenUrlParam(dt);
     }
-
+    InetSocketAddress nnAddress = (InetSocketAddress) getServletContext()
+        .getAttribute(NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
+    String nnHostPort = nnAddress == null ? null : NameNode
+        .getHostPortString(nnAddress);
+    String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS,
+        nnHostPort);
     return new URI(request.getScheme(), null, request.getServerName(), request
         .getServerPort(), "/streamFile" + i.getFullName(parent),
-        "&ugi=" + ugi.getShortUserName() + dtParam, null);
+        "&ugi=" + ugi.getShortUserName() + dtParam + addrParam, null);
   }
 
   /** {@inheritDoc} */

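The servlet side is the counterpart: ProxyFileDataServlet reads the attribute back and forwards the NameNode's host:port to the /streamFile servlet as an extra query parameter, presumably so a downstream daemon serving several block pools can tell which NameNode the request refers to. A rough sketch of the query assembly, assuming JspHelper.getUrlParam returns an empty string for a null value and "&name=value" otherwise, and using a made-up parameter name in place of JspHelper.NAMENODE_ADDRESS:

    public class StreamFileQuerySketch {
      // Assumed behavior of JspHelper.getUrlParam: skip null values entirely.
      static String getUrlParam(String name, String value) {
        return value == null ? "" : "&" + name + "=" + value;
      }

      public static void main(String[] args) {
        String ugi = "alice";                       // short user name
        String dtParam = "";                        // no delegation token here
        String nnHostPort = "nn.example.com:8020";  // from the context attribute
        String addrParam = getUrlParam("nnaddr", nnHostPort); // name is assumed
        System.out.println("&ugi=" + ugi + dtParam + addrParam);
        // -> &ugi=alice&nnaddr=nn.example.com:8020
      }
    }
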
Propchange: hadoop/hdfs/branches/HDFS-1073/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu May  5 05:40:07 2011
@@ -1,5 +1,6 @@
 /hadoop/core/branches/branch-0.19/hdfs/src/java:713112
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
+/hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java:820487
-/hadoop/hdfs/trunk/src/java:1086482-1097628
+/hadoop/hdfs/trunk/src/java:1086482-1099686

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/BlockReader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/BlockReader.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/BlockReader.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/BlockReader.java Thu May  5 05:40:07 2011
@@ -35,7 +35,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
@@ -317,13 +317,11 @@ public class BlockReader extends FSInput
     return bytesToRead;
   }
   
-  private BlockReader( String file, long blockId, DataInputStream in, 
-                       DataChecksum checksum, boolean verifyChecksum,
-                       long startOffset, long firstChunkOffset,
-                       long bytesToRead,
-                       Socket dnSock ) {
+  private BlockReader(String file, String bpid, long blockId,
+      DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
+      long startOffset, long firstChunkOffset, long bytesToRead, Socket dnSock) {
     // Path is used only for printing block and file information in debug
-    super(new Path("/blk_" + blockId + ":of:" + file)/*too non path-like?*/,
+    super(new Path("/blk_" + blockId + ":" + bpid + ":of:"+ file)/*too non path-like?*/,
           1, verifyChecksum,
           checksum.getChecksumSize() > 0? checksum : null, 
           checksum.getBytesPerChecksum(),
@@ -349,7 +347,7 @@ public class BlockReader extends FSInput
   }
 
   public static BlockReader newBlockReader(Socket sock, String file,
-      Block block, Token<BlockTokenIdentifier> blockToken, 
+      ExtendedBlock block, Token<BlockTokenIdentifier> blockToken, 
       long startOffset, long len, int bufferSize) throws IOException {
     return newBlockReader(sock, file, block, blockToken, startOffset, len, bufferSize,
         true);
@@ -357,7 +355,7 @@ public class BlockReader extends FSInput
 
   /** Java Doc required */
   public static BlockReader newBlockReader( Socket sock, String file, 
-                                     Block block, 
+                                     ExtendedBlock block, 
                                      Token<BlockTokenIdentifier> blockToken,
                                      long startOffset, long len,
                                      int bufferSize, boolean verifyChecksum)
@@ -367,7 +365,7 @@ public class BlockReader extends FSInput
   }
 
   public static BlockReader newBlockReader( Socket sock, String file,
-                                     Block block, 
+                                     ExtendedBlock block, 
                                      Token<BlockTokenIdentifier> blockToken,
                                      long startOffset, long len,
                                      int bufferSize, boolean verifyChecksum,
@@ -394,14 +392,14 @@ public class BlockReader extends FSInput
             "Got access token error for OP_READ_BLOCK, self="
                 + sock.getLocalSocketAddress() + ", remote="
                 + sock.getRemoteSocketAddress() + ", for file " + file
-                + ", for block " + block.getBlockId() 
-                + "_" + block.getGenerationStamp());
+                + ", for pool " + block.getBlockPoolId() + " block " 
+                + block.getBlockId() + "_" + block.getGenerationStamp());
       } else {
         throw new IOException("Got error for OP_READ_BLOCK, self="
             + sock.getLocalSocketAddress() + ", remote="
             + sock.getRemoteSocketAddress() + ", for file " + file
-            + ", for block " + block.getBlockId() + "_" 
-            + block.getGenerationStamp());
+            + ", for pool " + block.getBlockPoolId() + " block " 
+            + block.getBlockId() + "_" + block.getGenerationStamp());
       }
     }
     DataChecksum checksum = DataChecksum.newDataChecksum( in );
@@ -417,8 +415,8 @@ public class BlockReader extends FSInput
                             startOffset + " for file " + file);
     }
 
-    return new BlockReader(file, block.getBlockId(), in, checksum,
-        verifyChecksum, startOffset, firstChunkOffset, len, sock);
+    return new BlockReader(file, block.getBlockPoolId(), block.getBlockId(),
+        in, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock);
   }
 
   @Override
@@ -453,9 +451,15 @@ public class BlockReader extends FSInput
     }
   }
   
-  // File name to print when accessing a block directory from servlets
+  /**
+   * File name to print when accessing a block directly (from servlets)
+   * @param s Address of the block location
+   * @param poolId Block pool ID of the block
+   * @param blockId Block ID of the block
+   * @return string that has a file name for debug purposes
+   */
   public static String getFileName(final InetSocketAddress s,
-      final long blockId) {
-    return s.toString() + ":" + blockId;
+      final String poolId, final long blockId) {
+    return s.toString() + ":" + poolId + ":" + blockId;
   }
 }

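With block pools in the picture, a numeric block ID alone no longer identifies a block cluster-wide, so both the debug Path built inside BlockReader and the servlet-facing getFileName() now embed the block pool ID. Restated as a standalone snippet, mirroring the method body in the hunk above, with example values:

    import java.net.InetSocketAddress;

    public class DebugFileNameSketch {
      // Same body as BlockReader.getFileName() in the hunk above.
      static String getFileName(InetSocketAddress s, String poolId, long blockId) {
        return s.toString() + ":" + poolId + ":" + blockId;
      }

      public static void main(String[] args) {
        InetSocketAddress dn =
            InetSocketAddress.createUnresolved("dn1.example.com", 50010);
        // The pool ID below is a made-up example of the BP-... form.
        System.out.println(getFileName(dn, "BP-1234-10.0.0.1-1300000000000", 4711L));
      }
    }
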
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSClient.java Thu May  5 05:40:07 2011
@@ -31,21 +31,17 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.net.Socket;
-import java.net.SocketTimeoutException;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.SortedMap;
-import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 
 import javax.net.SocketFactory;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -64,7 +60,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -73,6 +68,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -101,7 +97,6 @@ import org.apache.hadoop.security.Access
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
 
@@ -122,7 +117,7 @@ public class DFSClient implements FSCons
   public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
   static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
   final ClientProtocol namenode;
-  private final ClientProtocol rpcNamenode;
+  final ClientProtocol rpcNamenode;
   final UserGroupInformation ugi;
   volatile boolean clientRunning = true;
   private volatile FsServerDefaults serverDefaults;
@@ -138,7 +133,7 @@ public class DFSClient implements FSCons
   final DataTransferProtocol.ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
   final FileSystem.Statistics stats;
   final int hdfsTimeout;    // timeout value for a DFS operation.
-  final LeaseChecker leasechecker;
+  final LeaseRenewer leaserenewer;
 
   /**
    * The locking hierarchy is to first acquire lock on DFSClient object, followed by 
@@ -197,7 +192,7 @@ public class DFSClient implements FSCons
       ClientDatanodeProtocol.LOG.debug("ClientDatanodeProtocol addr=" + addr);
     }
     UserGroupInformation ticket = UserGroupInformation
-        .createRemoteUser(locatedBlock.getBlock().toString());
+        .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
     ticket.addToken(locatedBlock.getBlockToken());
     return (ClientDatanodeProtocol)RPC.getProxy(ClientDatanodeProtocol.class,
         ClientDatanodeProtocol.versionID, addr, ticket, conf, NetUtils
@@ -254,7 +249,7 @@ public class DFSClient implements FSCons
 
     // The hdfsTimeout is currently the same as the ipc timeout 
     this.hdfsTimeout = Client.getTimeout(conf);
-    this.leasechecker = new LeaseChecker(hdfsTimeout);
+    this.leaserenewer = new LeaseRenewer(this, hdfsTimeout);
 
     this.ugi = UserGroupInformation.getCurrentUser();
     
@@ -320,10 +315,10 @@ public class DFSClient implements FSCons
    */
   public synchronized void close() throws IOException {
     if(clientRunning) {
-      leasechecker.close();
+      leaserenewer.close();
       clientRunning = false;
       try {
-        leasechecker.interruptAndJoin();
+        leaserenewer.interruptAndJoin();
       } catch (InterruptedException ie) {
       }
   
@@ -634,18 +629,18 @@ public class DFSClient implements FSCons
     if(LOG.isDebugEnabled()) {
       LOG.debug(src + ": masked=" + masked);
     }
-    OutputStream result = new DFSOutputStream(this, src, masked,
+    final DFSOutputStream result = new DFSOutputStream(this, src, masked,
         flag, createParent, replication, blockSize, progress, buffersize,
         conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 
                     DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT));
-    leasechecker.put(src, result);
+    leaserenewer.put(src, result);
     return result;
   }
   
   /**
    * Append to an existing file if {@link CreateFlag#APPEND} is present
    */
-  private OutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
+  private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
       int buffersize, Progressable progress) throws IOException {
     if (flag.contains(CreateFlag.APPEND)) {
       HdfsFileStatus stat = getFileInfo(src);
@@ -679,13 +674,13 @@ public class DFSClient implements FSCons
       throws IOException, UnresolvedLinkException {
     checkOpen();
     CreateFlag.validate(flag);
-    OutputStream result = primitiveAppend(src, flag, buffersize, progress);
+    DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
     if (result == null) {
       result = new DFSOutputStream(this, src, absPermission,
           flag, createParent, replication, blockSize, progress, buffersize,
           bytesPerChecksum);
     }
-    leasechecker.put(src, result);
+    leaserenewer.put(src, result);
     return result;
   }
   
@@ -727,7 +722,7 @@ public class DFSClient implements FSCons
   }
 
   /** Method to get stream returned by append call */
-  private OutputStream callAppend(HdfsFileStatus stat, String src,
+  private DFSOutputStream callAppend(HdfsFileStatus stat, String src,
       int buffersize, Progressable progress) throws IOException {
     LocatedBlock lastBlock = null;
     try {
@@ -755,7 +750,7 @@ public class DFSClient implements FSCons
    * 
    * @see ClientProtocol#append(String, String) 
    */
-  OutputStream append(String src, int buffersize, Progressable progress) 
+  DFSOutputStream append(String src, int buffersize, Progressable progress) 
       throws IOException {
     checkOpen();
     HdfsFileStatus stat = getFileInfo(src);
@@ -763,8 +758,8 @@ public class DFSClient implements FSCons
       throw new FileNotFoundException("failed to append to non-existent file "
           + src + " on client " + clientName);
     }
-    OutputStream result = callAppend(stat, src, buffersize, progress);
-    leasechecker.put(src, result);
+    final DFSOutputStream result = callAppend(stat, src, buffersize, progress);
+    leaserenewer.put(src, result);
     return result;
   }
 
@@ -979,7 +974,7 @@ public class DFSClient implements FSCons
         refetchBlocks = false;
       }
       LocatedBlock lb = locatedblocks.get(i);
-      final Block block = lb.getBlock();
+      final ExtendedBlock block = lb.getBlock();
       final DatanodeInfo[] datanodes = lb.getLocations();
       
       //try each datanode location of the block
@@ -1364,211 +1359,6 @@ public class DFSClient implements FSCons
     }
   }
 
-  /** Lease management*/
-  class LeaseChecker {
-    static final long LEASE_RENEWER_GRACE_DEFAULT = 60*1000L;
-    static final long LEASE_RENEWER_SLEEP_DEFAULT = 1000L;
-    /** A map from src -> DFSOutputStream of files that are currently being
-     * written by this client.
-     */
-    private final SortedMap<String, OutputStream> pendingCreates
-        = new TreeMap<String, OutputStream>();
-    /** The time in milliseconds that the map became empty. */
-    private long emptyTime = Long.MAX_VALUE;
-    /** A fixed lease renewal time period in milliseconds */
-    private final long renewal;
-
-    /** A daemon for renewing lease */
-    private Daemon daemon = null;
-    /** Only the daemon with currentId should run. */
-    private int currentId = 0;
-
-    /** 
-     * A period in milliseconds that the lease renewer thread should run
-     * after the map became empty.
-     * If the map is empty for a time period longer than the grace period,
-     * the renewer should terminate.  
-     */
-    private long gracePeriod;
-    /**
-     * The time period in milliseconds
-     * that the renewer sleeps for each iteration. 
-     */
-    private volatile long sleepPeriod;
-
-    private LeaseChecker(final long timeout) {
-      this.renewal = (timeout > 0 && timeout < LEASE_SOFTLIMIT_PERIOD)? 
-          timeout/2: LEASE_SOFTLIMIT_PERIOD/2;
-      setGraceSleepPeriod(LEASE_RENEWER_GRACE_DEFAULT);
-    }
-
-    /** Set the grace period and adjust the sleep period accordingly. */
-    void setGraceSleepPeriod(final long gracePeriod) {
-      if (gracePeriod < 100L) {
-        throw new HadoopIllegalArgumentException(gracePeriod
-            + " = gracePeriod < 100ms is too small.");
-      }
-      synchronized(this) {
-        this.gracePeriod = gracePeriod;
-      }
-      final long half = gracePeriod/2;
-      this.sleepPeriod = half < LEASE_RENEWER_SLEEP_DEFAULT?
-          half: LEASE_RENEWER_SLEEP_DEFAULT;
-    }
-
-    /** Is the daemon running? */
-    synchronized boolean isRunning() {
-      return daemon != null && daemon.isAlive();
-    }
-
-    /** Is the empty period longer than the grace period? */  
-    private synchronized boolean isRenewerExpired() {
-      return emptyTime != Long.MAX_VALUE
-          && System.currentTimeMillis() - emptyTime > gracePeriod;
-    }
-
-    synchronized void put(String src, OutputStream out) {
-      if (clientRunning) {
-        if (daemon == null || isRenewerExpired()) {
-          //start a new deamon with a new id.
-          final int id = ++currentId;
-          daemon = new Daemon(new Runnable() {
-            @Override
-            public void run() {
-              try {
-                LeaseChecker.this.run(id);
-              } catch(InterruptedException e) {
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug(LeaseChecker.this.getClass().getSimpleName()
-                      + " is interrupted.", e);
-                }
-              }
-            }
-          });
-          daemon.start();
-        }
-        pendingCreates.put(src, out);
-        emptyTime = Long.MAX_VALUE;
-      }
-    }
-    
-    synchronized void remove(String src) {
-      pendingCreates.remove(src);
-      if (pendingCreates.isEmpty() && emptyTime == Long.MAX_VALUE) {
-        //discover the first time that the map is empty.
-        emptyTime = System.currentTimeMillis();
-      }
-    }
-    
-    void interruptAndJoin() throws InterruptedException {
-      Daemon daemonCopy = null;
-      synchronized (this) {
-        if (isRunning()) {
-          daemon.interrupt();
-          daemonCopy = daemon;
-        }
-      }
-     
-      if (daemonCopy != null) {
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("Wait for lease checker to terminate");
-        }
-        daemonCopy.join();
-      }
-    }
-
-    void close() {
-      while (true) {
-        String src;
-        OutputStream out;
-        synchronized (this) {
-          if (pendingCreates.isEmpty()) {
-            return;
-          }
-          src = pendingCreates.firstKey();
-          out = pendingCreates.remove(src);
-        }
-        if (out != null) {
-          try {
-            out.close();
-          } catch (IOException ie) {
-            LOG.error("Exception closing file " + src+ " : " + ie, ie);
-          }
-        }
-      }
-    }
-
-    /**
-     * Abort all open files. Release resources held. Ignore all errors.
-     */
-    synchronized void abort() {
-      clientRunning = false;
-      while (!pendingCreates.isEmpty()) {
-        String src = pendingCreates.firstKey();
-        DFSOutputStream out = (DFSOutputStream)pendingCreates.remove(src);
-        if (out != null) {
-          try {
-            out.abort();
-          } catch (IOException ie) {
-            LOG.error("Exception aborting file " + src+ ": ", ie);
-          }
-        }
-      }
-      RPC.stopProxy(rpcNamenode); // close connections to the namenode
-    }
-
-    private void renew() throws IOException {
-      synchronized(this) {
-        if (pendingCreates.isEmpty()) {
-          return;
-        }
-      }
-      namenode.renewLease(clientName);
-    }
-
-    /**
-     * Periodically check in with the namenode and renew all the leases
-     * when the lease period is half over.
-     */
-    private void run(final int id) throws InterruptedException {
-      for(long lastRenewed = System.currentTimeMillis();
-          clientRunning && !Thread.interrupted();
-          Thread.sleep(sleepPeriod)) {
-        if (System.currentTimeMillis() - lastRenewed >= renewal) {
-          try {
-            renew();
-            lastRenewed = System.currentTimeMillis();
-          } catch (SocketTimeoutException ie) {
-            LOG.warn("Failed to renew lease for " + clientName + " for "
-                + (renewal/1000) + " seconds.  Aborting ...", ie);
-            abort();
-            break;
-          } catch (IOException ie) {
-            LOG.warn("Failed to renew lease for " + clientName + " for "
-                + (renewal/1000) + " seconds.  Will retry shortly ...", ie);
-          }
-        }
-
-        synchronized(this) {
-          if (id != currentId || isRenewerExpired()) {
-            //no longer the current daemon or expired
-            return;
-          }
-        }
-      }
-    }
-
-    /** {@inheritDoc} */
-    public String toString() {
-      String s = getClass().getSimpleName();
-      if (LOG.isTraceEnabled()) {
-        return s + "@" + DFSClient.this + ": "
-               + StringUtils.stringifyException(new Throwable("for testing"));
-      }
-      return s;
-    }
-  }
-
   /**
    * The Hdfs implementation of {@link FSDataInputStream}
    */
@@ -1589,7 +1379,7 @@ public class DFSClient implements FSCons
     /**
      * Returns the block containing the target position. 
      */
-    public Block getCurrentBlock() {
+    public ExtendedBlock getCurrentBlock() {
       return ((DFSInputStream)in).getCurrentBlock();
     }
 
@@ -1608,7 +1398,7 @@ public class DFSClient implements FSCons
     }
   }
 
-  void reportChecksumFailure(String file, Block blk, DatanodeInfo dn) {
+  void reportChecksumFailure(String file, ExtendedBlock blk, DatanodeInfo dn) {
     DatanodeInfo [] dnArr = { dn };
     LocatedBlock [] lblocks = { new LocatedBlock(blk, dnArr) };
     reportChecksumFailure(file, lblocks);

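The largest removal in DFSClient is the LeaseChecker inner class, whose job moves to the new LeaseRenewer. The deleted code still documents the policy worth keeping in mind: renew at half the soft-limit period (or half the ipc timeout, when that is shorter), sleep between checks, and let the daemon retire once no output streams have been registered for longer than a grace period. A condensed sketch of that loop, with illustrative names rather than the LeaseRenewer API:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;

    public class LeaseLoopSketch {
      interface Renewer { void renewLease() throws IOException; }

      // emptyTime holds Long.MAX_VALUE while streams are open, otherwise the
      // time the last stream was removed -- matching the removed LeaseChecker.
      static void run(Renewer nn, long renewalMs, long sleepMs,
                      AtomicLong emptyTime, long graceMs)
          throws InterruptedException {
        for (long lastRenewed = System.currentTimeMillis();
             !Thread.interrupted(); Thread.sleep(sleepMs)) {
          if (System.currentTimeMillis() - lastRenewed >= renewalMs) {
            try {
              nn.renewLease();
              lastRenewed = System.currentTimeMillis();
            } catch (IOException e) {
              // The real code aborts on SocketTimeoutException, retries otherwise.
            }
          }
          long empty = emptyTime.get();
          if (empty != Long.MAX_VALUE
              && System.currentTimeMillis() - empty > graceMs) {
            return; // idle past the grace period: let the daemon die
          }
        }
      }
    }
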
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu May  5 05:40:07 2011
@@ -54,6 +54,7 @@ public class DFSConfigKeys extends Commo
   public static final long    DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
   public static final String  DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
   public static final String  DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50070";
+  public static final String  DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address";
   public static final String  DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address";
   public static final String  DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
   public static final long    DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;
@@ -260,6 +261,9 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.https.principal";
   public static final String  DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
   public static final int     DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
+  
+  public static final String DFS_FEDERATION_NAMESERVICES = "dfs.federation.nameservices";
+  public static final String DFS_FEDERATION_NAMESERVICE_ID = "dfs.federation.nameservice.id";
   public static final String  DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY = "dfs.namenode.resource.check.interval";
   public static final int     DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT = 5000;
   public static final String  DFS_NAMENODE_DU_RESERVED_KEY = "dfs.namenode.resource.du.reserved";

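Two federation keys and a dfs.namenode.rpc-address key join DFSConfigKeys: the first federation key lists the nameservices in a federated cluster, the second names the one a given daemon belongs to. A hedged sketch of reading them through the stock Configuration API follows; the key strings are taken from the hunk, while everything else, including the per-nameservice suffix convention in the comment, is an assumption:

    import org.apache.hadoop.conf.Configuration;

    public class FederationKeysSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.federation.nameservices", "ns1,ns2");
        conf.set("dfs.federation.nameservice.id", "ns1");

        String localId = conf.get("dfs.federation.nameservice.id");
        for (String ns : conf.getStrings("dfs.federation.nameservices")) {
          // Per-nameservice settings are commonly derived by suffixing, e.g.
          // "dfs.namenode.rpc-address." + ns -- an assumption, not in this diff.
          System.out.println(ns + (ns.equals(localId) ? " (local)" : ""));
        }
      }
    }
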
Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSInputStream.java Thu May  5 05:40:07 2011
@@ -30,7 +30,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -62,7 +62,7 @@ public class DFSInputStream extends FSIn
   private LocatedBlocks locatedBlocks = null;
   private long lastBlockBeingWrittenLength = 0;
   private DatanodeInfo currentNode = null;
-  private Block currentBlock = null;
+  private ExtendedBlock currentBlock = null;
   private long pos = 0;
   private long blockEnd = -1;
 
@@ -204,7 +204,7 @@ public class DFSInputStream extends FSIn
   /**
    * Returns the block containing the target position. 
    */
-  public Block getCurrentBlock() {
+  public ExtendedBlock getCurrentBlock() {
     return currentBlock;
   }
 
@@ -384,10 +384,10 @@ public class DFSInputStream extends FSIn
         s = dfsClient.socketFactory.createSocket();
         NetUtils.connect(s, targetAddr, dfsClient.socketTimeout);
         s.setSoTimeout(dfsClient.socketTimeout);
-        Block blk = targetBlock.getBlock();
+        ExtendedBlock blk = targetBlock.getBlock();
         Token<BlockTokenIdentifier> accessToken = targetBlock.getBlockToken();
         
-        blockReader = BlockReader.newBlockReader(s, src, blk, 
+        blockReader = BlockReader.newBlockReader(s, src, blk,
             accessToken, 
             offsetIntoBlock, blk.getNumBytes() - offsetIntoBlock,
             buffersize, verifyChecksum, dfsClient.clientName);

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Thu May  5 05:40:07 2011
@@ -45,7 +45,6 @@ import org.apache.hadoop.fs.ParentNotDir
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
@@ -53,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
@@ -282,7 +282,7 @@ class DFSOutputStream extends FSOutputSu
   //
   class DataStreamer extends Daemon {
     private volatile boolean streamerClosed = false;
-    private Block block; // its length is number of bytes acked
+    private ExtendedBlock block; // its length is number of bytes acked
     private Token<BlockTokenIdentifier> accessToken;
     private DataOutputStream blockStream;
     private DataInputStream blockReplyStream;
@@ -929,8 +929,8 @@ class DFSOutputStream extends FSOutputSu
 
       if (success) {
         // update pipeline at the namenode
-        Block newBlock = new Block(
-            block.getBlockId(), block.getNumBytes(), newGS);
+        ExtendedBlock newBlock = new ExtendedBlock(
+            block.getBlockPoolId(), block.getBlockId(), block.getNumBytes(), newGS);
         dfsClient.namenode.updatePipeline(dfsClient.clientName, block, newBlock, nodes);
         // update client side generation stamp
         block = newBlock;
@@ -1015,8 +1015,8 @@ class DFSOutputStream extends FSOutputSu
         blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
 
         // send the request
-        DataTransferProtocol.Sender.opWriteBlock(out, block, nodes.length,
-            recoveryFlag ? stage.getRecoveryStage() : stage, newGS, 
+        DataTransferProtocol.Sender.opWriteBlock(out, block,
+            nodes.length, recoveryFlag ? stage.getRecoveryStage() : stage, newGS, 
             block.getNumBytes(), bytesSent, dfsClient.clientName, null, nodes,
             accessToken);
         checksum.writeHeader(out);
@@ -1120,7 +1120,7 @@ class DFSOutputStream extends FSOutputSu
       } 
     }
 
-    Block getBlock() {
+    ExtendedBlock getBlock() {
       return block;
     }
 
@@ -1636,10 +1636,10 @@ class DFSOutputStream extends FSOutputSu
 
       flushInternal();             // flush all data to Datanodes
       // get last block before destroying the streamer
-      Block lastBlock = streamer.getBlock();
+      ExtendedBlock lastBlock = streamer.getBlock();
       closeThreads(false);
       completeFile(lastBlock);
-      dfsClient.leasechecker.remove(src);
+      dfsClient.leaserenewer.remove(src);
     } finally {
       closed = true;
     }
@@ -1647,7 +1647,7 @@ class DFSOutputStream extends FSOutputSu
 
   // should be called holding (this) lock since setTestFilename() may 
   // be called during unit tests
-  private void completeFile(Block last) throws IOException {
+  private void completeFile(ExtendedBlock last) throws IOException {
     long localstart = System.currentTimeMillis();
     boolean fileComplete = false;
     while (!fileComplete) {

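DFSOutputStream follows the same Block-to-ExtendedBlock migration, and the pipeline-recovery hunk makes the identity rules concrete: the recovered block keeps its pool ID, block ID and acked length, and only the generation stamp advances before updatePipeline is called. A toy restatement, with the Blk class as a stand-in for ExtendedBlock:

    public class PipelineRecoverySketch {
      static final class Blk {
        final String poolId; final long id, numBytes, genStamp;
        Blk(String poolId, long id, long numBytes, long genStamp) {
          this.poolId = poolId; this.id = id;
          this.numBytes = numBytes; this.genStamp = genStamp;
        }
        public String toString() {
          return poolId + ":blk_" + id + "_" + genStamp + " len=" + numBytes;
        }
      }

      // Mirrors: new ExtendedBlock(block.getBlockPoolId(), block.getBlockId(),
      //                            block.getNumBytes(), newGS)
      static Blk withNewGenerationStamp(Blk b, long newGS) {
        return new Blk(b.poolId, b.id, b.numBytes, newGS);
      }

      public static void main(String[] args) {
        Blk old = new Blk("BP-1234-10.0.0.1-1300000000000", 4711L, 1048576L, 1001L);
        System.out.println(withNewGenerationStamp(old, 1002L));
      }
    }
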

