Subject: svn commit: r1155998 - in /hadoop/common/trunk/hdfs: ./ src/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/util/ src/test/hdfs/org/apache/hadoop/fs/ src/test/hdfs/org...
Date: Wed, 10 Aug 2011 01:50:52 -0000
To: hdfs-commits@hadoop.apache.org
From: szetszwo@apache.org

Author: szetszwo
Date: Wed Aug 10 01:50:51 2011
New Revision: 1155998

URL: http://svn.apache.org/viewvc?rev=1155998&view=rev
Log:
HDFS-2239. Reduce access levels of the fields and methods in FSNamesystem.
Added:
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/util/RwLock.java
Modified:
    hadoop/common/trunk/hdfs/CHANGES.txt
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java

Modified: hadoop/common/trunk/hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/CHANGES.txt?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hdfs/CHANGES.txt Wed Aug 10 01:50:51 2011
@@ -654,6 +654,9 @@ Trunk (unreleased changes)
     FileJournalManager during checkpoint process (Ivan Kelly and Todd Lipcon
     via todd)
 
+    HDFS-2239. Reduce access levels of the fields and methods in FSNamesystem.
+    (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
" + block.getNumBytes(); block.commitBlock(commitBlock); - - // Adjust disk space consumption if required - long diff = fileINode.getPreferredBlockSize() - commitBlock.getNumBytes(); - if (diff > 0) { - try { - String path = /* For finding parents */ - namesystem.leaseManager.findPath(fileINode); - namesystem.dir.updateSpaceConsumed(path, 0, -diff - * fileINode.getReplication()); - } catch (IOException e) { - LOG.warn("Unexpected exception while updating disk space.", e); - } - } + + namesystem.updateDiskSpaceConsumed(fileINode, commitBlock); } /** @@ -682,8 +671,26 @@ public class BlockManager { minReplication); } - /** Get all blocks with location information from a datanode. */ - public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode, + /** + * return a list of blocks & their locations on datanode whose + * total size is size + * + * @param datanode on which blocks are located + * @param size total size of blocks + */ + public BlocksWithLocations getBlocks(DatanodeID datanode, long size + ) throws IOException { + namesystem.readLock(); + try { + namesystem.checkSuperuserPrivilege(); + return getBlocksWithLocations(datanode, size); + } finally { + namesystem.readUnlock(); + } + } + + /** Get all blocks with location information from a datanode. */ + private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode, final long size) throws UnregisteredNodeException { final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode); if (node == null) { Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1155998&r1=1155997&r2=1155998&view=diff ============================================================================== --- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original) +++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Wed Aug 10 01:50:51 2011 @@ -649,8 +649,24 @@ public class DatanodeManager { heartbeatManager.addDatanode(nodeDescr); } + /** + * Rereads conf to get hosts and exclude list file names. + * Rereads the files to update the hosts and exclude lists. It + * checks if any of the hosts have changed states: + */ + public void refreshNodes(final Configuration conf) throws IOException { + namesystem.checkSuperuserPrivilege(); + refreshHostsReader(conf); + namesystem.writeLock(); + try { + refreshDatanodes(); + } finally { + namesystem.writeUnlock(); + } + } + /** Reread include/exclude files. */ - public void refreshHostsReader(Configuration conf) throws IOException { + private void refreshHostsReader(Configuration conf) throws IOException { // Reread the conf to get dfs.hosts and dfs.hosts.exclude filenames. // Update the file names and refresh internal includes and excludes list. if (conf == null) { @@ -662,15 +678,12 @@ public class DatanodeManager { } /** - * Rereads the config to get hosts and exclude list file names. - * Rereads the files to update the hosts and exclude lists. It - * checks if any of the hosts have changed states: * 1. Added to hosts --> no further work needed here. * 2. Removed from hosts --> mark AdminState as decommissioned. * 3. Added to exclude --> start decommission. * 4. Removed from exclude --> stop decommission. 
Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Wed Aug 10 01:50:51 2011
@@ -649,8 +649,24 @@ public class DatanodeManager {
     heartbeatManager.addDatanode(nodeDescr);
   }
 
+  /**
+   * Rereads conf to get hosts and exclude list file names.
+   * Rereads the files to update the hosts and exclude lists.  It
+   * checks if any of the hosts have changed states:
+   */
+  public void refreshNodes(final Configuration conf) throws IOException {
+    namesystem.checkSuperuserPrivilege();
+    refreshHostsReader(conf);
+    namesystem.writeLock();
+    try {
+      refreshDatanodes();
+    } finally {
+      namesystem.writeUnlock();
+    }
+  }
+
   /** Reread include/exclude files. */
-  public void refreshHostsReader(Configuration conf) throws IOException {
+  private void refreshHostsReader(Configuration conf) throws IOException {
     // Reread the conf to get dfs.hosts and dfs.hosts.exclude filenames.
     // Update the file names and refresh internal includes and excludes list.
     if (conf == null) {
@@ -662,15 +678,12 @@ public class DatanodeManager {
   }
 
   /**
-   * Rereads the config to get hosts and exclude list file names.
-   * Rereads the files to update the hosts and exclude lists.  It
-   * checks if any of the hosts have changed states:
    * 1. Added to hosts  --> no further work needed here.
    * 2. Removed from hosts --> mark AdminState as decommissioned.
    * 3. Added to exclude --> start decommission.
    * 4. Removed from exclude --> stop decommission.
    */
-  public void refreshDatanodes() throws IOException {
+  private void refreshDatanodes() throws IOException {
     for(DatanodeDescriptor node : datanodeMap.values()) {
       // Check if not include.
       if (!inHostsList(node, null)) {
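The new refreshNodes() deliberately runs the superuser check and the include/exclude file re-read before taking the write lock, holding the lock only for the datanode state transitions. A sketch of the admin-facing call path under that design, mirroring the static helper this patch adds to TestDecommission (the sketch class name is ours):

// Sketch: the whole refresh is now a single DatanodeManager call.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

class RefreshNodesSketch {
  static void refreshNodes(FSNamesystem ns, Configuration conf)
      throws IOException {
    // Privilege check and host-file reread run outside the namesystem lock;
    // only refreshDatanodes() executes under the write lock.
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
  }
}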
Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Aug 10 01:50:51 2011
@@ -1341,7 +1341,7 @@ public class FSDirectory implements Clos
    * @throws QuotaExceededException if the new count violates any quota limit
    * @throws FileNotFound if path does not exist.
    */
-  public void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
+  void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
     throws QuotaExceededException, FileNotFoundException, UnresolvedLinkException {

Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Wed Aug 10 01:50:51 2011
@@ -137,10 +137,6 @@ public class FSImage implements Closeabl
         FSImage.getCheckpointEditsDirs(conf, null));
     storage = new NNStorage(conf, imageDirs, editsDirs);
-    if (ns != null) {
-      storage.setUpgradeManager(ns.upgradeManager);
-    }
-
     if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,
                        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT)) {
       storage.setRestoreFailedStorage(true);
Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Aug 10 01:50:51 2011
@@ -104,13 +104,13 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
-import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
@@ -146,8 +146,8 @@ import org.mortbay.util.ajax.JSON;
  ***************************************************/
 @InterfaceAudience.Private
 @Metrics(context="dfs")
-public class FSNamesystem implements FSConstants, FSNamesystemMBean,
-    FSClusterStats, NameNodeMXBean {
+public class FSNamesystem implements RwLock, FSConstants, FSClusterStats,
+    FSNamesystemMBean, NameNodeMXBean {
   static final Log LOG = LogFactory.getLog(FSNamesystem.class);
 
   private static final ThreadLocal auditBuffer =
@@ -211,20 +211,16 @@ public class FSNamesystem implements FSC
   //
   // Stores the correct file name hierarchy
   //
-  public FSDirectory dir;
+  FSDirectory dir;
 
   private BlockManager blockManager;
   private DatanodeStatistics datanodeStatistics;
 
   // Block pool ID used by this namenode
-  String blockPoolId;
+  private String blockPoolId;
 
-  public LeaseManager leaseManager = new LeaseManager(this);
+  LeaseManager leaseManager = new LeaseManager(this);
 
-  //
-  // Threaded object that checks to see if we have been
-  // getting heartbeats from all clients.
-  //
-  public Daemon lmthread = null;   // LeaseMonitor thread
+  Daemon lmthread = null;   // LeaseMonitor thread
   Daemon smmthread = null;  // SafeModeMonitor thread
 
   Daemon nnrmthread = null; // NamenodeResourceMonitor thread
@@ -330,7 +326,7 @@ public class FSNamesystem implements FSC
     return getStorageDirs(conf, DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
   }
 
-  public static Collection getStorageDirs(Configuration conf,
+  private static Collection getStorageDirs(Configuration conf,
       String propertyName) {
     Collection dirNames = conf.getTrimmedStringCollection(propertyName);
     StartupOption startOpt = NameNode.getStartupOption(conf);
@@ -364,31 +360,31 @@ public class FSNamesystem implements FSC
     return getStorageDirs(conf, DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
   }
 
-  // utility methods to acquire and release read lock and write lock
+  @Override
   public void readLock() {
     this.fsLock.readLock().lock();
   }
-
+  @Override
   public void readUnlock() {
     this.fsLock.readLock().unlock();
   }
-
+  @Override
   public void writeLock() {
     this.fsLock.writeLock().lock();
   }
-
+  @Override
   public void writeUnlock() {
     this.fsLock.writeLock().unlock();
   }
-
+  @Override
   public boolean hasWriteLock() {
     return this.fsLock.isWriteLockedByCurrentThread();
   }
-
-  boolean hasReadLock() {
+  @Override
+  public boolean hasReadLock() {
     return this.fsLock.getReadHoldCount() > 0;
   }
-
+  @Override
   public boolean hasReadOrWriteLock() {
     return hasReadLock() || hasWriteLock();
   }
@@ -473,7 +469,8 @@ public class FSNamesystem implements FSC
     try {
       return new NamespaceInfo(dir.fsImage.getStorage().getNamespaceID(),
           getClusterId(), getBlockPoolId(),
-          dir.fsImage.getStorage().getCTime(), getDistributedUpgradeVersion());
+          dir.fsImage.getStorage().getCTime(),
+          upgradeManager.getUpgradeVersion());
     } finally {
       readUnlock();
     }
@@ -484,7 +481,7 @@ public class FSNamesystem implements FSC
    * Causes heartbeat and lease daemons to stop; waits briefly for
    * them to finish, but a short timeout returns control back to caller.
    */
-  public void close() {
+  void close() {
     fsRunning = false;
     try {
       if (blockManager != null) blockManager.close();
@@ -564,30 +561,6 @@ public class FSNamesystem implements FSC
 
   /////////////////////////////////////////////////////////
   //
-  // These methods are called by secondary namenodes
-  //
-  /////////////////////////////////////////////////////////
-  /**
-   * return a list of blocks & their locations on datanode whose
-   * total size is size
-   *
-   * @param datanode on which blocks are located
-   * @param size total size of blocks
-   */
-  BlocksWithLocations getBlocks(DatanodeID datanode, long size)
-      throws IOException {
-    readLock();
-    try {
-      checkSuperuserPrivilege();
-      return blockManager.getBlocksWithLocations(datanode, size);
-    } finally {
-      readUnlock();
-    }
-  }
-
-
-  /////////////////////////////////////////////////////////
-  //
   // These methods are called by HadoopFS clients
   //
   /////////////////////////////////////////////////////////
@@ -765,7 +738,7 @@ public class FSNamesystem implements FSC
    * @param srcs
    * @throws IOException
    */
-  public void concat(String target, String [] srcs)
+  void concat(String target, String [] srcs)
       throws IOException, UnresolvedLinkException {
     if(FSNamesystem.LOG.isDebugEnabled()) {
       FSNamesystem.LOG.debug("concat " + Arrays.toString(srcs) +
@@ -813,7 +786,7 @@ public class FSNamesystem implements FSC
   }
 
   /** See {@link #concat(String, String[])} */
-  public void concatInternal(String target, String [] srcs)
+  private void concatInternal(String target, String [] srcs)
       throws IOException, UnresolvedLinkException {
     assert hasWriteLock();
 
@@ -1429,7 +1402,7 @@ public class FSNamesystem implements FSC
    * are replicated.  Will return an empty 2-elt array if we want the
    * client to "try again later".
    */
-  public LocatedBlock getAdditionalBlock(String src,
+  LocatedBlock getAdditionalBlock(String src,
       String clientName,
       ExtendedBlock previous,
       HashMap excludedNodes
@@ -1632,7 +1605,7 @@ public class FSNamesystem implements FSC
    * (e.g if not all blocks have reached minimum replication yet)
    * @throws IOException on error (eg lease mismatch, file not open, file deleted)
    */
-  public boolean completeFile(String src, String holder, ExtendedBlock last)
+  boolean completeFile(String src, String holder, ExtendedBlock last)
     throws SafeModeException, UnresolvedLinkException, IOException {
     checkBlock(last);
     boolean success = false;
@@ -2258,7 +2231,7 @@ public class FSNamesystem implements FSC
     return false;
   }
 
-  Lease reassignLease(Lease lease, String src, String newHolder,
+  private Lease reassignLease(Lease lease, String src, String newHolder,
       INodeFileUnderConstruction pendingFile) throws IOException {
     assert hasWriteLock();
     if(newHolder == null)
@@ -2274,6 +2247,22 @@ public class FSNamesystem implements FSC
     return leaseManager.reassignLease(lease, src, newHolder);
   }
 
+  /** Update disk space consumed. */
+  public void updateDiskSpaceConsumed(final INodeFileUnderConstruction fileINode,
+      final Block commitBlock) throws IOException {
+    assert hasWriteLock();
+
+    // Adjust disk space consumption if required
+    final long diff = fileINode.getPreferredBlockSize() - commitBlock.getNumBytes();
+    if (diff > 0) {
+      try {
+        String path = leaseManager.findPath(fileINode);
+        dir.updateSpaceConsumed(path, 0, -diff * fileINode.getReplication());
+      } catch (IOException e) {
+        LOG.warn("Unexpected exception while updating disk space.", e);
+      }
+    }
+  }
 
   private void finalizeINodeFileUnderConstruction(String src,
       INodeFileUnderConstruction pendingFile)
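The arithmetic inside updateDiskSpaceConsumed() releases space that was over-reserved when the block was allocated at the preferred block size. A small self-contained example of the numbers involved (the values are invented for illustration):

// Worked example (values invented): committing a 1 MB block that was
// reserved at a 128 MB preferred block size with replication 3 returns
// 381 MB to the directory's disk-space quota accounting.
class DiskSpaceQuotaExample {
  public static void main(String[] args) {
    final long preferredBlockSize = 128L * 1024 * 1024;
    final long committedBytes = 1L * 1024 * 1024;
    final short replication = 3;

    final long diff = preferredBlockSize - committedBytes;
    if (diff > 0) {
      // Matches dir.updateSpaceConsumed(path, 0, -diff * replication) above.
      final long dsDelta = -diff * replication;
      System.out.println("dsDelta = " + dsDelta + " bytes");
    }
  }
}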
@@ -2473,8 +2462,7 @@ public class FSNamesystem implements FSC
    *
    * @see org.apache.hadoop.hdfs.server.datanode.DataNode
    */
-  public void registerDatanode(DatanodeRegistration nodeReg)
-      throws IOException {
+  void registerDatanode(DatanodeRegistration nodeReg) throws IOException {
     writeLock();
     try {
       getBlockManager().getDatanodeManager().registerDatanode(nodeReg);
@@ -2505,7 +2493,7 @@ public class FSNamesystem implements FSC
    * @return an array of datanode commands
    * @throws IOException
    */
-  public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
+  DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
       long capacity, long dfsUsed, long remaining, long blockPoolUsed,
       int xceiverCount, int xmitsInProgress, int failedVolumes)
         throws IOException {
@@ -2521,7 +2509,7 @@ public class FSNamesystem implements FSC
       }
 
       //check distributed upgrade
-      DatanodeCommand cmd = getDistributedUpgradeCommand();
+      DatanodeCommand cmd = upgradeManager.getBroadcastCommand();
       if (cmd != null) {
         return new DatanodeCommand[] {cmd};
       }
@@ -2737,30 +2725,10 @@ public class FSNamesystem implements FSC
     }
   }
 
-  public Date getStartTime() {
+  Date getStartTime() {
     return new Date(systemStart);
   }
 
-  /**
-   * Rereads the config to get hosts and exclude list file names.
-   * Rereads the files to update the hosts and exclude lists.  It
-   * checks if any of the hosts have changed states:
-   * 1. Added to hosts  --> no further work needed here.
-   * 2. Removed from hosts --> mark AdminState as decommissioned.
-   * 3. Added to exclude --> start decommission.
-   * 4. Removed from exclude --> stop decommission.
-   */
-  public void refreshNodes(Configuration conf) throws IOException {
-    checkSuperuserPrivilege();
-    getBlockManager().getDatanodeManager().refreshHostsReader(conf);
-    writeLock();
-    try {
-      getBlockManager().getDatanodeManager().refreshDatanodes();
-    } finally {
-      writeUnlock();
-    }
-  }
-
   void finalizeUpgrade() throws IOException {
     checkSuperuserPrivilege();
     getFSImage().finalizeUpgrade();
@@ -2908,7 +2876,7 @@ public class FSNamesystem implements FSC
     // verify whether a distributed upgrade needs to be started
     boolean needUpgrade = false;
     try {
-      needUpgrade = startDistributedUpgradeIfNeeded();
+      needUpgrade = upgradeManager.startUpgrade();
     } catch(IOException e) {
       FSNamesystem.LOG.error("IOException in startDistributedUpgradeIfNeeded", e);
     }
@@ -3101,10 +3069,10 @@ public class FSNamesystem implements FSC
         leaveMsg = "Safe mode will be turned off automatically";
       }
       if(isManual()) {
-        if(getDistributedUpgradeState())
+        if(upgradeManager.getUpgradeState())
           return leaveMsg + " upon completion of " +
             "the distributed upgrade: upgrade progress = " +
-            getDistributedUpgradeStatus() + "%";
+            upgradeManager.getUpgradeStatus() + "%";
         leaveMsg = "Use \"hdfs dfsadmin -safemode leave\" to turn safe mode off";
       }
@@ -3306,7 +3274,7 @@ public class FSNamesystem implements FSC
   /**
    * Set the total number of blocks in the system.
    */
-  void setBlockTotal() {
+  private void setBlockTotal() {
     // safeMode is volatile, and may be set to null at any time
     SafeModeInfo safeMode = this.safeMode;
     if (safeMode == null)
@@ -3327,7 +3295,7 @@ public class FSNamesystem implements FSC
    * Get the total number of COMPLETE blocks in the system.
    * For safe mode only complete blocks are counted.
    */
-  long getCompleteBlocksTotal() {
+  private long getCompleteBlocksTotal() {
     // Calculate number of blocks under construction
     long numUCBlocks = 0;
     readLock();
@@ -3398,7 +3366,7 @@ public class FSNamesystem implements FSC
       NameNode.stateChangeLog.info("STATE* Safe mode is already OFF.");
       return;
     }
-    if(getDistributedUpgradeState())
+    if(upgradeManager.getUpgradeState())
       throw new SafeModeException("Distributed upgrade is in progress",
                                   safeMode);
     safeMode.leave(checkForUpgrades);
@@ -3487,26 +3455,6 @@ public class FSNamesystem implements FSC
     return upgradeManager.processUpgradeCommand(comm);
   }
 
-  int getDistributedUpgradeVersion() {
-    return upgradeManager.getUpgradeVersion();
-  }
-
-  UpgradeCommand getDistributedUpgradeCommand() throws IOException {
-    return upgradeManager.getBroadcastCommand();
-  }
-
-  boolean getDistributedUpgradeState() {
-    return upgradeManager.getUpgradeState();
-  }
-
-  short getDistributedUpgradeStatus() {
-    return upgradeManager.getUpgradeStatus();
-  }
-
-  boolean startDistributedUpgradeIfNeeded() throws IOException {
-    return upgradeManager.startUpgrade();
-  }
-
   PermissionStatus createFsOwnerPermissions(FsPermission permission) {
     return new PermissionStatus(fsOwner.getShortUserName(), supergroup, permission);
   }
@@ -3536,7 +3484,8 @@ public class FSNamesystem implements FSC
     return checkPermission(path, false, null, null, null, null);
   }
 
-  private void checkSuperuserPrivilege() throws AccessControlException {
+  /** Check if the user has superuser privilege. */
+  public void checkSuperuserPrivilege() throws AccessControlException {
     if (isPermissionEnabled) {
       FSPermissionChecker.checkSuperuserPrivilege(fsOwner, supergroup);
     }
@@ -3644,7 +3593,7 @@ public class FSNamesystem implements FSC
    * Register the FSNamesystem MBean using the name
    *        "hadoop:service=NameNode,name=FSNamesystemState"
    */
-  void registerMBean() {
+  private void registerMBean() {
     // We can only implement one MXBean interface, so we keep the old one.
     try {
       StandardMBean bean = new StandardMBean(this, FSNamesystemMBean.class);
@@ -3805,7 +3754,7 @@ public class FSNamesystem implements FSC
   }
 
   /** @see updatePipeline(String, ExtendedBlock, ExtendedBlock, DatanodeID[]) */
-  void updatePipelineInternal(String clientName, ExtendedBlock oldBlock,
+  private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock,
       ExtendedBlock newBlock, DatanodeID[] newNodes)
       throws IOException {
     assert hasWriteLock();
@@ -4043,7 +3992,7 @@ public class FSNamesystem implements FSC
    * Returns the DelegationTokenSecretManager instance in the namesystem.
    * @return delegation token secret manager object
    */
-  public DelegationTokenSecretManager getDelegationTokenSecretManager() {
+  DelegationTokenSecretManager getDelegationTokenSecretManager() {
     return dtSecretManager;
   }
 
@@ -4096,7 +4045,7 @@ public class FSNamesystem implements FSC
    * @throws InvalidToken
    * @throws IOException
    */
-  public long renewDelegationToken(Token token)
+  long renewDelegationToken(Token token)
       throws InvalidToken, IOException {
     long expiryTime;
     writeLock();
@@ -4127,7 +4076,7 @@ public class FSNamesystem implements FSC
    * @param token
    * @throws IOException
    */
-  public void cancelDelegationToken(Token token)
+  void cancelDelegationToken(Token token)
       throws IOException {
     writeLock();
     try {
Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Aug 10 01:50:51 2011
@@ -626,7 +626,7 @@ public class NameNode implements Namenod
           "Unexpected not positive size: "+size);
     }
 
-    return namesystem.getBlocks(datanode, size);
+    return namesystem.getBlockManager().getBlocks(datanode, size);
   }
 
   @Override // NamenodeProtocol
@@ -1039,7 +1039,8 @@ public class NameNode implements Namenod
 
   @Override // ClientProtocol
   public void refreshNodes() throws IOException {
-    namesystem.refreshNodes(new HdfsConfiguration());
+    namesystem.getBlockManager().getDatanodeManager().refreshNodes(
+        new HdfsConfiguration());
   }
 
   @Override // NamenodeProtocol
Added: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/util/RwLock.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/util/RwLock.java?rev=1155998&view=auto
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/util/RwLock.java (added)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/util/RwLock.java Wed Aug 10 01:50:51 2011
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+/** Read-write lock interface. */
+public interface RwLock {
+  /** Acquire read lock. */
+  public void readLock();
+
+  /** Release read lock. */
+  public void readUnlock();
+
+  /** Check if the current thread holds read lock. */
+  public boolean hasReadLock();
+
+  /** Acquire write lock. */
+  public void writeLock();
+
+  /** Release write lock. */
+  public void writeUnlock();
+
+  /** Check if the current thread holds write lock. */
+  public boolean hasWriteLock();
+
+  /** Check if the current thread holds read or write lock. */
+  public boolean hasReadOrWriteLock();
+}
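The new interface captures the try/finally lock discipline that FSNamesystem's callers already follow. A minimal usage sketch against the interface (the sketch class is ours, not part of the patch; any RwLock implementation, such as FSNamesystem after this patch, is driven the same way):

// Sketch: the lock discipline RwLock standardizes.
import org.apache.hadoop.hdfs.util.RwLock;

class RwLockIdiomSketch {
  static void readOnly(RwLock lock) {
    lock.readLock();
    try {
      assert lock.hasReadLock();
      // read shared state here
    } finally {
      lock.readUnlock();
    }
  }

  static void mutating(RwLock lock) {
    lock.writeLock();
    try {
      assert lock.hasWriteLock();
      // mutate shared state here
    } finally {
      lock.writeUnlock();
    }
  }
}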
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/TestResolveHdfsSymlink.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/TestResolveHdfsSymlink.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/TestResolveHdfsSymlink.java Wed Aug 10 01:50:51 2011
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
@@ -47,8 +48,8 @@ public class TestResolveHdfsSymlink {
   public static void setUp() throws IOException {
     Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).build();
-    cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
     cluster.waitActive();
+    NameNodeAdapter.getDtSecretManager(cluster.getNamesystem()).startThreads();
   }
 
   @AfterClass

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java Wed Aug 10 01:50:51 2011
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 
@@ -52,7 +53,7 @@ public class TestViewFileSystemHdfs exte
     SupportsBlocks = true;
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     cluster.waitClusterUp();
-    cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(cluster.getNamesystem()).startThreads();
     fHdfs = cluster.getFileSystem();
     defaultWorkingDirectory = fHdfs.makeQualified(
         new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java Wed Aug 10 01:50:51 2011
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 
@@ -52,7 +53,7 @@ public class TestViewFsHdfs extends View
     SupportsBlocks = true;
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     cluster.waitClusterUp();
-    cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(cluster.getNamesystem()).startThreads();
     fc = FileContext.getFileContext(cluster.getURI(0), CONF);
     defaultWorkingDirectory = fc.makeQualified(
         new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Aug 10 01:50:51 2011
@@ -48,13 +48,13 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -71,7 +71,6 @@ import org.apache.hadoop.security.Refres
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
@@ -1659,9 +1658,7 @@ public class MiniDFSCluster {
    * Set the softLimit and hardLimit of client lease periods
    */
   public void setLeasePeriod(long soft, long hard) {
-    final FSNamesystem namesystem = getNamesystem();
-    namesystem.leaseManager.setLeasePeriod(soft, hard);
-    namesystem.lmthread.interrupt();
+    NameNodeAdapter.setLeasePeriod(getNamesystem(), soft, hard);
   }
 
   /**

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java Wed Aug 10 01:50:51 2011
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import java.io.*;
+import java.io.DataOutputStream;
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -27,10 +28,6 @@ import org.apache.hadoop.hdfs.protocol.F
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 
 public class TestDFSRemove extends junit.framework.TestCase {
-  static int countLease(MiniDFSCluster cluster) {
-    return cluster.getNamesystem().leaseManager.countLease();
-  }
-
   final Path dir = new Path("/test/remove/");
 
   void list(FileSystem fs, String name) throws IOException {

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java Wed Aug 10 01:50:51 2011
@@ -17,16 +17,18 @@
  */
 package org.apache.hadoop.hdfs;
 
-import java.io.*;
+import java.io.DataOutputStream;
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 
 public class TestDFSRename extends junit.framework.TestCase {
   static int countLease(MiniDFSCluster cluster) {
-    return cluster.getNamesystem().leaseManager.countLease();
+    return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).countLease();
   }
 
   final Path dir = new Path("/test/rename/");
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java Wed Aug 10 01:50:51 2011
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -30,14 +34,12 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import static org.junit.Assert.*;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -224,7 +226,7 @@ public class TestDecommission {
     }
     nodes.add(nodename);
     writeConfigFile(excludeFile, nodes);
-    cluster.getNamesystem(nnIndex).refreshNodes(conf);
+    refreshNodes(cluster.getNamesystem(nnIndex), conf);
     DatanodeInfo ret = NameNodeAdapter.getDatanode(
         cluster.getNamesystem(nnIndex), info[index]);
     waitNodeState(ret, waitForState);
@@ -235,7 +237,7 @@ public class TestDecommission {
   private void recomissionNode(DatanodeInfo decommissionedNode) throws IOException {
     LOG.info("Recommissioning node: " + decommissionedNode.getName());
     writeConfigFile(excludeFile, null);
-    cluster.getNamesystem().refreshNodes(conf);
+    refreshNodes(cluster.getNamesystem(), conf);
     waitNodeState(decommissionedNode, AdminStates.NORMAL);
   }
 
@@ -284,6 +286,11 @@ public class TestDecommission {
       validateCluster(client, numDatanodes);
     }
   }
+
+  static void refreshNodes(final FSNamesystem ns, final Configuration conf
+      ) throws IOException {
+    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
+  }
 
   private void verifyStats(NameNode namenode, FSNamesystem fsn,
       DatanodeInfo node, boolean decommissioning) throws InterruptedException {
@@ -465,7 +472,7 @@ public class TestDecommission {
 
     // Stop decommissioning and verify stats
     writeConfigFile(excludeFile, null);
-    fsn.refreshNodes(conf);
+    refreshNodes(fsn, conf);
     DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
     waitNodeState(ret, AdminStates.NORMAL);
     verifyStats(namenode, fsn, ret, false);
@@ -509,7 +516,7 @@ public class TestDecommission {
       writeConfigFile(hostsFile, list);
 
       for (int j = 0; j < numNameNodes; j++) {
-        cluster.getNamesystem(j).refreshNodes(conf);
+        refreshNodes(cluster.getNamesystem(j), conf);
 
         DFSClient client = getDfsClient(cluster.getNameNode(j), conf);
         DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java Wed Aug 10 01:50:51 2011
@@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Assert;
 import org.junit.Test;
@@ -31,7 +32,8 @@ import org.mockito.Mockito;
 
 public class TestLease {
   static boolean hasLease(MiniDFSCluster cluster, Path src) {
-    return cluster.getNamesystem().leaseManager.getLeaseByPath(src.toString()) != null;
+    return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()
+        ).getLeaseByPath(src.toString()) != null;
   }
 
   final Path dir = new Path("/test/lease/");

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java Wed Aug 10 01:50:51 2011
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.TestInterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 
 public class TestLeaseRecovery extends junit.framework.TestCase {
   static final int BLOCK_SIZE = 1024;
@@ -133,7 +134,7 @@ public class TestLeaseRecovery extends j
     DFSTestUtil.waitReplication(dfs, filepath, (short)1);
     waitLeaseRecovery(cluster);
     // verify that we still cannot recover the lease
-    LeaseManager lm = cluster.getNamesystem().leaseManager;
+    LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
     assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
     cluster.getNameNode().setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
   }

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java Wed Aug 10 01:50:51 2011
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -48,7 +49,8 @@ import org.junit.Test;
 
 public class TestDelegationToken {
   private MiniDFSCluster cluster;
-  Configuration config;
+  private DelegationTokenSecretManager dtSecretManager;
+  private Configuration config;
   private static final Log LOG = LogFactory.getLog(TestDelegationToken.class);
 
   @Before
@@ -61,7 +63,9 @@ public class TestDelegationToken {
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
     cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
-    cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    dtSecretManager = NameNodeAdapter.getDtSecretManager(
+        cluster.getNamesystem());
+    dtSecretManager.startThreads();
   }
 
   @After
@@ -73,8 +77,6 @@ public class TestDelegationToken {
 
   private Token generateDelegationToken(
       String owner, String renewer) {
-    DelegationTokenSecretManager dtSecretManager = cluster.getNamesystem()
-        .getDelegationTokenSecretManager();
     DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
         owner), new Text(renewer), null);
     return new Token(dtId, dtSecretManager);
@@ -82,8 +84,6 @@ public class TestDelegationToken {
 
   @Test
   public void testDelegationTokenSecretManager() throws Exception {
-    DelegationTokenSecretManager dtSecretManager = cluster.getNamesystem()
-        .getDelegationTokenSecretManager();
     Token token = generateDelegationToken(
         "SomeUser", "JobTracker");
     // Fake renewer should not be able to renew
@@ -122,8 +122,6 @@ public class TestDelegationToken {
 
   @Test
   public void testCancelDelegationToken() throws Exception {
-    DelegationTokenSecretManager dtSecretManager = cluster.getNamesystem()
-        .getDelegationTokenSecretManager();
     Token token = generateDelegationToken(
         "SomeUser", "JobTracker");
     //Fake renewer should not be able to renew
@@ -144,7 +142,6 @@ public class TestDelegationToken {
 
   @Test
   public void testDelegationTokenDFSApi() throws Exception {
-    DelegationTokenSecretManager dtSecretManager = cluster.getNamesystem().getDelegationTokenSecretManager();
     DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
     Token token = dfs.getDelegationToken("JobTracker");
     DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
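With the secret manager fetched once through NameNodeAdapter, minting a test token becomes a two-step pattern. A sketch lifted from generateDelegationToken() above (the sketch class name is ours; the generic type parameters are an assumption, since this mail renders Token without them):

// Sketch of the token-minting pattern in generateDelegationToken() above.
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

class TokenMintSketch {
  static Token<DelegationTokenIdentifier> mint(
      DelegationTokenSecretManager dtSecretManager,
      String owner, String renewer) {
    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(
        new Text(owner), new Text(renewer), null);
    return new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
  }
}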
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Wed Aug 10 01:50:51 2011
@@ -31,19 +31,20 @@ import java.util.Enumeration;
 
 import junit.framework.Assert;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.io.Text;
-import org.apache.commons.logging.*;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.TestDoAsEffectiveUser;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -98,7 +99,7 @@ public class TestDelegationTokenForProxy
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
     cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
-    cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(cluster.getNamesystem()).startThreads();
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);
   }

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java Wed Aug 10 01:50:51 2011
@@ -413,7 +413,7 @@ public class TestBlocksWithNotEnoughRack
         fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
     String name = locs[0].getNames()[0];
     DFSTestUtil.writeFile(localFileSys, excludeFile, name);
-    ns.refreshNodes(conf);
+    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
     DFSTestUtil.waitForDecommission(fs, name);
 
     // Check the block still has sufficient # replicas across racks
@@ -468,7 +468,7 @@ public class TestBlocksWithNotEnoughRack
       if (!top.startsWith("/rack2")) {
         String name = top.substring("/rack1".length()+1);
         DFSTestUtil.writeFile(localFileSys, excludeFile, name);
-        ns.refreshNodes(conf);
+        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
         DFSTestUtil.waitForDecommission(fs, name);
         break;
       }

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java Wed Aug 10 01:50:51 2011
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.io.IOException;
 import java.util.ArrayList;
 
 import junit.framework.TestCase;
@@ -76,7 +75,7 @@ public class TestHeartbeatHandling exten
           dd.addBlockToBeReplicated(
               new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
         }
-        DatanodeCommand[]cmds = sendHeartBeat(nodeReg, dd, namesystem);
+        DatanodeCommand[]cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
         assertEquals(1, cmds.length);
         assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
         assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
@@ -86,26 +85,26 @@ public class TestHeartbeatHandling exten
           blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
         }
         dd.addBlocksToBeInvalidated(blockList);
-        cmds = sendHeartBeat(nodeReg, dd, namesystem);
+        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
 
-        cmds = sendHeartBeat(nodeReg, dd, namesystem);
+        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
 
-        cmds = sendHeartBeat(nodeReg, dd, namesystem);
+        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
 
-        cmds = sendHeartBeat(nodeReg, dd, namesystem);
+        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
        assertEquals(null, cmds);
      }
    } finally {
@@ -115,10 +114,4 @@ public class TestHeartbeatHandling exten
      cluster.shutdown();
    }
  }
-
-  private static DatanodeCommand[] sendHeartBeat(DatanodeRegistration nodeReg,
-      DatanodeDescriptor dd, FSNamesystem namesystem) throws IOException {
-    return namesystem.handleHeartbeat(nodeReg, dd.getCapacity(),
-        dd.getDfsUsed(), dd.getRemaining(), dd.getBlockPoolUsed(), 0, 0, 0);
-  }
 }
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Wed Aug 10 01:50:51 2011
@@ -21,7 +21,10 @@ import java.io.IOException;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.ipc.Server;
 
 /**
@@ -52,11 +55,32 @@ public class NameNodeAdapter {
     return namenode.server;
   }
 
+  public static DelegationTokenSecretManager getDtSecretManager(
+      final FSNamesystem ns) {
+    return ns.getDelegationTokenSecretManager();
+  }
+
+  public static DatanodeCommand[] sendHeartBeat(DatanodeRegistration nodeReg,
+      DatanodeDescriptor dd, FSNamesystem namesystem) throws IOException {
+    return namesystem.handleHeartbeat(nodeReg, dd.getCapacity(),
+        dd.getDfsUsed(), dd.getRemaining(), dd.getBlockPoolUsed(), 0, 0, 0);
+  }
+
   public static boolean setReplication(final FSNamesystem ns,
       final String src, final short replication) throws IOException {
     return ns.setReplication(src, replication);
   }
 
+  public static LeaseManager getLeaseManager(final FSNamesystem ns) {
+    return ns.leaseManager;
+  }
+
+  /** Set the softLimit and hardLimit of client lease periods. */
+  public static void setLeasePeriod(final FSNamesystem namesystem, long soft, long hard) {
+    getLeaseManager(namesystem).setLeasePeriod(soft, hard);
+    namesystem.lmthread.interrupt();
+  }
+
   public static String getLeaseHolderForPath(NameNode namenode, String path) {
     return namenode.getNamesystem().leaseManager.getLeaseByPath(path).getHolder();
   }

Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java?rev=1155998&r1=1155997&r2=1155998&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java Wed Aug 10 01:50:51 2011
@@ -148,7 +148,7 @@ public class TestDecommissioningStatus {
   /*
    * Decommissions the node at the given index
    */
-  private String decommissionNode(FSNamesystem namesystem, Configuration conf,
+  private String decommissionNode(FSNamesystem namesystem,
       DFSClient client, FileSystem localFileSys, int nodeIndex)
       throws IOException {
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
@@ -160,7 +160,6 @@ public class TestDecommissioningStatus {
     ArrayList nodes = new ArrayList(decommissionedNodes);
     nodes.add(nodename);
     writeConfigFile(localFileSys, excludeFile, nodes);
-    namesystem.refreshNodes(conf);
     return nodename;
   }
 
@@ -203,8 +202,8 @@ public class TestDecommissioningStatus {
     FSNamesystem fsn = cluster.getNamesystem();
     final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
     for (int iteration = 0; iteration < numDatanodes; iteration++) {
-      String downnode = decommissionNode(fsn, conf, client, localFileSys,
-          iteration);
+      String downnode = decommissionNode(fsn, client, localFileSys, iteration);
+      dm.refreshNodes(conf);
       decommissionedNodes.add(downnode);
       Thread.sleep(5000);
       final List decommissioningNodes = dm.getDecommissioningNodes();
@@ -224,7 +223,7 @@ public class TestDecommissioningStatus {
     // This will remove the datanodes from decommissioning list and
     // make them available again.
     writeConfigFile(localFileSys, excludeFile, null);
-    fsn.refreshNodes(conf);
+    dm.refreshNodes(conf);
     st1.close();
     cleanupFile(fileSys, file1);
     cleanupFile(fileSys, file2);
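Taken together, NameNodeAdapter is now the single test-side door into namesystem internals. A closing sketch of how a test counts leases after this patch, instead of reading the formerly public leaseManager field (the sketch class is ours; it assumes a running MiniDFSCluster and mirrors TestDFSRename.countLease above):

// Sketch: test-side access through NameNodeAdapter after this patch.
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

class AdapterUsageSketch {
  static int countLeases(MiniDFSCluster cluster) {
    LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
    return lm.countLease(); // no direct use of FSNamesystem.leaseManager
  }
}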