Subject: svn commit: r1431753 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/main/java/org/apache/hadoop/hdfs/server/datanode/
Date: Fri, 11 Jan 2013 00:09:37 -0000
To: hdfs-commits@hadoop.apache.org
From: eli@apache.org
Reply-To: hdfs-dev@hadoop.apache.org

Author: eli
Date: Fri Jan 11 00:09:37 2013
New Revision: 1431753

URL: http://svn.apache.org/viewvc?rev=1431753&view=rev
Log:
HDFS-4377. Some trivial DN comment cleanup. Contributed by Eli Collins

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1431753&r1=1431752&r2=1431753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Jan 11 00:09:37 2013
@@ -478,6 +478,8 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4363. Combine PBHelper and HdfsProtoUtil and remove redundant
     methods. (suresh)
 
+    HDFS-4377. Some trivial DN comment cleanup. (eli)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1431753&r1=1431752&r2=1431753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Fri Jan 11 00:09:37 2013
@@ -171,20 +171,19 @@ public class BlockManager {
    */
   private final Set<Block> postponedMisreplicatedBlocks = Sets.newHashSet();
 
-  //
-  // Keeps a TreeSet for every named node. Each treeset contains
-  // a list of the blocks that are "extra" at that location. We'll
-  // eventually remove these extras.
-  // Mapping: StorageID -> TreeSet<Block>
-  //
+  /**
+   * Maps a StorageID to the set of blocks that are "extra" for this
+   * DataNode. We'll eventually remove these extras.
+   */
   public final Map<String, LightWeightLinkedSet<Block>> excessReplicateMap =
     new TreeMap<String, LightWeightLinkedSet<Block>>();
 
-  //
-  // Store set of Blocks that need to be replicated 1 or more times.
-  // We also store pending replication-orders.
-  //
+  /**
+   * Store set of Blocks that need to be replicated 1 or more times.
+   * We also store pending replication-orders.
+   */
   public final UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
+
   @VisibleForTesting
   final PendingReplicationBlocks pendingReplications;
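For context on the data structure documented above: excessReplicateMap indexes over-replicated blocks by the storage ID of the DataNode holding the extra copy. Below is a minimal sketch of that shape, assuming plain java.util collections and numeric block IDs in place of Hadoop's LightWeightLinkedSet<Block>; it is illustrative only, not the HDFS implementation, and all names in it are hypothetical.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

/** Illustrative sketch: tracks "extra" replicas per DataNode storage ID. */
class ExcessReplicaMapSketch {
  // StorageID -> block IDs that are over-replicated on that DataNode
  private final Map<String, Set<Long>> excessReplicateMap =
      new TreeMap<String, Set<Long>>();

  /** Record that blockId has an excess replica on the given storage. */
  void addExcess(String storageId, long blockId) {
    Set<Long> excess = excessReplicateMap.get(storageId);
    if (excess == null) {
      excess = new HashSet<Long>();
      excessReplicateMap.put(storageId, excess);
    }
    excess.add(blockId);
  }

  /** Forget the block once its excess replica has actually been deleted. */
  void removeExcess(String storageId, long blockId) {
    Set<Long> excess = excessReplicateMap.get(storageId);
    if (excess != null) {
      excess.remove(blockId);
      if (excess.isEmpty()) {
        excessReplicateMap.remove(storageId);
      }
    }
  }
}

A TreeMap mirrors the field's declared type in BlockManager and keeps storage IDs sorted; the per-storage sets are pruned when empty so the map only holds DataNodes that currently have excess replicas.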
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1431753&r1=1431752&r2=1431753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Jan 11 00:09:37 2013
@@ -970,29 +970,27 @@ public class DataNode extends Configured
       dnId.setStorageID(createNewStorageId(dnId.getXferPort()));
   }
 
+  /**
+   * @return a unique storage ID of form "DS-randInt-ipaddr-port-timestamp"
+   */
   static String createNewStorageId(int port) {
-    /* Return
-     * "DS-randInt-ipaddr-currentTimeMillis"
-     * It is considered extermely rare for all these numbers to match
-     * on a different machine accidentally for the following
-     * a) SecureRandom(INT_MAX) is pretty much random (1 in 2 billion), and
-     * b) Good chance ip address would be different, and
-     * c) Even on the same machine, Datanode is designed to use different ports.
-     * d) Good chance that these are started at different times.
-     * For a confict to occur all the 4 above have to match!.
-     * The format of this string can be changed anytime in future without
-     * affecting its functionality.
-     */
+    // It is unlikely that we will create a non-unique storage ID
+    // for the following reasons:
+    // a) SecureRandom is a cryptographically strong random number generator
+    // b) IP addresses will likely differ on different hosts
+    // c) DataNode xfer ports will differ on the same host
+    // d) StorageIDs will likely be generated at different times (in ms)
+    // A conflict requires that all four conditions are violated.
+    // NB: The format of this string can be changed in the future without
+    //     requiring that old StorageIDs be updated.
     String ip = "unknownIP";
     try {
       ip = DNS.getDefaultIP("default");
     } catch (UnknownHostException ignored) {
-      LOG.warn("Could not find ip address of \"default\" inteface.");
+      LOG.warn("Could not find an IP address for the \"default\" interface.");
     }
-
     int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
-    return "DS-" + rand + "-" + ip + "-" + port + "-"
-        + Time.now();
+    return "DS-" + rand + "-" + ip + "-" + port + "-" + Time.now();
   }
 
   /** Ensure the authentication method is kerberos */
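The rewritten comment lists four independent reasons a storage ID collision is unlikely: a strong random component, differing IP addresses, differing xfer ports, and differing start times. A minimal sketch of the same "DS-randInt-ipaddr-port-timestamp" scheme follows; it substitutes java.net.InetAddress for Hadoop's DNS.getDefaultIP("default") and System.currentTimeMillis() for Time.now(), so treat it as an approximation rather than the DataNode's exact code.

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.security.SecureRandom;

/** Illustrative sketch of the "DS-randInt-ipaddr-port-timestamp" format. */
class StorageIdSketch {
  static String newStorageId(int xferPort) {
    // a) SecureRandom: collision odds on this component alone are ~1 in 2^31
    int rand = new SecureRandom().nextInt(Integer.MAX_VALUE);
    // b) IP addresses will likely differ across hosts
    String ip = "unknownIP";
    try {
      ip = InetAddress.getLocalHost().getHostAddress();
    } catch (UnknownHostException ignored) {
      // fall back to the placeholder, as the DataNode does
    }
    // c) xfer ports differ between DataNodes on one host, and
    // d) millisecond timestamps differ between DataNodes started at
    //    different times
    return "DS-" + rand + "-" + ip + "-" + xferPort + "-"
        + System.currentTimeMillis();
  }

  public static void main(String[] args) {
    // Example output: DS-1660515327-192.168.1.10-50010-1357862977123
    System.out.println(newStorageId(50010));
  }
}

All four components must collide simultaneously for two DataNodes to generate the same ID, which is exactly the argument the new comment makes.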
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1431753&r1=1431752&r2=1431753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Fri Jan 11 00:09:37 2013
@@ -62,7 +62,7 @@ import org.apache.hadoop.util.DiskChecke
  */
 @InterfaceAudience.Private
 public class DataStorage extends Storage {
-  // Constants
+
   public final static String BLOCK_SUBDIR_PREFIX = "subdir";
   final static String BLOCK_FILE_PREFIX = "blk_";
   final static String COPY_FILE_PREFIX = "dncp_";
@@ -71,13 +71,13 @@ public class DataStorage extends Storage
   public final static String STORAGE_DIR_FINALIZED = "finalized";
   public final static String STORAGE_DIR_TMP = "tmp";
 
-  /** Access to this variable is guarded by "this" */
+  /** Unique storage ID. {@see DataNode#createNewStorageId(int)} for details */
   private String storageID;
 
-  // flag to ensure initialzing storage occurs only once
-  private boolean initilized = false;
+  // Flag to ensure we only initialize storage once
+  private boolean initialized = false;
 
-  // BlockPoolStorage is map of <Block pool Id, BlockPoolSliceStorage>
+  // Maps block pool IDs to block pool storage
   private Map<String, BlockPoolSliceStorage> bpStorageMap
       = Collections.synchronizedMap(new HashMap<String, BlockPoolSliceStorage>());

@@ -130,7 +130,7 @@ public class DataStorage extends Storage
   synchronized void recoverTransitionRead(DataNode datanode,
       NamespaceInfo nsInfo, Collection<File> dataDirs,
       StartupOption startOpt) throws IOException {
-    if (initilized) {
+    if (initialized) {
       // DN storage has been initialized, no need to do anything
       return;
     }
@@ -200,7 +200,7 @@ public class DataStorage extends Storage
     this.writeAll();
 
     // 4. mark DN storage is initilized
-    this.initilized = true;
+    this.initialized = true;
   }
 
   /**
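The renamed initialized flag is a standard initialize-once guard: recoverTransitionRead() is synchronized and returns immediately on every call after the first. Here is a sketch of that pattern under the same shape, with String standing in for the BlockPoolSliceStorage type the real map holds; the names and the body of the method are hypothetical, for illustration only.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

/** Illustrative sketch of DataStorage's initialize-once guard. */
class StorageStateSketch {
  // Flag to ensure we only initialize storage once
  private boolean initialized = false;

  // Maps block pool IDs to per-pool storage state (String stands in
  // for BlockPoolSliceStorage in this sketch)
  private final Map<String, String> bpStorageMap =
      Collections.synchronizedMap(new HashMap<String, String>());

  /** Safe to call repeatedly; only the first call does any work. */
  synchronized void recoverTransitionRead(String bpId) {
    if (initialized) {
      // storage has been initialized, no need to do anything
      return;
    }
    // ... analyze, recover, and upgrade storage directories here ...
    bpStorageMap.put(bpId, "storage-for-" + bpId);
    // mark DN storage as initialized
    initialized = true;
  }
}

Because the method is synchronized, concurrent callers cannot both observe initialized == false, so the setup work runs at most once per process.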