hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From s..@apache.org
Subject svn commit: r801500 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
Date Thu, 06 Aug 2009 02:39:29 GMT
Author: shv
Date: Thu Aug  6 02:39:28 2009
New Revision: 801500

URL: http://svn.apache.org/viewvc?rev=801500&view=rev
Log:
HDFS-529. Use BlockInfo instead of Block to avoid redundant block searches in BlockManager.
Contributed by Konstantin Shvachko.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=801500&r1=801499&r2=801500&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Thu Aug  6 02:39:28 2009
@@ -83,7 +83,11 @@
 
     HDFS-527. Remove/deprecate unnecessary DFSClient constructors.  (szetszwo)
 
+    HDFS-529. Use BlockInfo instead of Block to avoid redundant block searches
+    in BlockManager. (shv)
+
   BUG FIXES
+
     HDFS-76. Better error message to users when commands fail because of 
     lack of quota. Allow quota to be set even if the limit is lower than
     current consumption. (Boris Shkolnik via rangadi)

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=801500&r1=801499&r2=801500&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Thu Aug  6 02:39:28 2009
@@ -893,11 +893,11 @@
             } else {
               // new replica is larger in size than existing block.
               // Mark pre-existing replicas as corrupt.
-              int numNodes = blocksMap.numNodes(block);
+              int numNodes = storedBlock.numNodes();
               int count = 0;
               DatanodeDescriptor nodes[] = new DatanodeDescriptor[numNodes];
-              Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
-              for (; it != null && it.hasNext(); ) {
+              Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(storedBlock);
+              while (it.hasNext()) {
                 DatanodeDescriptor dd = it.next();
                 if (!dd.equals(node)) {
                   nodes[count++] = dd;
@@ -1262,9 +1262,9 @@
     return blocksMap.size() - (int)pendingDeletionBlocksCount;
   }
 
-  DatanodeDescriptor[] getNodes(Block block) {
+  DatanodeDescriptor[] getNodes(BlockInfo block) {
     DatanodeDescriptor[] nodes =
-      new DatanodeDescriptor[blocksMap.numNodes(block)];
+      new DatanodeDescriptor[block.numNodes()];
     Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
     for (int i = 0; it != null && it.hasNext(); i++) {
       nodes[i] = it.next();

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java?rev=801500&r1=801499&r2=801500&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java Thu Aug  6 02:39:28 2009
@@ -108,9 +108,20 @@
     return map.get(b);
   }
 
-  /** Returned Iterator does not support. */
+  /**
+   * Searches for the block in the BlocksMap and 
+   * returns Iterator that iterates through the nodes the block belongs to.
+   */
   Iterator<DatanodeDescriptor> nodeIterator(Block b) {
-    return new NodeIterator(map.get(b));
+    return nodeIterator(map.get(b));
+  }
+
+  /**
+   * For a block that has already been retrieved from the BlocksMap
+   * returns Iterator that iterates through the nodes the block belongs to.
+   */
+  Iterator<DatanodeDescriptor> nodeIterator(BlockInfo storedBlock) {
+    return new NodeIterator(storedBlock);
   }
 
   /** counts number of containing nodes. Better than using iterator. */

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=801500&r1=801499&r2=801500&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Aug  6 02:39:28 2009
@@ -1030,13 +1030,16 @@
     synchronized (this) {
       INodeFileUnderConstruction file = (INodeFileUnderConstruction)dir.getFileINode(src);
 
-      Block[] blocks = file.getBlocks();
+      BlockInfo[] blocks = file.getBlocks();
       if (blocks != null && blocks.length > 0) {
-        Block last = blocks[blocks.length-1];
+        BlockInfo last = blocks[blocks.length-1];
+        // this is a redundant search in blocksMap
+        // should be resolved by the new implementation of append
         BlockInfo storedBlock = blockManager.getStoredBlock(last);
+        assert last == storedBlock : "last block should be in the blocksMap";
         if (file.getPreferredBlockSize() > storedBlock.getNumBytes()) {
           long fileLength = file.computeContentSummary().getLength();
-          DatanodeDescriptor[] targets = blockManager.getNodes(last);
+          DatanodeDescriptor[] targets = blockManager.getNodes(storedBlock);
           // remove the replica locations of this block from the node
           for (int i = 0; i < targets.length; i++) {
             targets[i].removeBlock(storedBlock);
@@ -1578,8 +1581,8 @@
       }
       // setup the Inode.targets for the last block from the blockManager
       //
-      Block[] blocks = pendingFile.getBlocks();
-      Block last = blocks[blocks.length-1];
+      BlockInfo[] blocks = pendingFile.getBlocks();
+      BlockInfo last = blocks[blocks.length-1];
       DatanodeDescriptor[] targets = blockManager.getNodes(last);
       pendingFile.setTargets(targets);
     }



Mime
View raw message