hadoop-hdfs-commits mailing list archives

From: s..@apache.org
Subject: svn commit: r798736 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Date: Wed, 29 Jul 2009 00:35:25 GMT
Author: shv
Date: Wed Jul 29 00:35:24 2009
New Revision: 798736

URL: http://svn.apache.org/viewvc?rev=798736&view=rev
Log:
HDFS-508. Factor out BlockInfo from BlocksMap. Contributed by Konstantin Shvachko.
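
In practical terms, the change promotes the nested class BlocksMap.BlockInfo to the top-level class BlockInfo in the same package (the new BlockInfo.java below). The only edit most dependent files need is to drop the nested-class import:

    // Before this commit, namenode classes imported the nested class:
    import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;

    // After it, BlockInfo is a top-level class in
    // org.apache.hadoop.hdfs.server.namenode, so classes in that package
    // simply delete the import line, as the hunks below show.

BlockInfo also gains a setINode() mutator, and BlocksMap now reaches the inode back-reference through getINode()/setINode() instead of touching the private field directly.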

Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java   (with props)
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=798736&r1=798735&r2=798736&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Jul 29 00:35:24 2009
@@ -48,10 +48,12 @@
     HDFS-493. Change build.xml so that the fault-injected tests are executed
     only by the run-test-*-faul-inject targets.  (Konstantin Boudnik via
     szetszwo)
-   
+
     HADOOP-6160. Fix releaseaudit target to run on specific directories.
     (gkesavan)
 
+    HDFS-508. Factor out BlockInfo from BlocksMap. (shv)
+
   BUG FIXES
     HDFS-76. Better error message to users when commands fail because of 
     lack of quota. Allow quota to be set even if the limit is lower than

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java?rev=798736&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java Wed Jul 29 00:35:24 2009
@@ -0,0 +1,276 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+
+/**
+ * Internal class for block metadata.
+ */
+class BlockInfo extends Block {
+  private INodeFile inode;
+
+  /**
+   * This array contains triplets of references.
+   * For each i-th datanode the block belongs to
+   * triplets[3*i] is the reference to the DatanodeDescriptor
+   * and triplets[3*i+1] and triplets[3*i+2] are references 
+   * to the previous and the next blocks, respectively, in the 
+   * list of blocks belonging to this data-node.
+   */
+  private Object[] triplets;
+
+  BlockInfo(Block blk, int replication) {
+    super(blk);
+    this.triplets = new Object[3*replication];
+    this.inode = null;
+  }
+
+  INodeFile getINode() {
+    return inode;
+  }
+
+  void setINode(INodeFile inode) {
+    this.inode = inode;
+  }
+
+  DatanodeDescriptor getDatanode(int index) {
+    assert this.triplets != null : "BlockInfo is not initialized";
+    assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
+    DatanodeDescriptor node = (DatanodeDescriptor)triplets[index*3];
+    assert node == null || 
+        DatanodeDescriptor.class.getName().equals(node.getClass().getName()) : 
+              "DatanodeDescriptor is expected at " + index*3;
+    return node;
+  }
+
+  BlockInfo getPrevious(int index) {
+    assert this.triplets != null : "BlockInfo is not initialized";
+    assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
+    BlockInfo info = (BlockInfo)triplets[index*3+1];
+    assert info == null || 
+        BlockInfo.class.getName().equals(info.getClass().getName()) : 
+              "BlockInfo is expected at " + index*3;
+    return info;
+  }
+
+  BlockInfo getNext(int index) {
+    assert this.triplets != null : "BlockInfo is not initialized";
+    assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
+    BlockInfo info = (BlockInfo)triplets[index*3+2];
+    assert info == null || 
+        BlockInfo.class.getName().equals(info.getClass().getName()) : 
+              "BlockInfo is expected at " + index*3;
+    return info;
+  }
+
+  void setDatanode(int index, DatanodeDescriptor node) {
+    assert this.triplets != null : "BlockInfo is not initialized";
+    assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
+    triplets[index*3] = node;
+  }
+
+  void setPrevious(int index, BlockInfo to) {
+    assert this.triplets != null : "BlockInfo is not initialized";
+    assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
+    triplets[index*3+1] = to;
+  }
+
+  void setNext(int index, BlockInfo to) {
+    assert this.triplets != null : "BlockInfo is not initialized";
+    assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
+    triplets[index*3+2] = to;
+  }
+
+  int getCapacity() {
+    assert this.triplets != null : "BlockInfo is not initialized";
+    assert triplets.length % 3 == 0 : "Malformed BlockInfo";
+    return triplets.length / 3;
+  }
+
+  /**
+   * Ensure that there is enough  space to include num more triplets.
+   * @return first free triplet index.
+   */
+  private int ensureCapacity(int num) {
+    assert this.triplets != null : "BlockInfo is not initialized";
+    int last = numNodes();
+    if(triplets.length >= (last+num)*3)
+      return last;
+    /* Not enough space left. Create a new array. Should normally 
+     * happen only when replication is manually increased by the user. */
+    Object[] old = triplets;
+    triplets = new Object[(last+num)*3];
+    for(int i=0; i < last*3; i++) {
+      triplets[i] = old[i];
+    }
+    return last;
+  }
+
+  /**
+   * Count the number of data-nodes the block belongs to.
+   */
+  int numNodes() {
+    assert this.triplets != null : "BlockInfo is not initialized";
+    assert triplets.length % 3 == 0 : "Malformed BlockInfo";
+    for(int idx = getCapacity()-1; idx >= 0; idx--) {
+      if(getDatanode(idx) != null)
+        return idx+1;
+    }
+    return 0;
+  }
+
+  /**
+   * Add data-node this block belongs to.
+   */
+  boolean addNode(DatanodeDescriptor node) {
+    if(findDatanode(node) >= 0) // the node is already there
+      return false;
+    // find the last null node
+    int lastNode = ensureCapacity(1);
+    setDatanode(lastNode, node);
+    setNext(lastNode, null);
+    setPrevious(lastNode, null);
+    return true;
+  }
+
+  /**
+   * Remove data-node from the block.
+   */
+  boolean removeNode(DatanodeDescriptor node) {
+    int dnIndex = findDatanode(node);
+    if(dnIndex < 0) // the node is not found
+      return false;
+    assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : 
+      "Block is still in the list and must be removed first.";
+    // find the last not null node
+    int lastNode = numNodes()-1; 
+    // replace current node triplet by the lastNode one 
+    setDatanode(dnIndex, getDatanode(lastNode));
+    setNext(dnIndex, getNext(lastNode)); 
+    setPrevious(dnIndex, getPrevious(lastNode)); 
+    // set the last triplet to null
+    setDatanode(lastNode, null);
+    setNext(lastNode, null); 
+    setPrevious(lastNode, null); 
+    return true;
+  }
+
+  /**
+   * Find specified DatanodeDescriptor.
+   * @param dn
+   * @return index or -1 if not found.
+   */
+  int findDatanode(DatanodeDescriptor dn) {
+    int len = getCapacity();
+    for(int idx = 0; idx < len; idx++) {
+      DatanodeDescriptor cur = getDatanode(idx);
+      if(cur == dn)
+        return idx;
+      if(cur == null)
+        break;
+    }
+    return -1;
+  }
+
+  /**
+   * Insert this block into the head of the list of blocks 
+   * related to the specified DatanodeDescriptor.
+   * If the head is null then form a new list.
+   * @return current block as the new head of the list.
+   */
+  BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) {
+    int dnIndex = this.findDatanode(dn);
+    assert dnIndex >= 0 : "Data node is not found: current";
+    assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : 
+            "Block is already in the list and cannot be inserted.";
+    this.setPrevious(dnIndex, null);
+    this.setNext(dnIndex, head);
+    if(head != null)
+      head.setPrevious(head.findDatanode(dn), this);
+    return this;
+  }
+
+  /**
+   * Remove this block from the list of blocks 
+   * related to the specified DatanodeDescriptor.
+   * If this block is the head of the list then return the next block as 
+   * the new head.
+   * @return the new head of the list or null if the list becomes
+   * empty after deletion.
+   */
+  BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) {
+    if(head == null)
+      return null;
+    int dnIndex = this.findDatanode(dn);
+    if(dnIndex < 0) // this block is not on the data-node list
+      return head;
+
+    BlockInfo next = this.getNext(dnIndex);
+    BlockInfo prev = this.getPrevious(dnIndex);
+    this.setNext(dnIndex, null);
+    this.setPrevious(dnIndex, null);
+    if(prev != null)
+      prev.setNext(prev.findDatanode(dn), next);
+    if(next != null)
+      next.setPrevious(next.findDatanode(dn), prev);
+    if(this == head)  // removing the head
+      head = next;
+    return head;
+  }
+
+  int listCount(DatanodeDescriptor dn) {
+    int count = 0;
+    for(BlockInfo cur = this; cur != null;
+          cur = cur.getNext(cur.findDatanode(dn)))
+      count++;
+    return count;
+  }
+
+  boolean listIsConsistent(DatanodeDescriptor dn) {
+    // going forward
+    int count = 0;
+    BlockInfo next, nextPrev;
+    BlockInfo cur = this;
+    while(cur != null) {
+      next = cur.getNext(cur.findDatanode(dn));
+      if(next != null) {
+        nextPrev = next.getPrevious(next.findDatanode(dn));
+        if(cur != nextPrev) {
+          System.out.println("Inconsistent list: cur->next->prev != cur");
+          return false;
+        }
+      }
+      cur = next;
+      count++;
+    }
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    // Super implementation is sufficient
+    return super.hashCode();
+  }
+  
+  @Override
+  public boolean equals(Object obj) {
+    // Sufficient to rely on super's implementation
+    return (this == obj) || super.equals(obj);
+  }
+}
\ No newline at end of file
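
The triplets array is the heart of BlockInfo: for the i-th replica it stores the DatanodeDescriptor plus previous/next links of a doubly-linked list that threads all blocks of that datanode through the BlockInfo objects themselves, so no separate per-datanode list nodes are allocated. The following standalone sketch (simplified stand-in types, not the real HDFS classes) illustrates the same layout and the head insertion performed by addNode() plus listInsert():

    /** Standalone sketch of the triplets scheme; Dn and Blk are
     *  hypothetical stand-ins for DatanodeDescriptor and BlockInfo. */
    public class TripletsSketch {
      static class Dn { final String name; Blk head; Dn(String n) { name = n; } }

      static class Blk {
        final String id;
        // [3*i] = Dn of replica i, [3*i+1]/[3*i+2] = prev/next Blk
        // in that datanode's block list
        final Object[] triplets;
        Blk(String id, int replication) {
          this.id = id;
          this.triplets = new Object[3 * replication];
        }
        int findDn(Dn dn) {            // analogous to findDatanode()
          for (int i = 0; i * 3 < triplets.length; i++)
            if (triplets[i * 3] == dn) return i;
          return -1;
        }
        void addReplica(Dn dn) {       // addNode() + listInsert() combined
          int i = findFreeSlot();
          triplets[i * 3] = dn;
          triplets[i * 3 + 2] = dn.head;        // next = old head
          if (dn.head != null)                  // old head's prev = this
            dn.head.triplets[dn.head.findDn(dn) * 3 + 1] = this;
          dn.head = this;
        }
        private int findFreeSlot() {
          for (int i = 0; i * 3 < triplets.length; i++)
            if (triplets[i * 3] == null) return i;
          throw new IllegalStateException("no free replica slot");
        }
      }

      public static void main(String[] args) {
        Dn dn = new Dn("dn1");
        Blk b1 = new Blk("blk_1", 3);
        Blk b2 = new Blk("blk_2", 3);
        b1.addReplica(dn);
        b2.addReplica(dn);             // b2 becomes the new list head
        for (Blk cur = dn.head; cur != null;
             cur = (Blk) cur.triplets[cur.findDn(dn) * 3 + 2])
          System.out.println(cur.id);  // prints blk_2, then blk_1
      }
    }

Note how the traversal must call findDn() on every hop, exactly as listCount() and listIsConsistent() do above: a block occupies a different replica slot on each datanode, so the link offsets differ from node to node.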

Propchange: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=798736&r1=798735&r2=798736&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Wed Jul 29 00:35:24 2009
@@ -37,7 +37,6 @@
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
 import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator;
 import org.apache.hadoop.security.AccessTokenHandler;
@@ -1051,7 +1050,7 @@
     long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0;
     synchronized (namesystem) {
       neededReplications.clear();
-      for (BlocksMap.BlockInfo block : blocksMap.getBlocks()) {
+      for (BlockInfo block : blocksMap.getBlocks()) {
         INodeFile fileINode = block.getINode();
         if (fileINode == null) {
           // block does not belong to any file
@@ -1415,13 +1414,13 @@
     blocksMap.removeBlock(block);
   }
   
-  public int getCapacity() {
+  int getCapacity() {
     synchronized(namesystem) {
       return blocksMap.getCapacity();
     }
   }
   
-  public float getLoadFactor() {
+  float getLoadFactor() {
     return blocksMap.getLoadFactor();
   }
 }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java?rev=798736&r1=798735&r2=798736&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java Wed Jul 29 00:35:24 2009
@@ -30,259 +30,6 @@
  * the datanodes that store the block.
  */
 class BlocksMap {
-        
-  /**
-   * Internal class for block metadata.
-   */
-  static class BlockInfo extends Block {
-    private INodeFile inode;
-
-    /**
-     * This array contains triplets of references.
-     * For each i-th datanode the block belongs to
-     * triplets[3*i] is the reference to the DatanodeDescriptor
-     * and triplets[3*i+1] and triplets[3*i+2] are references 
-     * to the previous and the next blocks, respectively, in the 
-     * list of blocks belonging to this data-node.
-     */
-    private Object[] triplets;
-
-    BlockInfo(Block blk, int replication) {
-      super(blk);
-      this.triplets = new Object[3*replication];
-      this.inode = null;
-    }
-
-    INodeFile getINode() {
-      return inode;
-    }
-
-    DatanodeDescriptor getDatanode(int index) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
-      DatanodeDescriptor node = (DatanodeDescriptor)triplets[index*3];
-      assert node == null || 
-          DatanodeDescriptor.class.getName().equals(node.getClass().getName()) : 
-                "DatanodeDescriptor is expected at " + index*3;
-      return node;
-    }
-
-    BlockInfo getPrevious(int index) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-      BlockInfo info = (BlockInfo)triplets[index*3+1];
-      assert info == null || 
-          BlockInfo.class.getName().equals(info.getClass().getName()) : 
-                "BlockInfo is expected at " + index*3;
-      return info;
-    }
-
-    BlockInfo getNext(int index) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-      BlockInfo info = (BlockInfo)triplets[index*3+2];
-      assert info == null || 
-          BlockInfo.class.getName().equals(info.getClass().getName()) : 
-                "BlockInfo is expected at " + index*3;
-      return info;
-    }
-
-    void setDatanode(int index, DatanodeDescriptor node) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
-      triplets[index*3] = node;
-    }
-
-    void setPrevious(int index, BlockInfo to) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-      triplets[index*3+1] = to;
-    }
-
-    void setNext(int index, BlockInfo to) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-      triplets[index*3+2] = to;
-    }
-
-    private int getCapacity() {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert triplets.length % 3 == 0 : "Malformed BlockInfo";
-      return triplets.length / 3;
-    }
-
-    /**
-     * Ensure that there is enough  space to include num more triplets.
-     *      * @return first free triplet index.
-     */
-    private int ensureCapacity(int num) {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      int last = numNodes();
-      if(triplets.length >= (last+num)*3)
-        return last;
-      /* Not enough space left. Create a new array. Should normally 
-       * happen only when replication is manually increased by the user. */
-      Object[] old = triplets;
-      triplets = new Object[(last+num)*3];
-      for(int i=0; i < last*3; i++) {
-        triplets[i] = old[i];
-      }
-      return last;
-    }
-
-    /**
-     * Count the number of data-nodes the block belongs to.
-     */
-    int numNodes() {
-      assert this.triplets != null : "BlockInfo is not initialized";
-      assert triplets.length % 3 == 0 : "Malformed BlockInfo";
-      for(int idx = getCapacity()-1; idx >= 0; idx--) {
-        if(getDatanode(idx) != null)
-          return idx+1;
-      }
-      return 0;
-    }
-
-    /**
-     * Add data-node this block belongs to.
-     */
-    boolean addNode(DatanodeDescriptor node) {
-      if(findDatanode(node) >= 0) // the node is already there
-        return false;
-      // find the last null node
-      int lastNode = ensureCapacity(1);
-      setDatanode(lastNode, node);
-      setNext(lastNode, null);
-      setPrevious(lastNode, null);
-      return true;
-    }
-
-    /**
-     * Remove data-node from the block.
-     */
-    boolean removeNode(DatanodeDescriptor node) {
-      int dnIndex = findDatanode(node);
-      if(dnIndex < 0) // the node is not found
-        return false;
-      assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : 
-        "Block is still in the list and must be removed first.";
-      // find the last not null node
-      int lastNode = numNodes()-1; 
-      // replace current node triplet by the lastNode one 
-      setDatanode(dnIndex, getDatanode(lastNode));
-      setNext(dnIndex, getNext(lastNode)); 
-      setPrevious(dnIndex, getPrevious(lastNode)); 
-      // set the last triplet to null
-      setDatanode(lastNode, null);
-      setNext(lastNode, null); 
-      setPrevious(lastNode, null); 
-      return true;
-    }
-
-    /**
-     * Find specified DatanodeDescriptor.
-     * @param dn
-     * @return index or -1 if not found.
-     */
-    int findDatanode(DatanodeDescriptor dn) {
-      int len = getCapacity();
-      for(int idx = 0; idx < len; idx++) {
-        DatanodeDescriptor cur = getDatanode(idx);
-        if(cur == dn)
-          return idx;
-        if(cur == null)
-          break;
-      }
-      return -1;
-    }
-
-    /**
-     * Insert this block into the head of the list of blocks 
-     * related to the specified DatanodeDescriptor.
-     * If the head is null then form a new list.
-     * @return current block as the new head of the list.
-     */
-    BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) {
-      int dnIndex = this.findDatanode(dn);
-      assert dnIndex >= 0 : "Data node is not found: current";
-      assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : 
-              "Block is already in the list and cannot be inserted.";
-      this.setPrevious(dnIndex, null);
-      this.setNext(dnIndex, head);
-      if(head != null)
-        head.setPrevious(head.findDatanode(dn), this);
-      return this;
-    }
-
-    /**
-     * Remove this block from the list of blocks 
-     * related to the specified DatanodeDescriptor.
-     * If this block is the head of the list then return the next block as 
-     * the new head.
-     * @return the new head of the list or null if the list becomes
-     * empty after deletion.
-     */
-    BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) {
-      if(head == null)
-        return null;
-      int dnIndex = this.findDatanode(dn);
-      if(dnIndex < 0) // this block is not on the data-node list
-        return head;
-
-      BlockInfo next = this.getNext(dnIndex);
-      BlockInfo prev = this.getPrevious(dnIndex);
-      this.setNext(dnIndex, null);
-      this.setPrevious(dnIndex, null);
-      if(prev != null)
-        prev.setNext(prev.findDatanode(dn), next);
-      if(next != null)
-        next.setPrevious(next.findDatanode(dn), prev);
-      if(this == head)  // removing the head
-        head = next;
-      return head;
-    }
-
-    int listCount(DatanodeDescriptor dn) {
-      int count = 0;
-      for(BlockInfo cur = this; cur != null;
-            cur = cur.getNext(cur.findDatanode(dn)))
-        count++;
-      return count;
-    }
-
-    boolean listIsConsistent(DatanodeDescriptor dn) {
-      // going forward
-      int count = 0;
-      BlockInfo next, nextPrev;
-      BlockInfo cur = this;
-      while(cur != null) {
-        next = cur.getNext(cur.findDatanode(dn));
-        if(next != null) {
-          nextPrev = next.getPrevious(next.findDatanode(dn));
-          if(cur != nextPrev) {
-            System.out.println("Inconsistent list: cur->next->prev != cur");
-            return false;
-          }
-        }
-        cur = next;
-        count++;
-      }
-      return true;
-    }
-
-    @Override
-    public int hashCode() {
-      // Super implementation is sufficient
-      return super.hashCode();
-    }
-    
-    @Override
-    public boolean equals(Object obj) {
-      // Sufficient to rely on super's implementation
-      return (this == obj) || super.equals(obj);
-    }
-  }
-
   private static class NodeIterator implements Iterator<DatanodeDescriptor> {
     private BlockInfo blockInfo;
     private int nextIdx = 0;
@@ -320,29 +67,22 @@
     this.map = new HashMap<BlockInfo, BlockInfo>(initialCapacity, loadFactor);
   }
 
-  /**
-   * Add BlockInfo if mapping does not exist.
-   */
-  private BlockInfo checkBlockInfo(Block b, int replication) {
-    BlockInfo info = map.get(b);
-    if (info == null) {
-      info = new BlockInfo(b, replication);
-      map.put(info, info);
-    }
-    return info;
-  }
-
   INodeFile getINode(Block b) {
     BlockInfo info = map.get(b);
-    return (info != null) ? info.inode : null;
+    return (info != null) ? info.getINode() : null;
   }
 
   /**
    * Add block b belonging to the specified file inode to the map.
    */
   BlockInfo addINode(Block b, INodeFile iNode) {
-    BlockInfo info = checkBlockInfo(b, iNode.getReplication());
-    info.inode = iNode;
+    int replication = iNode.getReplication();
+    BlockInfo info = map.get(b);
+    if (info == null) {
+      info = new BlockInfo(b, replication);
+      map.put(info, info);
+    }
+    info.setINode(iNode);
     return info;
   }
 
@@ -356,7 +96,7 @@
     if (blockInfo == null)
       return;
 
-    blockInfo.inode = null;
+    blockInfo.setINode(null);
     for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) {
       DatanodeDescriptor dn = blockInfo.getDatanode(idx);
       dn.removeBlock(blockInfo); // remove from the list and wipe the location
@@ -379,15 +119,6 @@
     return info == null ? 0 : info.numNodes();
   }
 
-  /** returns true if the node does not already exists and is added.
-   * false if the node already exists.*/
-  boolean addNode(Block b, DatanodeDescriptor node, int replication) {
-    // insert into the map if not there yet
-    BlockInfo info = checkBlockInfo(b, replication);
-    // add block to the data-node list and the node to the block info
-    return node.addBlock(info);
-  }
-
   /**
    * Remove data-node reference from the block.
    * Remove the block from the block map
@@ -402,7 +133,7 @@
     boolean removed = node.removeBlock(info);
 
     if (info.getDatanode(0) == null     // no datanodes left
-              && info.inode == null) {  // does not belong to a file
+              && info.getINode() == null) {  // does not belong to a file
       map.remove(b);  // remove block from the map
     }
     return removed;
@@ -437,7 +168,7 @@
   }
   
   /** Get the capacity of the HashMap that stores blocks */
-  public int getCapacity() {
+  int getCapacity() {
     // Capacity doubles every time the map size reaches the threshold
     while (map.size() > (int)(capacity * loadFactor)) {
       capacity <<= 1;
@@ -446,7 +177,7 @@
   }
   
   /** Get the load factor of the map */
-  public float getLoadFactor() {
+  float getLoadFactor() {
     return loadFactor;
   }
 }
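
One detail worth noting in the BlocksMap hunks above: the map is a HashMap<BlockInfo, BlockInfo> populated via map.put(info, info), so key and value are the same object. Because BlockInfo inherits equals() and hashCode() from Block, a plain Block serves as a lookup key that recovers the full BlockInfo entry, which is what getINode() and removeBlock() rely on. A self-contained sketch of this map-as-intern-set pattern, with hypothetical stand-in types:

    import java.util.HashMap;
    import java.util.Map;

    class BlockKey {                   // stand-in for Block
      final long id;
      BlockKey(long id) { this.id = id; }
      @Override public boolean equals(Object o) {
        return o instanceof BlockKey && ((BlockKey) o).id == id;
      }
      @Override public int hashCode() { return Long.hashCode(id); }
    }

    class RichBlock extends BlockKey { // stand-in for BlockInfo: same
      String inodePath;                // identity, extra metadata
      RichBlock(long id) { super(id); }
    }

    public class InternMapDemo {
      public static void main(String[] args) {
        Map<RichBlock, RichBlock> map = new HashMap<>();
        RichBlock info = new RichBlock(42L);
        info.inodePath = "/user/foo/bar";
        map.put(info, info);                // key == value, as in BlocksMap
        BlockKey probe = new BlockKey(42L); // lightweight lookup key
        System.out.println(map.get(probe).inodePath);
      }
    }

This is also why the commit can inline checkBlockInfo() into addINode(): after the removal of the old addNode(Block, DatanodeDescriptor, int), the lookup, the conditional insert, and the inode wiring all live at the single call site that still needs them.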

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java?rev=798736&r1=798735&r2=798736&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java Wed Jul 29 00:35:24 2009
@@ -25,7 +25,6 @@
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.io.Text;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=798736&r1=798735&r2=798736&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Jul 29 00:35:24 2009
@@ -33,7 +33,6 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 
 /*************************************************
  * FSDirectory stores the filesystem directory state.

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=798736&r1=798735&r2=798736&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Wed Jul 29 00:35:24 2009
@@ -58,7 +58,6 @@
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.JournalStream.JournalType;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=798736&r1=798735&r2=798736&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Jul 29 00:35:24 2009
@@ -26,7 +26,6 @@
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
 import org.apache.hadoop.security.AccessControlException;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=798736&r1=798735&r2=798736&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Wed Jul 29 00:35:24 2009
@@ -24,7 +24,6 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 
 class INodeFile extends INode {
   static final FsPermission UMASK = FsPermission.createImmutable((short)0111);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=798736&r1=798735&r2=798736&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Wed Jul 29 00:35:24 2009
@@ -21,7 +21,6 @@
 
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 
 
 class INodeFileUnderConstruction extends INodeFile {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=798736&r1=798735&r2=798736&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Wed Jul 29 00:35:24 2009
@@ -19,15 +19,12 @@
 
 import java.io.File;
 import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 
 /**
  * 


