hadoop-hdfs-commits mailing list archives

From sur...@apache.org
Subject svn commit: r1075655 - in /hadoop/hdfs/branches/HDFS-1052: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/datanode/metrics/ src/java/...
Date Tue, 01 Mar 2011 04:32:35 GMT
Author: suresh
Date: Tue Mar  1 04:32:34 2011
New Revision: 1075655

URL: http://svn.apache.org/viewvc?rev=1075655&view=rev
Log:
HDFS-1668. Federation: Datanodes send block pool usage information to the namenode in heartbeats.
Contributed by Suresh Srinivas.
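
In outline, the patch threads a single new value, the per-block-pool usage, through the whole heartbeat path. A minimal sketch of the flow, distilled from the hunks below (abbreviated, not a compilable excerpt):

    // DataNode side: the heartbeat loop now reports how much of this
    // datanode's storage the namenode's block pool is using, alongside
    // the existing capacity totals.
    DatanodeCommand[] cmds = namenode.sendHeartbeat(dnRegistration,
        data.getCapacity(),                  // total storage at the datanode
        data.getDfsUsed(),                   // storage used by HDFS overall
        data.getRemaining(),                 // storage still available
        data.getBlockPoolUsed(blockPoolId),  // new: this block pool's usage
        xmitsInProgress.get(),
        getXceiverCount());

    // NameNode side: FSNamesystem.handleHeartbeat() passes the value to
    // DatanodeDescriptor.updateHeartbeat() and folds it into the
    // cluster-wide blockPoolUsed total exposed through NameNodeMXBean.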


Modified:
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java
    hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Tue Mar  1 04:32:34 2011
@@ -78,6 +78,8 @@ Trunk (unreleased changes)
     HDFS-1654. Federation: Fix TestDFSUpgrade and TestDFSRollback failures.
     (suresh)
     
+    HDFS-1668. Federation: Datanodes sends block pool usage information 
+    to the namenode in heartbeat. (suresh)
 
   IMPROVEMENTS
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSUtil.java Tue Mar  1 04:32:34 2011
@@ -305,5 +305,15 @@ public class DFSUtil {
     return isas;
   }
   
+  
+  /** Return used as percentage of capacity */
+  public static float getPercentUsed(long used, long capacity) {
+    return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity; 
+  }
+  
+  /** Return remaining as percentage of capacity */
+  public static float getPercentRemaining(long remaining, long capacity) {
+    return capacity <= 0 ? 0 : ((float)remaining * 100.0f)/(float)capacity; 
+  }
 }
 
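The two new helpers centralize the percentage convention that the rest of the patch (DatanodeInfo, FSNamesystem, the capacity-report test) switches over to: with zero or unknown capacity, used space reports 100% and remaining space reports 0%, the conservative defaults. For example, with hypothetical values:

    DFSUtil.getPercentUsed(25L, 100L);       // 25.0f
    DFSUtil.getPercentUsed(25L, 0L);         // 100.0f - no capacity reads as fully used
    DFSUtil.getPercentRemaining(25L, 100L);  // 25.0f
    DFSUtil.getPercentRemaining(25L, 0L);    // 0.0f - no capacity reads as nothing free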

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Tue Mar  1 04:32:34 2011
@@ -24,6 +24,7 @@ import java.util.Date;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
@@ -48,6 +49,7 @@ public class DatanodeInfo extends Datano
   protected long capacity;
   protected long dfsUsed;
   protected long remaining;
+  protected long blockPoolUsed;
   protected long lastUpdate;
   protected int xceiverCount;
   protected String location = NetworkTopology.DEFAULT_RACK;
@@ -74,6 +76,7 @@ public class DatanodeInfo extends Datano
     this.capacity = from.getCapacity();
     this.dfsUsed = from.getDfsUsed();
     this.remaining = from.getRemaining();
+    this.blockPoolUsed = from.getBlockPoolUsed();
     this.lastUpdate = from.getLastUpdate();
     this.xceiverCount = from.getXceiverCount();
     this.location = from.getNetworkLocation();
@@ -86,6 +89,7 @@ public class DatanodeInfo extends Datano
     this.capacity = 0L;
     this.dfsUsed = 0L;
     this.remaining = 0L;
+    this.blockPoolUsed = 0L;
     this.lastUpdate = 0L;
     this.xceiverCount = 0;
     this.adminState = null;    
@@ -103,6 +107,9 @@ public class DatanodeInfo extends Datano
   /** The used space by the data node. */
   public long getDfsUsed() { return dfsUsed; }
 
+  /** The used space by the block pool on data node. */
+  public long getBlockPoolUsed() { return blockPoolUsed; }
+
   /** The non-DFS used space by the data node. */
   public long getNonDfsUsed() { 
     long nonDFSUsed = capacity - dfsUsed - remaining;
@@ -111,23 +118,20 @@ public class DatanodeInfo extends Datano
 
   /** The used space by the data node as percentage of present capacity */
   public float getDfsUsedPercent() { 
-    if (capacity <= 0) {
-      return 100;
-    }
-
-    return ((float)dfsUsed * 100.0f)/(float)capacity; 
+    return DFSUtil.getPercentUsed(dfsUsed, capacity);
   }
 
   /** The raw free space. */
   public long getRemaining() { return remaining; }
 
+  /** Used space by the block pool as percentage of present capacity */
+  public float getBlockPoolUsedPercent() {
+    return DFSUtil.getPercentUsed(blockPoolUsed, capacity);
+  }
+  
   /** The remaining space as percentage of configured capacity. */
   public float getRemainingPercent() { 
-    if (capacity <= 0) {
-      return 0;
-    }
-
-    return ((float)remaining * 100.0f)/(float)capacity; 
+    return DFSUtil.getPercentRemaining(remaining, capacity);
   }
 
   /** The time when this information was accurate. */
@@ -146,6 +150,11 @@ public class DatanodeInfo extends Datano
     this.remaining = remaining; 
   }
 
+  /** Sets block pool used space */
+  public void setBlockPoolUsed(long bpUsed) { 
+    this.blockPoolUsed = bpUsed; 
+  }
+
   /** Sets time when this information was accurate. */
   public void setLastUpdate(long lastUpdate) { 
     this.lastUpdate = lastUpdate; 
@@ -327,6 +336,7 @@ public class DatanodeInfo extends Datano
     out.writeLong(capacity);
     out.writeLong(dfsUsed);
     out.writeLong(remaining);
+    out.writeLong(blockPoolUsed);
     out.writeLong(lastUpdate);
     out.writeInt(xceiverCount);
     Text.writeString(out, location);
@@ -344,6 +354,7 @@ public class DatanodeInfo extends Datano
     this.capacity = in.readLong();
     this.dfsUsed = in.readLong();
     this.remaining = in.readLong();
+    this.blockPoolUsed = in.readLong();
     this.lastUpdate = in.readLong();
     this.xceiverCount = in.readInt();
     this.location = Text.readString(in);

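DatanodeInfo is a Writable, so the field order in write() and readFields() is the wire format itself. The patch inserts blockPoolUsed in the same slot on both sides, after remaining and before lastUpdate; a one-sided insertion would silently shift every later field. The relevant slice:

    // write()
    out.writeLong(remaining);
    out.writeLong(blockPoolUsed);        // new field
    out.writeLong(lastUpdate);

    // readFields()
    this.remaining = in.readLong();
    this.blockPoolUsed = in.readLong();  // mirrors write(); must stay in the same slot
    this.lastUpdate = in.readLong();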
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Mar  1 04:32:34 2011
@@ -841,6 +841,7 @@ public class DataNode extends Configured
           data.getCapacity(),
           data.getDfsUsed(),
           data.getRemaining(),
+          data.getBlockPoolUsed(blockPoolId),
           xmitsInProgress.get(),
           getXceiverCount());
     }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Tue Mar  1 04:32:34 2011
@@ -588,6 +588,10 @@ public class FSDataset implements FSCons
       return dfsUsed;
     }
     
+    long getBlockPoolUsed(String bpid) throws IOException {
+      return getBlockPool(bpid).getDfsUsed();
+    }
+    
     /**
      * Calculate the capacity of the filesystem, after removing any
      * reserved capacity.
@@ -751,6 +755,14 @@ public class FSDataset implements FSCons
       return dfsUsed;
     }
 
+    long getBlockPoolUsed(String bpid) throws IOException {
+      long dfsUsed = 0L;
+      for (int idx = 0; idx < volumes.length; idx++) {
+        dfsUsed += volumes[idx].getBlockPoolUsed(bpid);
+      }
+      return dfsUsed;
+    }
+
     long getCapacity() throws IOException {
       long capacity = 0L;
       for (int idx = 0; idx < volumes.length; idx++) {
@@ -1074,6 +1086,7 @@ public class FSDataset implements FSCons
   /**
    * Return the total space used by dfs datanode
    */
+  @Override // FSDatasetMBean
   public long getDfsUsed() throws IOException {
     synchronized(statsLock) {
       return volumes.getDfsUsed();
@@ -1081,6 +1094,16 @@ public class FSDataset implements FSCons
   }
 
   /**
+   * Return the total space used by the block pool
+   */
+  @Override // FSDatasetMBean
+  public long getBlockPoolUsed(String bpid) throws IOException {
+    synchronized(statsLock) {
+      return volumes.getBlockPoolUsed(bpid);
+    }
+  }
+  
+  /**
    * Return true - if there are still valid volumes on the DataNode. 
    */
   @Override // FSDatasetInterface
@@ -1091,6 +1114,7 @@ public class FSDataset implements FSCons
   /**
    * Return total capacity, used and unused
    */
+  @Override // FSDatasetMBean
   public long getCapacity() throws IOException {
     synchronized(statsLock) {
       return volumes.getCapacity();
@@ -1100,6 +1124,7 @@ public class FSDataset implements FSCons
   /**
    * Return how many bytes can still be stored in the FSDataset
    */
+  @Override // FSDatasetMBean
   public long getRemaining() throws IOException {
     synchronized(statsLock) {
       return volumes.getRemaining();
@@ -2008,6 +2033,7 @@ public class FSDataset implements FSCons
     }
   }
 
+  @Override // FSDatasetMBean
   public String getStorageInfo() {
     return toString();
   }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java Tue Mar  1 04:32:34 2011
@@ -40,6 +40,13 @@ import org.apache.hadoop.classification.
 public interface FSDatasetMBean {
   
   /**
+   * Returns the total space (in bytes) used by a block pool
+   * @return  the total space used by a block pool
+   * @throws IOException
+   */  
+  public long getBlockPoolUsed(String bpid) throws IOException;
+  
+  /**
    * Returns the total space (in bytes) used by dfs datanode
    * @return  the total space used by dfs datanode
    * @throws IOException

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java Tue Mar  1 04:32:34 2011
@@ -27,13 +27,8 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.WritableUtils;
@@ -132,7 +127,7 @@ public class DatanodeDescriptor extends 
    * @param nodeID id of the data node
    */
   public DatanodeDescriptor(DatanodeID nodeID) {
-    this(nodeID, 0L, 0L, 0L, 0);
+    this(nodeID, 0L, 0L, 0L, 0L, 0);
   }
 
   /** DatanodeDescriptor constructor
@@ -154,7 +149,7 @@ public class DatanodeDescriptor extends 
   public DatanodeDescriptor(DatanodeID nodeID, 
                             String networkLocation,
                             String hostName) {
-    this(nodeID, networkLocation, hostName, 0L, 0L, 0L, 0);
+    this(nodeID, networkLocation, hostName, 0L, 0L, 0L, 0L, 0);
   }
   
   /** DatanodeDescriptor constructor
@@ -162,16 +157,18 @@ public class DatanodeDescriptor extends 
    * @param nodeID id of the data node
    * @param capacity capacity of the data node
    * @param dfsUsed space used by the data node
-   * @param remaining remaing capacity of the data node
+   * @param remaining remaining capacity of the data node
+   * @param bpused space used by the block pool corresponding to this namenode
    * @param xceiverCount # of data transfers at the data node
    */
   public DatanodeDescriptor(DatanodeID nodeID, 
                             long capacity,
                             long dfsUsed,
                             long remaining,
+                            long bpused,
                             int xceiverCount) {
     super(nodeID);
-    updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount);
+    updateHeartbeat(capacity, dfsUsed, remaining, bpused, xceiverCount);
   }
 
   /** DatanodeDescriptor constructor
@@ -181,6 +178,7 @@ public class DatanodeDescriptor extends 
    * @param capacity capacity of the data node, including space used by non-dfs
    * @param dfsUsed the used space by dfs datanode
    * @param remaining remaining capacity of the data node
+   * @param bpused space used by the block pool corresponding to this namenode
    * @param xceiverCount # of data transfers at the data node
    */
   public DatanodeDescriptor(DatanodeID nodeID,
@@ -189,9 +187,10 @@ public class DatanodeDescriptor extends 
                             long capacity,
                             long dfsUsed,
                             long remaining,
+                            long bpused,
                             int xceiverCount) {
     super(nodeID, networkLocation, hostName);
-    updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount);
+    updateHeartbeat(capacity, dfsUsed, remaining, bpused, xceiverCount);
   }
 
   /**
@@ -247,6 +246,7 @@ public class DatanodeDescriptor extends 
   void resetBlocks() {
     this.capacity = 0;
     this.remaining = 0;
+    this.blockPoolUsed = 0;
     this.dfsUsed = 0;
     this.xceiverCount = 0;
     this.blockList = null;
@@ -260,10 +260,11 @@ public class DatanodeDescriptor extends 
   /**
    */
   void updateHeartbeat(long capacity, long dfsUsed, long remaining,
-      int xceiverCount) {
+      long blockPoolUsed, int xceiverCount) {
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
     this.remaining = remaining;
+    this.blockPoolUsed = blockPoolUsed;
     this.lastUpdate = System.currentTimeMillis();
     this.xceiverCount = xceiverCount;
     rollBlocksScheduled(lastUpdate);
@@ -543,6 +544,7 @@ public class DatanodeDescriptor extends 
     this.capacity = in.readLong();
     this.dfsUsed = in.readLong();
     this.remaining = in.readLong();
+    this.blockPoolUsed = in.readLong();
     this.lastUpdate = in.readLong();
     this.xceiverCount = in.readInt();
     this.location = Text.readString(in);

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Mar  1 04:32:34 2011
@@ -178,6 +178,7 @@ public class FSNamesystem implements FSC
   // FSNamesystemMetrics counter variables
   private FSNamesystemMetrics myFSMetrics;
   private long capacityTotal = 0L, capacityUsed = 0L, capacityRemaining = 0L;
+  private long blockPoolUsed = 0L;
   private int totalLoad = 0;
   boolean isBlockTokenEnabled;
   BlockTokenSecretManager blockTokenSecretManager;
@@ -2591,7 +2592,7 @@ public class FSNamesystem implements FSC
         if( !heartbeats.contains(nodeS)) {
           heartbeats.add(nodeS);
           //update its timestamp
-          nodeS.updateHeartbeat(0L, 0L, 0L, 0);
+          nodeS.updateHeartbeat(0L, 0L, 0L, 0L, 0);
           nodeS.isAlive = true;
         }
       }
@@ -2706,7 +2707,7 @@ public class FSNamesystem implements FSC
    * @throws IOException
    */
   DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
-      long capacity, long dfsUsed, long remaining,
+      long capacity, long dfsUsed, long remaining, long blockPoolUsed,
       int xceiverCount, int xmitsInProgress) throws IOException {
     DatanodeCommand cmd = null;
     synchronized (heartbeats) {
@@ -2729,7 +2730,8 @@ public class FSNamesystem implements FSC
         }
 
         updateStats(nodeinfo, false);
-        nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount);
+        nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, blockPoolUsed,
+            xceiverCount);
         updateStats(nodeinfo, true);
         
         //check lease recovery
@@ -2788,11 +2790,13 @@ public class FSNamesystem implements FSC
     if (isAdded) {
       capacityTotal += node.getCapacity();
       capacityUsed += node.getDfsUsed();
+      blockPoolUsed += node.getBlockPoolUsed();
       capacityRemaining += node.getRemaining();
       totalLoad += node.getXceiverCount();
     } else {
       capacityTotal -= node.getCapacity();
       capacityUsed -= node.getDfsUsed();
+      blockPoolUsed -= node.getBlockPoolUsed();
       capacityRemaining -= node.getRemaining();
       totalLoad -= node.getXceiverCount();
     }
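The subtract-then-add bracket around updateHeartbeat() in the hunk above this one is what keeps the new cluster-wide blockPoolUsed total (and the existing capacity totals) consistent: a node's old contribution is removed before its fields change and its new contribution is added afterwards, all under the heartbeats lock. Read together, the two hunks amount to:

    synchronized (heartbeats) {
      updateStats(nodeinfo, false);   // subtract old capacity/dfsUsed/blockPoolUsed/remaining
      nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, blockPoolUsed,
          xceiverCount);              // overwrite the node's reported stats
      updateStats(nodeinfo, true);    // add the new values back into the totals
    }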
@@ -3252,26 +3256,28 @@ public class FSNamesystem implements FSC
   /**
    * Total raw bytes including non-dfs used space.
    */
+  @Override // FSNamesystemMBean
   public long getCapacityTotal() {
-    return getStats()[0];
+    synchronized(heartbeats) {
+      return capacityTotal;
+    }
   }
 
   /**
    * Total used space by data nodes
    */
+  @Override // FSNamesystemMBean
   public long getCapacityUsed() {
-    return getStats()[1];
+    synchronized(heartbeats) {
+      return capacityUsed;
+    }
   }
   /**
    * Total used space by data nodes as percentage of total capacity
    */
   public float getCapacityUsedPercent() {
     synchronized(heartbeats){
-      if (capacityTotal <= 0) {
-        return 100;
-      }
-
-      return ((float)capacityUsed * 100.0f)/(float)capacityTotal;
+      return DFSUtil.getPercentUsed(capacityUsed, capacityTotal);
     }
   }
   /**
@@ -3289,7 +3295,9 @@ public class FSNamesystem implements FSC
    * Total non-used raw bytes.
    */
   public long getCapacityRemaining() {
-    return getStats()[2];
+    synchronized(heartbeats) {
+      return capacityRemaining;
+    }
   }
 
   /**
@@ -3297,16 +3305,13 @@ public class FSNamesystem implements FSC
    */
   public float getCapacityRemainingPercent() {
     synchronized(heartbeats){
-      if (capacityTotal <= 0) {
-        return 0;
-      }
-
-      return ((float)capacityRemaining * 100.0f)/(float)capacityTotal;
+      return DFSUtil.getPercentRemaining(capacityRemaining, capacityTotal);
     }
   }
   /**
    * Total number of connections.
    */
+  @Override // FSNamesystemMBean
   public int getTotalLoad() {
     synchronized (heartbeats) {
       return this.totalLoad;
@@ -4133,6 +4138,7 @@ public class FSNamesystem implements FSC
   /**
    * Get the total number of blocks in the system. 
    */
+  @Override // FSNamesystemMBean
   public long getBlocksTotal() {
     return blockManager.getTotalBlocks();
   }
@@ -4403,14 +4409,17 @@ public class FSNamesystem implements FSC
     return maxFsObjects;
   }
 
+  @Override // FSNamesystemMBean
   public long getFilesTotal() {
     return this.dir.totalInodes();
   }
 
+  @Override // FSNamesystemMBean
   public long getPendingReplicationBlocks() {
     return blockManager.pendingReplicationBlocksCount;
   }
 
+  @Override // FSNamesystemMBean
   public long getUnderReplicatedBlocks() {
     return blockManager.underReplicatedBlocksCount;
   }
@@ -4420,6 +4429,7 @@ public class FSNamesystem implements FSC
     return blockManager.corruptReplicaBlocksCount;
   }
 
+  @Override // FSNamesystemMBean
   public long getScheduledReplicationBlocks() {
     return blockManager.scheduledReplicationBlocksCount;
   }
@@ -4436,6 +4446,7 @@ public class FSNamesystem implements FSC
     return blockManager.getCapacity();
   }
 
+  @Override // FSNamesystemMBean
   public String getFSState() {
     return isInSafeMode() ? "safeMode" : "Operational";
   }
@@ -4481,6 +4492,7 @@ public class FSNamesystem implements FSC
    * Number of live data nodes
    * @return Number of live data nodes
    */
+  @Override // FSNamesystemMBean
   public int getNumLiveDataNodes() {
     int numLive = 0;
     synchronized (datanodeMap) {   
@@ -4500,6 +4512,7 @@ public class FSNamesystem implements FSC
    * Number of dead data nodes
    * @return Number of dead data nodes
    */
+  @Override // FSNamesystemMBean
   public int getNumDeadDataNodes() {
     int numDead = 0;
     synchronized (datanodeMap) {   
@@ -5171,6 +5184,20 @@ public class FSNamesystem implements FSC
   }
 
   @Override // NameNodeMXBean
+  public long getBlockPoolUsedSpace() {
+    synchronized(heartbeats) {
+      return blockPoolUsed;
+    }
+  }
+
+  @Override // NameNodeMXBean
+  public float getPercentBlockPoolUsed() {
+    synchronized(heartbeats) {
+      return DFSUtil.getPercentUsed(blockPoolUsed, capacityTotal);
+    }
+  }
+
+  @Override // NameNodeMXBean
   public float getPercentRemaining() {
     return getCapacityRemainingPercent();
   }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Mar  1 04:32:34 2011
@@ -1233,11 +1233,12 @@ public class NameNode implements Namenod
                                        long capacity,
                                        long dfsUsed,
                                        long remaining,
+                                       long blockPoolUsed,
                                        int xmitsInProgress,
                                        int xceiverCount) throws IOException {
     verifyRequest(nodeReg);
     return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining,
-        xceiverCount, xmitsInProgress);
+        blockPoolUsed, xceiverCount, xmitsInProgress);
   }
 
   /**

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java Tue Mar  1 04:32:34 2011
@@ -21,7 +21,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 
 /**
- * 
  * This is the JMX management interface for namenode information
  */
 @InterfaceAudience.Public
@@ -96,6 +95,16 @@ public interface NameNodeMXBean {
   public float getPercentRemaining();
   
   /**
+   * Get the total space used by the block pools of this namenode
+   */
+  public long getBlockPoolUsedSpace();
+  
+  /**
+   * Get the total space used by the block pool as percentage of total capacity
+   */
+  public float getPercentBlockPoolUsed();
+    
+  /**
    * Gets the total numbers of blocks on the cluster.
    * 
    * @return the total number of blocks of the cluster

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Tue Mar  1 04:32:34 2011
@@ -87,11 +87,20 @@ public interface DatanodeProtocol extend
    * an array of "DatanodeCommand" objects.
    * A DatanodeCommand tells the DataNode to invalidate local block(s), 
    * or to copy them to other DataNodes, etc.
+   * @param registration datanode registration information
+   * @param capacity total storage capacity available at the datanode
+   * @param dfsUsed storage used by HDFS
+   * @param remaining remaining storage available for HDFS
+   * @param blockPoolUsed storage used by the block pool
+   * @param xmitsInProgress number of transfers from this datanode to others
+   * @param xceiverCount number of active transceiver threads
+   * @throws IOException on error
    */
   @Nullable
   public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
                                        long capacity,
                                        long dfsUsed, long remaining,
+                                       long blockPoolUsed,
                                        int xmitsInProgress,
                                        int xceiverCount) throws IOException;
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Tue Mar  1 04:32:34 2011
@@ -399,14 +399,23 @@ public class SimulatedFSDataset  impleme
         new ArrayList<Block>(Arrays.asList(blockTable)), null);
   }
 
+  @Override // FSDatasetMBean
   public long getCapacity() throws IOException {
     return storage.getCapacity();
   }
 
+  @Override // FSDatasetMBean
   public long getDfsUsed() throws IOException {
     return storage.getUsed();
   }
 
+  @Override // FSDatasetMBean
+  public long getBlockPoolUsed(String bpid) throws IOException {
+    // TODO:FEDERATION currently a single block pool is supported
+    return storage.getUsed();
+  }
+  
+  @Override // FSDatasetMBean
   public long getRemaining() throws IOException {
     return storage.getFree();
   }

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Tue Mar  1 04:32:34 2011
@@ -796,8 +796,9 @@ public class NNThroughputBenchmark {
      */
     void sendHeartbeat() throws IOException {
       // register datanode
-      DatanodeCommand[] cmds = nameNode.sendHeartbeat(
-          dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
+      // TODO:FEDERATION currently a single block pool is supported
+      DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
+          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
       if(cmds != null) {
         for (DatanodeCommand cmd : cmds ) {
           if(LOG.isDebugEnabled()) {
@@ -840,8 +841,9 @@ public class NNThroughputBenchmark {
     @SuppressWarnings("unused") // keep it for future blockReceived benchmark
     int replicateBlocks() throws IOException {
       // register datanode
-      DatanodeCommand[] cmds = nameNode.sendHeartbeat(
-          dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
+      // TODO:FEDERATION currently a single block pool is supported
+      DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
+          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
       if (cmds != null) {
         for (DatanodeCommand cmd : cmds) {
           if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Tue Mar  1 04:32:34 2011
@@ -121,7 +121,7 @@ public class TestDeadDatanode {
 
     // Ensure heartbeat from dead datanode is rejected with a command
     // that asks datanode to register again
-    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0);
+    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0);
     Assert.assertEquals(1, cmd.length);
     Assert.assertEquals(cmd[0].getAction(), DatanodeCommand.REGISTER
         .getAction());

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java Tue Mar  1 04:32:34 2011
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.IOException;
 import java.util.ArrayList;
 
 import org.apache.hadoop.conf.Configuration;
@@ -37,9 +38,10 @@ import junit.framework.TestCase;
  */
 public class TestHeartbeatHandling extends TestCase {
   /**
-   * Test if {@link FSNamesystem#handleHeartbeat(DatanodeRegistration, long, long, long, int, int)}
-   * can pick up replication and/or invalidate requests and 
-   * observes the max limit
+   * Test if
+   * {@link FSNamesystem#handleHeartbeat(DatanodeRegistration, long, long, long, long, int, int)}
+   * can pick up replication and/or invalidate requests and observes the max
+   * limit
    */
   public void testHeartbeat() throws Exception {
     final Configuration conf = new HdfsConfiguration();
@@ -64,8 +66,7 @@ public class TestHeartbeatHandling exten
         dd.addBlockToBeReplicated(
             new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
       }
-      DatanodeCommand[] cmds = namesystem.handleHeartbeat(
-          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
+      DatanodeCommand[] cmds = sendHeartBeat(nodeReg, dd, namesystem);
       assertEquals(1, cmds.length);
       assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
       assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
@@ -76,34 +77,36 @@ public class TestHeartbeatHandling exten
       }
       dd.addBlocksToBeInvalidated(blockList);
            
-      cmds = namesystem.handleHeartbeat(
-          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
+      cmds = sendHeartBeat(nodeReg, dd, namesystem);
       assertEquals(2, cmds.length);
       assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
       assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
       assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
       assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
       
-      cmds = namesystem.handleHeartbeat(
-          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
+      cmds = sendHeartBeat(nodeReg, dd, namesystem);
       assertEquals(2, cmds.length);
       assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
       assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
       assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
       assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
       
-      cmds = namesystem.handleHeartbeat(
-          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
+      cmds = sendHeartBeat(nodeReg, dd, namesystem);
       assertEquals(1, cmds.length);
       assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
       assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
 
-      cmds = namesystem.handleHeartbeat(
-          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
+      cmds = sendHeartBeat(nodeReg, dd, namesystem);
       assertEquals(null, cmds);
       }
     } finally {
       cluster.shutdown();
     }
   }
+  
+  private static DatanodeCommand[] sendHeartBeat(DatanodeRegistration nodeReg,
+      DatanodeDescriptor dd, FSNamesystem namesystem) throws IOException {
+    return namesystem.handleHeartbeat(nodeReg, dd.getCapacity(), 
+        dd.getDfsUsed(), dd.getRemaining(), dd.getBlockPoolUsed(), 0, 0);
+  }
 }

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java Tue Mar  1 04:32:34 2011
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
@@ -65,8 +66,8 @@ public class TestNamenodeCapacityReport 
       
       assertTrue(live.size() == 1);
       
-      long used, remaining, configCapacity, nonDFSUsed;
-      float percentUsed, percentRemaining;
+      long used, remaining, configCapacity, nonDFSUsed, bpUsed;
+      float percentUsed, percentRemaining, percentBpUsed;
       
       for (final DatanodeDescriptor datanode : live) {
         used = datanode.getDfsUsed();
@@ -75,6 +76,8 @@ public class TestNamenodeCapacityReport 
         configCapacity = datanode.getCapacity();
         percentUsed = datanode.getDfsUsedPercent();
         percentRemaining = datanode.getRemainingPercent();
+        bpUsed = datanode.getBlockPoolUsed();
+        percentBpUsed = datanode.getBlockPoolUsedPercent();
         
         LOG.info("Datanode configCapacity " + configCapacity
             + " used " + used + " non DFS used " + nonDFSUsed 
@@ -82,8 +85,11 @@ public class TestNamenodeCapacityReport 
             + " percentRemaining " + percentRemaining);
         
         assertTrue(configCapacity == (used + remaining + nonDFSUsed));
-        assertTrue(percentUsed == ((100.0f * (float)used)/(float)configCapacity));
-        assertTrue(percentRemaining == ((100.0f * (float)remaining)/(float)configCapacity));
+        assertTrue(percentUsed == DFSUtil.getPercentUsed(used, configCapacity));
+        assertTrue(percentRemaining == DFSUtil.getPercentRemaining(remaining,
+            configCapacity));
+        assertTrue(percentBpUsed == DFSUtil.getPercentUsed(bpUsed,
+            configCapacity));
       }   
       
       DF df = new DF(new File(cluster.getDataDirectory()), conf);
@@ -109,6 +115,8 @@ public class TestNamenodeCapacityReport 
       remaining = namesystem.getCapacityRemaining();
       percentUsed = namesystem.getCapacityUsedPercent();
       percentRemaining = namesystem.getCapacityRemainingPercent();
+      bpUsed = namesystem.getBlockPoolUsedSpace();
+      percentBpUsed = namesystem.getPercentBlockPoolUsed();
       
       LOG.info("Data node directory " + cluster.getDataDirectory());
            
@@ -116,7 +124,8 @@ public class TestNamenodeCapacityReport 
           + configCapacity + " reserved " + reserved + " used " + used 
           + " remaining " + remaining + " nonDFSUsed " + nonDFSUsed 
           + " remaining " + remaining + " percentUsed " + percentUsed 
-          + " percentRemaining " + percentRemaining);
+          + " percentRemaining " + percentRemaining + " bpUsed " + bpUsed
+          + " percentBpUsed " + percentBpUsed);
       
       // Ensure new total capacity reported excludes the reserved space
       assertTrue(configCapacity == diskCapacity - reserved);
@@ -125,7 +134,10 @@ public class TestNamenodeCapacityReport 
       assertTrue(configCapacity == (used + remaining + nonDFSUsed));
 
       // Ensure percent used is calculated based on used and present capacity
-      assertTrue(percentUsed == ((float)used * 100.0f)/(float)configCapacity);
+      assertTrue(percentUsed == DFSUtil.getPercentUsed(used, configCapacity));
+
+      // Ensure percent block pool used is calculated based on bpUsed and present capacity
+      assertTrue(percentBpUsed == DFSUtil.getPercentUsed(bpUsed, configCapacity));
 
       // Ensure percent remaining is calculated based on remaining and present capacity
       assertTrue(percentRemaining == ((float)remaining * 100.0f)/(float)configCapacity);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java Tue Mar  1 04:32:34 2011
@@ -83,7 +83,7 @@ public class TestOverReplicatedBlocks ex
         // so they will be chosen to be deleted when over-replication occurs
         for (DatanodeDescriptor datanode : namesystem.heartbeats) {
           if (!corruptDataNode.equals(datanode)) {
-            datanode.updateHeartbeat(100L, 100L, 0L, 0);
+            datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0);
           }
         }
         

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java Tue Mar  1 04:32:34 2011
@@ -76,7 +76,7 @@ public class TestReplicationPolicy exten
     for(int i=0; i<NUM_OF_DATANODES; i++) {
       dataNodes[i].updateHeartbeat(
           2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
+          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0);
     }
   }
   
@@ -92,7 +92,7 @@ public class TestReplicationPolicy exten
   public void testChooseTarget1() throws Exception {
     dataNodes[0].updateHeartbeat(
         2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 
-        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 4); // overloaded
+        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4); // overloaded
 
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(filename,
@@ -127,7 +127,7 @@ public class TestReplicationPolicy exten
     
     dataNodes[0].updateHeartbeat(
         2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
+        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0); 
   }
 
   /**
@@ -204,7 +204,7 @@ public class TestReplicationPolicy exten
     // make data node 0 to be not qualified to choose
     dataNodes[0].updateHeartbeat(
         2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0); // no space
+        (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0); // no space
         
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(filename,
@@ -242,7 +242,7 @@ public class TestReplicationPolicy exten
 
     dataNodes[0].updateHeartbeat(
         2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
+        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0); 
   }
   
   /**
@@ -258,7 +258,7 @@ public class TestReplicationPolicy exten
     for(int i=0; i<2; i++) {
       dataNodes[i].updateHeartbeat(
           2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0);
+          (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0);
     }
       
     DatanodeDescriptor[] targets;
@@ -290,7 +290,7 @@ public class TestReplicationPolicy exten
     for(int i=0; i<2; i++) {
       dataNodes[i].updateHeartbeat(
           2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
+          FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0);
     }
   }
   /**

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1075655&r1=1075654&r2=1075655&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Tue Mar  1 04:32:34 2011
@@ -102,7 +102,7 @@ public class TestBlockRecovery {
     when(namenode.versionRequest()).thenReturn(new NamespaceInfo
         (1, "cid-test", "bpid-test", 1L, 1));
     when(namenode.sendHeartbeat(any(DatanodeRegistration.class), anyLong(), 
-        anyLong(), anyLong(), anyInt(), anyInt())).thenReturn(
+        anyLong(), anyLong(), anyLong(), anyInt(), anyInt())).thenReturn(
             new DatanodeCommand[0]);
     dn = new DataNode(conf, dirs, null);
     dn.namenodeTODO_FED = namenode; // TODO:FEDERATION - should go to a specific bpid


