hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hair...@apache.org
Subject svn commit: r706350 - in /hadoop/core/trunk: ./ conf/ src/hdfs/org/apache/hadoop/hdfs/protocol/ src/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/org/apache/hadoop/hdfs/server/namenode/ src/webap...
Date Mon, 20 Oct 2008 18:08:24 GMT
Author: hairong
Date: Mon Oct 20 11:08:23 2008
New Revision: 706350

URL: http://svn.apache.org/viewvc?rev=706350&view=rev
Log:
HADOOP-4430. Namenode Web UI capacity report is inconsistent with Balancer. Contributed by
Suresh Srinivas.

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/conf/hadoop-default.xml
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
    hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=706350&r1=706349&r2=706350&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Oct 20 11:08:23 2008
@@ -144,6 +144,16 @@
     consistent with the web interface as defined in HADOOP-2816. (Suresh
     Srinivas via cdouglas)
 
+    HADOOP-4430. Further change the cluster summary at name node web that was
+    changed in HADOOP-2816:
+      Non DFS Used - This indicates the disk space taken by non DFS file from
+                     the Configured capacity
+      DFS Used % - DFS Used % of Configured Capacity 
+      DFS Remaining % - Remaining % of Configured Capacity available for DFS use
+    DFS command line report reflects the same change. Config parameter 
+    dfs.datanode.du.pct is no longer used and is removed from the 
+    hadoop-default.xml. (Suresh Srinivas via hairong)
+
   NEW FEATURES
 
     HADOOP-3341. Allow streaming jobs to specify the field separator for map

Modified: hadoop/core/trunk/conf/hadoop-default.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/conf/hadoop-default.xml?rev=706350&r1=706349&r2=706350&view=diff
==============================================================================
--- hadoop/core/trunk/conf/hadoop-default.xml (original)
+++ hadoop/core/trunk/conf/hadoop-default.xml Mon Oct 20 11:08:23 2008
@@ -374,13 +374,6 @@
 </property>
 
 <property>
-  <name>dfs.datanode.du.pct</name>
-  <value>0.98f</value>
-  <description>When calculating remaining space, only use this percentage of the real
available space
-  </description>
-</property>
-
-<property>
   <name>dfs.name.dir</name>
   <value>${hadoop.tmp.dir}/dfs/name</value>
   <description>Determines where on the local filesystem the DFS name node

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=706350&r1=706349&r2=706350&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Mon Oct 20
11:08:23 2008
@@ -91,24 +91,36 @@
   /** The raw capacity. */
   public long getCapacity() { return capacity; }
   
-  /** The present capacity available for DFS. */
-  public long getPresentCapacity() { return dfsUsed + remaining; }
-  
   /** The used space by the data node. */
   public long getDfsUsed() { return dfsUsed; }
 
+  /** The used space by the data node. */
+  public long getNonDfsUsed() { 
+    long nonDFSUsed = capacity - dfsUsed - remaining;
+    return nonDFSUsed < 0 ? 0 : nonDFSUsed;
+  }
+
   /** The used space by the data node as percentage of present capacity */
   public float getDfsUsedPercent() { 
-    if (getPresentCapacity() <= 0) {
+    if (capacity <= 0) {
       return 100;
     }
 
-    return ((float)dfsUsed * 100.0f)/(float)getPresentCapacity(); 
+    return ((float)dfsUsed * 100.0f)/(float)capacity; 
   }
 
   /** The raw free space. */
   public long getRemaining() { return remaining; }
 
+  /** The remaining space as percentage of configured capacity. */
+  public float getRemainingPercent() { 
+    if (capacity <= 0) {
+      return 0;
+    }
+
+    return ((float)remaining * 100.0f)/(float)capacity; 
+  }
+
   /** The time when this information was accurate. */
   public long getLastUpdate() { return lastUpdate; }
 
@@ -155,10 +167,11 @@
   public String getDatanodeReport() {
     StringBuffer buffer = new StringBuffer();
     long c = getCapacity();
-    long pc = getPresentCapacity();
     long r = getRemaining();
     long u = getDfsUsed();
+    long nonDFSUsed = getNonDfsUsed();
     float usedPercent = getDfsUsedPercent();
+    float remainingPercent = getRemainingPercent();
 
     buffer.append("Name: "+name+"\n");
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
@@ -173,10 +186,11 @@
       buffer.append("Normal\n");
     }
     buffer.append("Configured Capacity: "+c+" ("+FsShell.byteDesc(c)+")"+"\n");
-    buffer.append("Present Capacity: "+pc+" ("+FsShell.byteDesc(pc)+")"+"\n");
-    buffer.append("DFS Remaining: " +r+ "("+FsShell.byteDesc(r)+")"+"\n");
     buffer.append("DFS Used: "+u+" ("+FsShell.byteDesc(u)+")"+"\n");
+    buffer.append("Non DFS Used: "+nonDFSUsed+" ("+FsShell.byteDesc(nonDFSUsed)+")"+"\n");
+    buffer.append("DFS Remaining: " +r+ "("+FsShell.byteDesc(r)+")"+"\n");
     buffer.append("DFS Used%: "+FsShell.limitDecimalTo2(usedPercent)+"%\n");
+    buffer.append("DFS Remaining%: "+FsShell.limitDecimalTo2(remainingPercent)+"%\n");
     buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
     return buffer.toString();
   }
@@ -185,7 +199,6 @@
   public String dumpDatanode() {
     StringBuffer buffer = new StringBuffer();
     long c = getCapacity();
-    long pc = getPresentCapacity();
     long r = getRemaining();
     long u = getDfsUsed();
     buffer.append(name);
@@ -200,7 +213,6 @@
       buffer.append(" IN");
     }
     buffer.append(" " + c + "(" + FsShell.byteDesc(c)+")");
-    buffer.append(" " + pc + "(" + FsShell.byteDesc(c)+")");
     buffer.append(" " + u + "(" + FsShell.byteDesc(u)+")");
     buffer.append(" " + FsShell.limitDecimalTo2(((1.0*u)/c)*100)+"%");
     buffer.append(" " + r + "(" + FsShell.byteDesc(r)+")");

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=706350&r1=706349&r2=706350&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Mon Oct
20 11:08:23 2008
@@ -287,21 +287,16 @@
   }
 
   class FSVolume {
-    static final double USABLE_DISK_PCT_DEFAULT = 0.98f; 
-
     private FSDir dataDir;
     private File tmpDir;
     private File detachDir; // copy on write for blocks in snapshot
     private DF usage;
     private DU dfsUsage;
     private long reserved;
-    private double usableDiskPct = USABLE_DISK_PCT_DEFAULT;
 
     
     FSVolume(File currentDir, Configuration conf) throws IOException {
       this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
-      this.usableDiskPct = conf.getFloat("dfs.datanode.du.pct",
-                                         (float) USABLE_DISK_PCT_DEFAULT);
       File parent = currentDir.getParentFile();
 
       this.detachDir = new File(parent, "detach");
@@ -356,7 +351,7 @@
       if (remaining>available) {
         remaining = available;
       }
-      return (remaining > 0) ? (long)(remaining * usableDiskPct) : 0;
+      return (remaining > 0) ? remaining : 0;
     }
       
     String getMount() throws IOException {

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=706350&r1=706349&r2=706350&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon
Oct 20 11:08:23 2008
@@ -3277,15 +3277,6 @@
   }
 
   /**
-   * Total raw bytes including non-dfs used space.
-   */
-  public long getPresentCapacity() {
-    synchronized (heartbeats) {
-      return this.capacityUsed + this.capacityRemaining;
-    }
-  }
-
-  /**
    * Total used space by data nodes
    */
   public long getCapacityUsed() {
@@ -3294,18 +3285,29 @@
     }
   }
   /**
-   * Total used space by data nodes
+   * Total used space by data nodes as percentage of total capacity
    */
   public float getCapacityUsedPercent() {
     synchronized(heartbeats){
-      if (getPresentCapacity() <= 0) {
+      if (capacityTotal <= 0) {
         return 100;
       }
 
-      return ((float)getCapacityUsed() * 100.0f)/(float)getPresentCapacity();
+      return ((float)capacityUsed * 100.0f)/(float)capacityTotal;
     }
   }
   /**
+   * Total used space by data nodes for non DFS purposes such
+   * as storing temporary files on the local file system
+   */
+  public long getCapacityUsedNonDFS() {
+    long nonDFSUsed = 0;
+    synchronized(heartbeats){
+      nonDFSUsed = capacityTotal - capacityRemaining - capacityUsed;
+    }
+    return nonDFSUsed < 0 ? 0 : nonDFSUsed;
+  }
+  /**
    * Total non-used raw bytes.
    */
   public long getCapacityRemaining() {
@@ -3315,6 +3317,18 @@
   }
 
   /**
+   * Total remaining space by data nodes as percentage of total capacity
+   */
+  public float getCapacityRemainingPercent() {
+    synchronized(heartbeats){
+      if (capacityTotal <= 0) {
+        return 0;
+      }
+
+      return ((float)capacityRemaining * 100.0f)/(float)capacityTotal;
+    }
+  }
+  /**
    * Total number of connections.
    */
   public int getTotalLoad() {

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java?rev=706350&r1=706349&r2=706350&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java Mon Oct
20 11:08:23 2008
@@ -240,8 +240,12 @@
         FIELD_NAME              = 1,
         FIELD_LAST_CONTACT      = 2,
         FIELD_BLOCKS            = 3,
-        FIELD_SIZE              = 4,
-        FIELD_DISK_USED         = 5,
+        FIELD_CAPACITY          = 4,
+        FIELD_USED              = 5,
+        FIELD_PERCENT_USED      = 6,
+        FIELD_NONDFS_USED       = 7,
+        FIELD_REMAINING         = 8,
+        FIELD_PERCENT_REMAINING = 9,
         SORT_ORDER_ASC          = 1,
         SORT_ORDER_DSC          = 2;
 
@@ -251,12 +255,20 @@
       public NodeComapare(String field, String order) {
         if (field.equals("lastcontact")) {
           sortField = FIELD_LAST_CONTACT;
-        } else if (field.equals("size")) {
-          sortField = FIELD_SIZE;
+        } else if (field.equals("capacity")) {
+          sortField = FIELD_CAPACITY;
+        } else if (field.equals("used")) {
+          sortField = FIELD_USED;
+        } else if (field.equals("nondfsused")) {
+          sortField = FIELD_NONDFS_USED;
+        } else if (field.equals("remaining")) {
+          sortField = FIELD_REMAINING;
+        } else if (field.equals("pcused")) {
+          sortField = FIELD_PERCENT_USED;
+        } else if (field.equals("pcremaining")) {
+          sortField = FIELD_PERCENT_REMAINING;
         } else if (field.equals("blocks")) {
           sortField = FIELD_BLOCKS;
-        } else if (field.equals("pcused")) {
-          sortField = FIELD_DISK_USED;
         } else {
           sortField = FIELD_NAME;
         }
@@ -275,18 +287,35 @@
         case FIELD_LAST_CONTACT:
           ret = (int) (d2.getLastUpdate() - d1.getLastUpdate());
           break;
-        case FIELD_BLOCKS:
-          ret = d1.numBlocks() - d2.numBlocks();
-          break;
-        case FIELD_SIZE:
+        case FIELD_CAPACITY:
           long  dlong = d1.getCapacity() - d2.getCapacity();
           ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
           break;
-        case FIELD_DISK_USED:
-          double ddbl =((d2.getRemaining()*1.0/d2.getCapacity())-
-                        (d1.getRemaining()*1.0/d1.getCapacity()));
+        case FIELD_USED:
+          dlong = d1.getDfsUsed() - d2.getDfsUsed();
+          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
+          break;
+        case FIELD_NONDFS_USED:
+          dlong = d1.getNonDfsUsed() - d2.getNonDfsUsed();
+          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
+          break;
+        case FIELD_REMAINING:
+          dlong = d1.getRemaining() - d2.getRemaining();
+          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
+          break;
+        case FIELD_PERCENT_USED:
+          double ddbl =((d1.getDfsUsedPercent())-
+                        (d2.getDfsUsedPercent()));
+          ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
+          break;
+        case FIELD_PERCENT_REMAINING:
+          ddbl =((d1.getRemainingPercent())-
+                 (d2.getRemainingPercent()));
           ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
           break;
+        case FIELD_BLOCKS:
+          ret = d1.numBlocks() - d2.numBlocks();
+          break;
         case FIELD_NAME: 
           ret = d1.getHostName().compareTo(d2.getHostName());
           break;

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java?rev=706350&r1=706349&r2=706350&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
(original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
Mon Oct 20 11:08:23 2008
@@ -64,22 +64,25 @@
       
       assertTrue(live.size() == 1);
       
-      long used, remaining, totalCapacity, presentCapacity;
-      float percentUsed;
+      long used, remaining, configCapacity, nonDFSUsed;
+      float percentUsed, percentRemaining;
       
       for (final DatanodeDescriptor datanode : live) {
         used = datanode.getDfsUsed();
         remaining = datanode.getRemaining();
-        totalCapacity = datanode.getCapacity();
-        presentCapacity = datanode.getPresentCapacity();
+        nonDFSUsed = datanode.getNonDfsUsed();
+        configCapacity = datanode.getCapacity();
         percentUsed = datanode.getDfsUsedPercent();
+        percentRemaining = datanode.getRemainingPercent();
         
-        LOG.info("Datanode totalCapacity " + totalCapacity
-            + " presentCapacity " + presentCapacity + " used " + used
-            + " remaining " + remaining + " perenceUsed " + percentUsed);
+        LOG.info("Datanode configCapacity " + configCapacity
+            + " used " + used + " non DFS used " + nonDFSUsed 
+            + " remaining " + remaining + " perentUsed " + percentUsed
+            + " percentRemaining " + percentRemaining);
         
-        assertTrue(presentCapacity == (used + remaining));
-        assertTrue(percentUsed == ((100.0f * (float)used)/(float)presentCapacity));
+        assertTrue(configCapacity == (used + remaining + nonDFSUsed));
+        assertTrue(percentUsed == ((100.0f * (float)used)/(float)configCapacity));
+        assertTrue(percentRemaining == ((100.0f * (float)remaining)/(float)configCapacity));
       }   
       
       DF df = new DF(new File(cluster.getDataDirectory()), conf);
@@ -99,27 +102,32 @@
       long diskCapacity = numOfDataDirs * df.getCapacity();
       reserved *= numOfDataDirs;
       
-      totalCapacity = namesystem.getCapacityTotal();
-      presentCapacity = namesystem.getPresentCapacity();
+      configCapacity = namesystem.getCapacityTotal();
       used = namesystem.getCapacityUsed();
+      nonDFSUsed = namesystem.getCapacityUsedNonDFS();
       remaining = namesystem.getCapacityRemaining();
       percentUsed = namesystem.getCapacityUsedPercent();
+      percentRemaining = namesystem.getCapacityRemainingPercent();
       
       LOG.info("Data node directory " + cluster.getDataDirectory());
            
-      LOG.info("Name node diskCapacity " + diskCapacity + " totalCapacity "
-          + totalCapacity + " reserved " + reserved + " presentCapacity "
-          + presentCapacity + " used " + used + " remaining " + remaining
-          + " percentUsed " + percentUsed);
+      LOG.info("Name node diskCapacity " + diskCapacity + " configCapacity "
+          + configCapacity + " reserved " + reserved + " used " + used 
+          + " remaining " + remaining + " nonDFSUsed " + nonDFSUsed 
+          + " remaining " + remaining + " percentUsed " + percentUsed 
+          + " percentRemaining " + percentRemaining);
       
       // Ensure new total capacity reported excludes the reserved space
-      assertTrue(totalCapacity == diskCapacity - reserved);
-      
-      // Ensure present capacity is sum of used and remaining
-      assertTrue(presentCapacity == (used + remaining));
+      assertTrue(configCapacity == diskCapacity - reserved);
       
+      // Ensure new total capacity reported excludes the reserved space
+      assertTrue(configCapacity == (used + remaining + nonDFSUsed));
+
+      // Ensure percent used is calculated based on used and present capacity
+      assertTrue(percentUsed == ((float)used * 100.0f)/(float)configCapacity);
+
       // Ensure percent used is calculated based on used and present capacity
-      assertTrue(percentUsed == ((float)used * 100.0f)/(float)presentCapacity);
+      assertTrue(percentRemaining == ((float)remaining * 100.0f)/(float)configCapacity);
     }
     finally {
       if (cluster != null) {cluster.shutdown();}

Modified: hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp?rev=706350&r1=706349&r2=706350&view=diff
==============================================================================
--- hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp (original)
+++ hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp Mon Oct 20 11:08:23 2008
@@ -85,9 +85,11 @@
         return;
     
     long c = d.getCapacity();
-    long pc = d.getPresentCapacity();
     long u = d.getDfsUsed();
+    long nu = d.getNonDfsUsed();
+    long r = d.getRemaining();
     String percentUsed = FsShell.limitDecimalTo2(d.getDfsUsedPercent());    
+    String percentRemaining = FsShell.limitDecimalTo2(d.getRemainingPercent());    
     
     String adminState = (d.isDecommissioned() ? "Decommissioned" :
                          (d.isDecommissionInProgress() ? "Decommission In Progress":
@@ -97,16 +99,20 @@
     long currentTime = System.currentTimeMillis();
     out.print("<td class=\"lastcontact\"> " +
               ((currentTime - timestamp)/1000) +
-	      "<td class=\"adminstate\">" +
+              "<td class=\"adminstate\">" +
               adminState +
-	      "<td class=\"size\">" +
+              "<td align=\"right\" class=\"capacity\">" +
               FsShell.limitDecimalTo2(c*1.0/diskBytes) +
-	      "<td align=\"right\" class=\"pcapacity\">" +
-              FsShell.limitDecimalTo2(pc*1.0/diskBytes) +      
-	      "<td class=\"pcused\">" + percentUsed +"<td class=\"pcused\">" +
-	      ServletUtil.percentageGraph( (int)Double.parseDouble(percentUsed) , 100) +
-	      "<td class=\"size\">" +
-              FsShell.limitDecimalTo2(d.getRemaining()*1.0/diskBytes) +
+              "<td align=\"right\" class=\"used\">" +
+              FsShell.limitDecimalTo2(u*1.0/diskBytes) +      
+              "<td align=\"right\" class=\"nondfsused\">" +
+              FsShell.limitDecimalTo2(nu*1.0/diskBytes) +      
+              "<td align=\"right\" class=\"remaining\">" +
+              FsShell.limitDecimalTo2(r*1.0/diskBytes) +      
+              "<td align=\"right\" class=\"pcused\">" + percentUsed +
+              "<td class=\"pcused\">" +
+              ServletUtil.percentageGraph( (int)Double.parseDouble(percentUsed) , 100) +
+              "<td align=\"right\" class=\"pcremaining\">" + percentRemaining +
               "<td title=" + "\"blocks scheduled : " + d.getBlocksScheduled() + 
               "\" class=\"blocks\">" + d.numBlocks() + "\n");
   }
@@ -125,9 +131,6 @@
     if ( sorterOrder == null )
         sorterOrder = "ASC";
 
-    jspHelper.sortNodeList(live, sorterField, sorterOrder);
-    jspHelper.sortNodeList(dead, "name", "ASC");
-    
     // Find out common suffix. Should this be before or after the sort?
     String port_suffix = null;
     if ( live.size() > 0 ) {
@@ -148,22 +151,25 @@
     counterReset();
     
     long total = fsn.getCapacityTotal();
-    long present = fsn.getPresentCapacity();
     long remaining = fsn.getCapacityRemaining();
     long used = fsn.getCapacityUsed();
+    long nonDFS = fsn.getCapacityUsedNonDFS();
     float percentUsed = fsn.getCapacityUsedPercent();
+    float percentRemaining = fsn.getCapacityRemainingPercent();
 
     out.print( "<div id=\"dfstable\"> <table>\n" +
 	       rowTxt() + colTxt() + "Configured Capacity" + colTxt() + ":" + colTxt() +
 	       FsShell.byteDesc( total ) +
-	       rowTxt() + colTxt() + "Present Capacity" + colTxt() + ":" + colTxt() +
-	       FsShell.byteDesc( present ) +
-	       rowTxt() + colTxt() + "DFS Remaining" + colTxt() + ":" + colTxt() +
-	       FsShell.byteDesc( remaining ) +
 	       rowTxt() + colTxt() + "DFS Used" + colTxt() + ":" + colTxt() +
 	       FsShell.byteDesc( used ) +
+	       rowTxt() + colTxt() + "Non DFS Used" + colTxt() + ":" + colTxt() +
+	       FsShell.byteDesc( nonDFS ) +
+	       rowTxt() + colTxt() + "DFS Remaining" + colTxt() + ":" + colTxt() +
+	       FsShell.byteDesc( remaining ) +
 	       rowTxt() + colTxt() + "DFS Used%" + colTxt() + ":" + colTxt() +
 	       FsShell.limitDecimalTo2(percentUsed) + " %" +
+	       rowTxt() + colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt() +
+	       FsShell.limitDecimalTo2(percentRemaining) + " %" +
 	       rowTxt() + colTxt() +
                "<a href=\"#LiveNodes\">Live Nodes</a> " +
                colTxt() + ":" + colTxt() + live.size() +
@@ -191,24 +197,28 @@
                 diskByteStr = "TB";
             }
 
-	    out.print( "<tr class=\"headerRow\"> <th " +
-                       ("name") + "> Node <th " +
-                       NodeHeaderStr("lastcontact") + "> Last Contact <th " +
-                       NodeHeaderStr("adminstate") + "> Admin State <th " +
-                       NodeHeaderStr("size") + "> Configured capacity (" + 
-                       diskByteStr + ") <th " + 
-                       NodeHeaderStr("pcapacity") + "> Present capacity (" + 
-                       diskByteStr + ") <th " + 
-                       NodeHeaderStr("pcused") + "> Used (%) <th " + 
-                       NodeHeaderStr("pcused") + "> Used (%) <th " +
-                       NodeHeaderStr("remaining") + "> Remaining (" + 
-                       diskByteStr + ") <th " +
-                       NodeHeaderStr("blocks") + "> Blocks\n" );
+      out.print( "<tr class=\"headerRow\"> <th " +
+                 NodeHeaderStr("name") + "> Node <th " +
+                 NodeHeaderStr("lastcontact") + "> Last <br>Contact <th " +
+                 NodeHeaderStr("adminstate") + "> Admin State <th " +
+                 NodeHeaderStr("capacity") + "> Configured <br>Capacity (" + 
+                 diskByteStr + ") <th " + 
+                 NodeHeaderStr("used") + "> Used <br>(" + 
+                 diskByteStr + ") <th " + 
+                 NodeHeaderStr("nondfsused") + "> Non DFS <br>Used (" + 
+                 diskByteStr + ") <th " + 
+                 NodeHeaderStr("remaining") + "> Remaining <br>(" + 
+                 diskByteStr + ") <th " + 
+                 NodeHeaderStr("pcused") + "> Used <br>(%) <th " + 
+                 NodeHeaderStr("pcused") + "> Used <br>(%) <th " +
+                 NodeHeaderStr("pcremaining") + "> Remaining <br>(%) <th " +
+                 NodeHeaderStr("blocks") + "> Blocks\n" );
             
-	    for ( int i=0; i < live.size(); i++ ) {
-		generateNodeData( out, live.get(i), port_suffix, true );
-	    }
-	}
+      jspHelper.sortNodeList(live, sorterField, sorterOrder);
+      for ( int i=0; i < live.size(); i++ ) {
+        generateNodeData( out, live.get(i), port_suffix, true );
+      }
+    }
     out.print("</table>\n");
     
     counterReset();
@@ -220,6 +230,7 @@
 	    out.print( "<table border=1 cellspacing=0> <tr id=\"row1\"> " +
 		       "<td> Node \n" );
 	    
+      jspHelper.sortNodeList(dead, "name", "ASC");
 	    for ( int i=0; i < dead.size() ; i++ ) {
                 generateNodeData( out, dead.get(i), port_suffix, false );
 	    }



Mime
View raw message