hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cutt...@apache.org
Subject svn commit: r477850 - in /lucene/hadoop/trunk: ./ src/java/org/apache/hadoop/dfs/ src/webapps/dfs/ src/webapps/static/
Date Tue, 21 Nov 2006 19:53:41 GMT
Author: cutting
Date: Tue Nov 21 11:53:40 2006
New Revision: 477850

URL: http://svn.apache.org/viewvc?view=rev&rev=477850
Log:
HADOOP-699.  Fix DFS web interface port number problem.  Contributed by Raghu.

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
    lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp
    lucene/hadoop/trunk/src/webapps/static/hadoop.css

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=477850&r1=477849&r2=477850
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Tue Nov 21 11:53:40 2006
@@ -111,6 +111,11 @@
     the format of IPC requests back-compatibly in subsequent releases.
     (omalley via cutting)
 
+34. HADOOP-699.  Fix DFS web interface so that filesystem browsing
+    works correctly, using the right port number.  Also add support
+    for sorting datanode list by various columns.
+    (Raghu Angadi via cutting)
+
 
 Release 0.8.0 - 2006-11-03
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?view=diff&rev=477850&r1=477849&r2=477850
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Tue Nov 21 11:53:40 2006
@@ -158,8 +158,6 @@
         this(InetAddress.getLocalHost().getHostName(), 
              dataDirs,
              createSocketAddr(conf.get("fs.default.name", "local")), conf);
-        // register datanode
-        register();
         int infoServerPort = conf.getInt("dfs.datanode.info.port", 50075);
         String infoServerBindAddress = conf.get("dfs.datanode.info.bindAddress", "0.0.0.0");
        this.infoServer = new StatusHttpServer("datanode", infoServerBindAddress, infoServerPort, true);
@@ -167,6 +165,8 @@
         this.infoServer.addServlet(null, "/streamFile/*", StreamFile.class);
         this.infoServer.start();
         this.dnRegistration.infoPort = this.infoServer.getPort();
+        // register datanode
+        register();
         datanodeObject = this;
     }
     

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java?view=diff&rev=477850&r1=477849&r2=477850
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java Tue Nov 21 11:53:40 2006
@@ -74,6 +74,10 @@
     this.xceiverCount = 0;
     this.blocks.clear();
   }
+
+  int numBlocks() {
+    return blocks.size();
+  }
   
   /**
    */

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java?view=diff&rev=477850&r1=477849&r2=477850
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java Tue Nov 21 11:53:40 2006
@@ -94,6 +94,16 @@
     return name;
   }
   
+  /**
+   * Update fields when a new registration request comes in.
+   * Note that this does not update storageID.
+   */
+  void updateRegInfo( DatanodeID nodeReg ) {
+      name = nodeReg.getName();
+      infoPort = nodeReg.getInfoPort();
+      // update any more fields added in future.
+  }
+    
   /** Comparable.
    * Basis of compare is the String name (host:portNumber) only.
    * @param o

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?view=diff&rev=477850&r1=477849&r2=477850
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Tue Nov 21 11:53:40 2006
@@ -1198,17 +1198,7 @@
       DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
       DatanodeDescriptor nodeN = getDatanodeByName( nodeReg.getName() );
       
-      if( nodeN != null && nodeS != null && nodeN == nodeS ) {
-        // The same datanode has been just restarted to serve the same data 
-        // storage. We do not need to remove old data blocks, the delta will  
-        // be calculated on the next block report from the datanode
-        NameNode.stateChangeLog.info(
-            "BLOCK* NameSystem.registerDatanode: "
-            + "node restarted." );
-        return;
-      }
-      
-      if( nodeN != null ) {
+      if( nodeN != null && nodeN != nodeS ) {
         // nodeN previously served a different data storage, 
         // which is not served by anybody anymore.
         removeDatanode( nodeN );
@@ -1218,18 +1208,25 @@
         getEditLog().logRemoveDatanode( nodeN );
         nodeN = null;
       }
-      
-      // nodeN is not found
-      if( nodeS != null ) {
-        // nodeS is found
-        // The registering datanode is a replacement node for the existing 
-        // data storage, which from now on will be served by a new node.
-        NameNode.stateChangeLog.debug(
+
+      if ( nodeS != null ) {
+        if( nodeN == nodeS ) {
+          // The same datanode has been just restarted to serve the same data 
+          // storage. We do not need to remove old data blocks, the delta will
+          // be calculated on the next block report from the datanode
+          NameNode.stateChangeLog.debug("BLOCK* NameSystem.registerDatanode: "
+                                        + "node restarted." );
+        } else {
+          // nodeS is found
+          // The registering datanode is a replacement node for the existing 
+          // data storage, which from now on will be served by a new node.
+          NameNode.stateChangeLog.debug(
             "BLOCK* NameSystem.registerDatanode: "
             + "node " + nodeS.name
             + " is replaced by " + nodeReg.getName() + "." );
+        }
         getEditLog().logRemoveDatanode( nodeS );
-        nodeS.name = nodeReg.getName();
+        nodeS.updateRegInfo( nodeReg );
         getEditLog().logAddDatanode( nodeS );
         return;
       }
@@ -1763,8 +1760,8 @@
     
     /**
      */
-    public void DFSNodesStatus( Vector<DatanodeDescriptor> live, 
-                                Vector<DatanodeDescriptor> dead) {
+    public void DFSNodesStatus( ArrayList<DatanodeDescriptor> live, 
+                                ArrayList<DatanodeDescriptor> dead ) {
       synchronized (heartbeats) {
         synchronized (datanodeMap) {
          for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext(); ) {

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java?view=diff&rev=477850&r1=477849&r2=477850
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java Tue Nov 21 11:53:40 2006
@@ -140,16 +140,10 @@
       in.close();
       out.print(new String(buf));
     }
-    public void DFSNodesStatus(Vector live, Vector dead) {
-      if (fsn == null) return;
-      TreeMap nodesSortedByName = new TreeMap();
-      fsn.DFSNodesStatus(live, dead);
-      for (int num = 0; num < live.size(); num++) {
-        DatanodeInfo d = (DatanodeInfo)live.elementAt(num);
-        nodesSortedByName.put(d.getName(), d);
-      }
-      live.clear();
-      live.addAll(nodesSortedByName.values());
+    public void DFSNodesStatus( ArrayList<DatanodeDescriptor> live,
+                                ArrayList<DatanodeDescriptor> dead ) {
+        if ( fsn != null )
+            fsn.DFSNodesStatus(live, dead);
     }
     public void addTableHeader(JspWriter out) throws IOException {
       out.print("<table border=\"1\""+
@@ -183,6 +177,72 @@
     public String getSafeModeText() {
       if( ! fsn.isInSafeMode() )
         return "";
-      return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em>";
+      return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
+    }
+    
+    public void sortNodeList(ArrayList<DatanodeDescriptor> nodes,
+                             String field, String order) {
+        
+        class NodeComapare implements Comparator<DatanodeDescriptor> {
+            static final int 
+                FIELD_NAME              = 1,
+                FIELD_LAST_CONTACT      = 2,
+                FIELD_BLOCKS            = 3,
+                FIELD_SIZE              = 4,
+                FIELD_DISK_USED         = 5,
+                SORT_ORDER_ASC          = 1,
+                SORT_ORDER_DSC          = 2;
+
+            int sortField = FIELD_NAME;
+            int sortOrder = SORT_ORDER_ASC;
+            
+            public NodeComapare(String field, String order) {
+                if ( field.equals( "lastcontact" ) ) {
+                    sortField = FIELD_LAST_CONTACT;
+                } else if ( field.equals( "size" ) ) {
+                    sortField = FIELD_SIZE;
+                } else if ( field.equals( "blocks" ) ) {
+                    sortField = FIELD_BLOCKS;
+                } else if ( field.equals( "pcused" ) ) {
+                    sortField = FIELD_DISK_USED;
+                } else {
+                    sortField = FIELD_NAME;
+                }
+                
+                if ( order.equals("DSC") ) {
+                    sortOrder = SORT_ORDER_DSC;
+                } else {
+                    sortOrder = SORT_ORDER_ASC;
+                }
+            }
+
+            public int compare( DatanodeDescriptor d1,
+                                DatanodeDescriptor d2 ) {
+                int ret = 0;
+                switch ( sortField ) {
+                case FIELD_LAST_CONTACT:
+                    ret = (int) (d2.getLastUpdate() - d1.getLastUpdate());
+                    break;
+                case FIELD_BLOCKS:
+                    ret = d1.numBlocks() - d2.numBlocks();
+                    break;
+                case FIELD_SIZE:
+                    long  dlong = d1.getCapacity() - d2.getCapacity();
+                    ret = (dlong < 0) ? -1 : ( (dlong > 0) ? 1 : 0 );
+                    break;
+                case FIELD_DISK_USED:
+                    double ddbl =((d2.getRemaining()*1.0/d2.getCapacity())-
+                                  (d1.getRemaining()*1.0/d1.getCapacity()));
+                    ret = (ddbl < 0) ? -1 : ( (ddbl > 0) ? 1 : 0 );
+                    break;
+                case FIELD_NAME: 
+                    ret = d1.getName().compareTo(d2.getName());
+                    break;
+                }
+                return ( sortOrder == SORT_ORDER_DSC ) ? -ret : ret;
+            }
+        }
+        
+        Collections.sort( nodes, new NodeComapare( field, order ) );
     }
 }

Modified: lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp?view=diff&rev=477850&r1=477849&r2=477850
==============================================================================
--- lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp (original)
+++ lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp Tue Nov 21 11:53:40 2006
@@ -15,8 +15,52 @@
   long currentTime;
   JspHelper jspHelper = new JspHelper();
 
-  public void generateLiveNodeData(JspWriter out, DatanodeInfo d) 
+  int rowNum = 0;
+  int colNum = 0;
+
+  String rowTxt() { colNum = 0;
+      return "<tr class=\"" + (((rowNum++)%2 == 0)? "rowNormal" : "rowAlt")
+          + "\"> "; }
+  String colTxt() { return "<td id=\"col" + ++colNum + "\"> "; }
+  void counterReset () { colNum = 0; rowNum = 0 ; }
+
+  long diskBytes = 1024 * 1024 * 1024;
+  String diskByteStr = "GB";
+
+  String sorterField = null;
+  String sorterOrder = null;
+
+  String NodeHeaderStr(String name) {
+      String ret = "class=header";
+      String order = "ASC";
+      if ( name.equals( sorterField ) ) {
+          ret += sorterOrder;
+          if ( sorterOrder.equals("ASC") )
+              order = "DSC";
+      }
+      ret += " onClick=\"window.document.location=" +
+          "'/dfshealth.jsp?sorter/field=" + name + "&sorter/order=" +
+          order + "'\" title=\"sort on this column\"";
+      
+      return ret;
+  }
+      
+  public void generateLiveNodeData( JspWriter out, DatanodeDescriptor d,
+                                    String suffix, boolean alive )
     throws IOException {
+    
+    String name = d.getName();
+    if ( !name.matches( "\\d+\\.\\d+.\\d+\\.\\d+.*" ) ) 
+        name = name.replaceAll( "\\.[^.:]*", "" );
+    
+    int idx = (suffix != null && name.endsWith( suffix )) ?
+        name.indexOf( suffix ) : -1;    
+    out.print( rowTxt() + "<td class=\"name\"><a title=\"" + d.getName() +
+               "\">" + (( idx > 0 ) ? name.substring(0, idx) : name) +
+               (( alive ) ? "" : "\n") );
+    if ( !alive )
+        return;
+    
     long c = d.getCapacity();
     long r = d.getRemaining();
     long u = c - r;
@@ -27,66 +71,112 @@
     else
       percentUsed = "100";
     
-    out.print("<tr> <td id=\"col1\">" + d.getName() +
-              "<td>" + ((currentTime - d.getLastUpdate())/1000) +
-	      "<td>" + DFSShell.byteDesc(c) +
-	      "<td>" + percentUsed + "\n");
+    out.print("<td class=\"lastcontact\"> " +
+              ((currentTime - d.getLastUpdate())/1000) +
+	      "<td class=\"size\">" +
+              DFSShell.limitDecimal(c*1.0/diskBytes, 2) +
+	      "<td class=\"pcused\">" + percentUsed +
+              "<td class=\"blocks\">" + d.numBlocks() + "\n");
   }
 
   public void generateDFSHealthReport(JspWriter out,
                                       HttpServletRequest request)
                                       throws IOException {
-    Vector live = new Vector();
-    Vector dead = new Vector();
+    ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+    ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
     jspHelper.DFSNodesStatus(live, dead);
+
+    sorterField = request.getParameter("sorter/field");
+    sorterOrder = request.getParameter("sorter/order");
+    if ( sorterField == null )
+        sorterField = "name";
+    if ( sorterOrder == null )
+        sorterOrder = "ASC";
+
+    jspHelper.sortNodeList(live, sorterField, sorterOrder);
+    jspHelper.sortNodeList(dead, "name", "ASC");
+    
+    // Find out common suffix. Should this be before or after the sort?
+    String port_suffix = null;
+    if ( live.size() > 0 ) {
+        String name = live.get(0).getName();
+        int idx = name.indexOf(':');
+        if ( idx > 0 ) {
+            port_suffix = name.substring( idx );
+        }
+        
+        for ( int i=1; port_suffix != null && i < live.size(); i++ ) {
+            if ( live.get(i).getName().endsWith( port_suffix ) == false ) {
+                port_suffix = null;
+                break;
+            }
+        }
+    }
+        
+    counterReset();
     
     out.print( "<div id=\"dfstable\"> <table>\n" +
-	       "<tr> <td id=\"col1\"> Capacity <td> : <td>" +
+	       rowTxt() + colTxt() + "Capacity" + colTxt() + ":" + colTxt() +
 	       DFSShell.byteDesc( fsn.totalCapacity() ) +
-	       "<tr> <td id=\"col1\"> Remaining <td> : <td>" +
+	       rowTxt() + colTxt() + "Remaining" + colTxt() + ":" + colTxt() +
 	       DFSShell.byteDesc( fsn.totalRemaining() ) +
-	       "<tr> <td id=\"col1\"> Used <td> : <td>" +
+	       rowTxt() + colTxt() + "Used" + colTxt() + ":" + colTxt() +
 	       DFSShell.limitDecimal((fsn.totalCapacity() -
 				      fsn.totalRemaining())*100.0/
-				     (fsn.totalCapacity() + 1e-10), 2) +
-	       "%<tr> <td id=\"col1\"> Live Nodes <td> : <td>" + live.size() +
-	       "<tr> <td id=\"col1\"> Dead Nodes <td> : <td>" + dead.size() +
+				     (fsn.totalCapacity() + 1e-10), 2) + " %" +
+	       rowTxt() + colTxt() +
+               "<a href=\"#LiveNodes\">Live Nodes</a> " +
+               colTxt() + ":" + colTxt() + live.size() +
+	       rowTxt() + colTxt() +
+               "<a href=\"#DeadNodes\">Dead Nodes</a> " +
+               colTxt() + ":" + colTxt() + dead.size() +
                "</table></div><br><hr>\n" );
     
     if (live.isEmpty() && dead.isEmpty()) {
 	out.print("There are no datanodes in the cluster");
     }
     else {
-	
+        
         currentTime = System.currentTimeMillis();
 	out.print( "<div id=\"dfsnodetable\"> "+
-                   "<a id=\"title\">" +
+                   "<a name=\"LiveNodes\" id=\"title\">" +
                    "Live Datanodes: " + live.size() + "</a>" +
-                   "<br><br>\n<table border=\"1\">\n" );
+                   "<br><br>\n<table border=1 cellspacing=0>\n" );
 
+        counterReset();
+        
 	if ( live.size() > 0 ) {
+            
+            if ( live.get(0).getCapacity() > 1024 * diskBytes ) {
+                diskBytes *= 1024;
+                diskByteStr = "TB";
+            }
 
-	    out.print( "<tr id=\"row1\">" +
-		       "<td> Node <td> Last Contact <td> Size " +
-		       "<td> Used (%)\n" );
+	    out.print( "<tr class=\"headerRow\"> <th " +
+                       NodeHeaderStr("name") + "> Node <th " +
+                       NodeHeaderStr("lastcontact") + "> Last Contact <th " +
+                       NodeHeaderStr("size") + "> Size (" + diskByteStr +
+                       ") <th " + NodeHeaderStr("pcused") +
+                       "> Used (%) <th " + NodeHeaderStr("blocks") +
+                       "> Blocks\n" );
             
 	    for ( int i=0; i < live.size(); i++ ) {
-		DatanodeInfo d = ( DatanodeInfo ) live.elementAt(i);
-		generateLiveNodeData( out, d );
+		generateLiveNodeData( out, live.get(i), port_suffix, true );
 	    }
 	}
         out.print("</table>\n");
+
+        counterReset();
 	
-	out.print("<br> <a id=\"title\"> " +
+	out.print("<br> <a name=\"DeadNodes\" id=\"title\"> " +
                   " Dead Datanodes : " +dead.size() + "</a><br><br>\n");
 
 	if ( dead.size() > 0 ) {
-	    out.print( "<table border=\"1\"> <tr id=\"row1\"> " +
+	    out.print( "<table border=1 cellspacing=0> <tr id=\"row1\"> " +
 		       "<td> Node \n" );
 	    
 	    for ( int i=0; i < dead.size() ; i++ ) {
-		DatanodeInfo d = ( DatanodeInfo ) dead.elementAt(i);
-		out.print( "<tr> <td> " + d.getName() + "\n" );
+                generateLiveNodeData( out, dead.get(i), port_suffix, false );
 	    }
 	    
 	    out.print("</table>\n");

Modified: lucene/hadoop/trunk/src/webapps/static/hadoop.css
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/static/hadoop.css?view=diff&rev=477850&r1=477849&r2=477850
==============================================================================
--- lucene/hadoop/trunk/src/webapps/static/hadoop.css (original)
+++ lucene/hadoop/trunk/src/webapps/static/hadoop.css Tue Nov 21 11:53:40 2006
@@ -1,8 +1,16 @@
+body {
+  background-color : #ffffff;
+  font-family : sans-serif;
+}
 
 div#dfsnodetable tr#row1, div#dfstable td#col1 {
 	font-weight : bolder;
 }
 
+div#dfstable td#col3 {
+	text-align : right;
+}
+
 div#dfsnodetable caption {
 	text-align : left;
 }
@@ -12,11 +20,35 @@
 	font-weight : bolder;
 }
 
+div#dfsnodetable td, th {
+	border-bottom-style : none;
+}
+
+div#dfsnodetable th.header, th.headerASC, th.headerDSC {
+        padding-bottom : 4px;
+        padding-top : 4px;       
+}
+div#dfsnodetable th.header:hover, th.headerASC:hover, th.headerDSC:hover {
+        text-decoration : underline;
+	cursor : pointer;
+}
+
+div#dfsnodetable td.blocks, td.size, td.pcused, td.lastcontact {
+	text-align : right;
+}
+
+div#dfsnodetable .rowNormal .header {
+	background-color : #ffffff;
+}
+div#dfsnodetable .rowAlt, .headerASC, .headerDSC {
+	background-color : lightyellow;
+}
+
 div#dfstable table {
 	width : 40%;
 }
 
-div#dfsnodetable td, div#dfstable td {
+div#dfsnodetable td, div#dfsnodetable th, div#dfstable td {
 	padding-left : 10px;
 	padding-right : 10px;
 }



Mime
View raw message