hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cutt...@apache.org
Subject svn commit: r417566 - in /lucene/hadoop/trunk: ./ src/java/org/apache/hadoop/dfs/ src/webapps/dfs/
Date Tue, 27 Jun 2006 19:32:12 GMT
Author: cutting
Date: Tue Jun 27 12:32:11 2006
New Revision: 417566

URL: http://svn.apache.org/viewvc?rev=417566&view=rev
Log:
HADOOP-321.  Refactor some DFS classes.  Contributed by Konstantin.

Added:
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSShell.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNodeReport.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
    lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=417566&r1=417565&r2=417566&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Tue Jun 27 12:32:11 2006
@@ -70,6 +70,10 @@
     on a DFS datanode.  One may specify both the percentage free and
     the number of bytes.  (Johan Oskarson via cutting)
 
+17. HADOOP-321.  Refactor some DFS classes.  DataNodeReport nearly
+    duplicated DataNodeInfo.  The former is now deprecated, replaced
+    by the latter.  (Konstantin Shvachko via cutting)
+
 
 Release 0.3.2 - 2006-06-09
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSShell.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSShell.java?rev=417566&r1=417565&r2=417566&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSShell.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSShell.java Tue Jun 27 12:32:11 2006
@@ -284,11 +284,11 @@
         System.out.println("Effective replication multiplier: " + (1.0 * rawUsed / used));
 
         System.out.println("-------------------------------------------------");
-        DataNodeReport info[] = dfs.getDataNodeStats();
+        DatanodeInfo info[] = dfs.getDataNodeStats();
         System.out.println("Datanodes available: " + info.length);
         System.out.println();
         for (int i = 0; i < info.length; i++) {
-          System.out.println(info[i]);
+          System.out.println(info[i].getDatanodeReport());
           System.out.println();
         }
       }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNodeReport.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNodeReport.java?rev=417566&r1=417565&r2=417566&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNodeReport.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNodeReport.java Tue Jun 27 12:32:11 2006
@@ -1,46 +1,12 @@
 package org.apache.hadoop.dfs;
 
-import java.util.Date;
-
-import org.apache.hadoop.io.UTF8;
-
 /** A report on the status of a DataNode.
  *
  * @see DistributedFileSystem#getDataNodeStats
+ * @deprecated Use {@link DatanodeInfo} instead.
  */
-public class DataNodeReport {
-  String name;
-  String host;
-  long capacity;
-  long remaining;
-  long lastUpdate;
-  
-  /** The name of the datanode. */
-  public String getName() { return name; }
-
-  /** The hostname of the datanode. */
-  public String getHost() { return host; }
-
-  /** The raw capacity. */
-  public long getCapacity() { return capacity; }
-
-  /** The raw free space. */
-  public long getRemaining() { return remaining; }
-
-  /** The time when this information was accurate. */
-  public long getLastUpdate() { return lastUpdate; }
-
+public class DataNodeReport extends DatanodeInfo {
   public String toString() {
-    StringBuffer buffer = new StringBuffer();
-    long c = getCapacity();
-    long r = getRemaining();
-    long u = c - r;
-    buffer.append("Name: "+name+"\n");
-    buffer.append("Total raw bytes: "+c+" ("+DFSShell.byteDesc(c)+")"+"\n");
-    buffer.append("Used raw bytes: "+u+" ("+DFSShell.byteDesc(u)+")"+"\n");
-    buffer.append("% used: "+DFSShell.limitDecimal(((1.0*u)/c)*100,2)+"%"+"\n");
-    buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
-    return buffer.toString();
+    return super.getDatanodeReport();
   }
-
 }

Added: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java?rev=417566&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java (added)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java Tue Jun 27 12:32:11 2006
@@ -0,0 +1,108 @@
+/**
+ * Copyright 2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import java.util.*;
+
+/**************************************************
+ * DatanodeDescriptor tracks stats on a given DataNode,
+ * such as available storage capacity, last update time, etc.,
+ * and maintains a set of blocks stored on the datanode. 
+ *
+ * @author Mike Cafarella
+ * @author Konstantin Shvachko
+ **************************************************/
+class DatanodeDescriptor extends DatanodeInfo implements Comparable {
+
+  private volatile TreeSet blocks;
+
+  DatanodeDescriptor( DatanodeID nodeID ) {
+    this( nodeID.getName(), nodeID.getStorageID(), 0, 0);
+  }
+  
+  /**
+   * Create DatanodeDescriptor.
+   */
+  DatanodeDescriptor( DatanodeID nodeID, 
+                            long capacity, 
+                            long remaining) {
+    this( nodeID.getName(), nodeID.getStorageID(), capacity, remaining );
+  }
+
+  /**
+   * @param name hostname:portNumber as String object.
+   */
+  DatanodeDescriptor( String name, 
+                            String storageID, 
+                            long capacity, 
+                            long remaining) {
+    super( name, storageID );
+    this.blocks = new TreeSet();
+    updateHeartbeat(capacity, remaining);
+  }
+
+  /**
+   */
+  void updateBlocks(Block newBlocks[]) {
+    blocks.clear();
+    for (int i = 0; i < newBlocks.length; i++) {
+      blocks.add(newBlocks[i]);
+    }
+  }
+
+  /**
+   */
+  void addBlock(Block b) {
+    blocks.add(b);
+  }
+
+  /**
+   */
+  void updateHeartbeat(long capacity, long remaining) {
+    this.capacity = capacity;
+    this.remaining = remaining;
+    this.lastUpdate = System.currentTimeMillis();
+  }
+  
+  /**
+   * Verify whether the node is dead.
+   * 
+   * A data node is considered dead if its last heartbeat was received
+   * EXPIRE_INTERVAL msecs ago.
+   */
+  boolean isDead() {
+    return getLastUpdate() < 
+              System.currentTimeMillis() - FSConstants.EXPIRE_INTERVAL;
+  }
+
+  Block[] getBlocks() {
+    return (Block[]) blocks.toArray(new Block[blocks.size()]);
+  }
+
+  Iterator getBlockIterator() {
+    return blocks.iterator();
+  }
+
+  /** Comparable.
+   * Basis of compare is the String name (host:portNumber) only.
+   * @param o
+   * @return as specified by Comparable.
+   */
+  public int compareTo(Object o) {
+    DatanodeDescriptor d = (DatanodeDescriptor) o;
+    return name.compareTo(d.getName());
+  }
+}

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java?rev=417566&r1=417565&r2=417566&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java Tue Jun 27 12:32:11 2006
@@ -7,7 +7,7 @@
  * 
  * @author Konstantin Shvachko
  */
-class DatanodeID {
+public class DatanodeID {
 
   protected String name;      /// hostname:portNumber
   protected String storageID; /// unique per cluster storageID

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java?rev=417566&r1=417565&r2=417566&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java Tue Jun 27 12:32:11 2006
@@ -15,151 +15,95 @@
  */
 package org.apache.hadoop.dfs;
 
-import org.apache.hadoop.io.*;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Date;
+
+import org.apache.hadoop.io.UTF8;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
 
-import java.io.*;
-import java.util.*;
-
-/**************************************************
- * DatanodeInfo tracks stats on a given DataNode,
- * such as available storage capacity, last update
- * time, etc.
+/** 
+ * DatanodeInfo represents the status of a DataNode.
  *
  * @author Mike Cafarella
- **************************************************/
-public class DatanodeInfo extends DatanodeID implements Writable, Comparable {
+ * @author Konstantin Shvachko
+ */
+public class DatanodeInfo extends DatanodeID implements Writable {
+  protected long capacity;
+  protected long remaining;
+  protected long lastUpdate;
+
+  DatanodeInfo() {
+    this( new String(), new String() );
+  }
+  
+  DatanodeInfo( String name, String storageID) {
+    super( name, storageID );
+    this.capacity = 0L;
+    this.remaining = 0L;
+    this.lastUpdate = 0L;
+  }
+  
+  /** The raw capacity. */
+  public long getCapacity() { return capacity; }
+
+  /** The raw free space. */
+  public long getRemaining() { return remaining; }
+
+  /** The time when this information was accurate. */
+  public long getLastUpdate() { return lastUpdate; }
+
+  /** @deprecated Use {@link #getLastUpdate()} instead. */
+  public long lastUpdate() { return getLastUpdate(); }
+
+  /** A formatted string for reporting the status of the DataNode. */
+  public String getDatanodeReport() {
+    StringBuffer buffer = new StringBuffer();
+    long c = getCapacity();
+    long r = getRemaining();
+    long u = c - r;
+    buffer.append("Name: "+name+"\n");
+    buffer.append("Total raw bytes: "+c+" ("+DFSShell.byteDesc(c)+")"+"\n");
+    buffer.append("Used raw bytes: "+u+" ("+DFSShell.byteDesc(u)+")"+"\n");
+    buffer.append("% used: "+DFSShell.limitDecimal(((1.0*u)/c)*100,2)+"%"+"\n");
+    buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
+    return buffer.toString();
+  }
+
+  /////////////////////////////////////////////////
+  // Writable
+  /////////////////////////////////////////////////
+  static {                                      // register a ctor
+    WritableFactories.setFactory
+      (DatanodeInfo.class,
+       new WritableFactory() {
+         public Writable newInstance() { return new DatanodeInfo(); }
+       });
+  }
 
-    static {                                      // register a ctor
-      WritableFactories.setFactory
-        (DatanodeInfo.class,
-         new WritableFactory() {
-           public Writable newInstance() { return new DatanodeInfo(); }
-         });
-    }
-
-    private long capacityBytes, remainingBytes, lastUpdate;
-    private volatile TreeSet blocks;
-
-    /** Create an empty DatanodeInfo.
-     */
-    public DatanodeInfo() {
-        this(new String(), new String(), 0, 0);
-    }
-
-    public DatanodeInfo( DatanodeID nodeID ) {
-      this( nodeID.getName(), nodeID.getStorageID(), 0, 0);
-    }
-    
-   /**
-    * Create an empty DatanodeInfo.
-    */
-    public DatanodeInfo(DatanodeID nodeID, 
-                        long capacity, 
-                        long remaining) {
-      this( nodeID.getName(), nodeID.getStorageID(), capacity, remaining );
-    }
-
-   /**
-    * @param name hostname:portNumber as String object.
-    */
-    public DatanodeInfo(String name, 
-                        String storageID, 
-                        long capacity, 
-                        long remaining) {
-        super( name, storageID );
-        this.blocks = new TreeSet();
-        updateHeartbeat(capacity, remaining);
-    }
-
-   /**
-    */
-    public void updateBlocks(Block newBlocks[]) {
-        blocks.clear();
-        for (int i = 0; i < newBlocks.length; i++) {
-            blocks.add(newBlocks[i]);
-        }
-    }
-
-   /**
-    */
-    public void addBlock(Block b) {
-        blocks.add(b);
-    }
-
-    /**
-     */
-    public void updateHeartbeat(long capacity, long remaining) {
-        this.capacityBytes = capacity;
-        this.remainingBytes = remaining;
-        this.lastUpdate = System.currentTimeMillis();
-    }
-
-    public Block[] getBlocks() {
-        return (Block[]) blocks.toArray(new Block[blocks.size()]);
-    }
-    public Iterator getBlockIterator() {
-        return blocks.iterator();
-    }
-    public long getCapacity() {
-        return capacityBytes;
-    }
-    public long getRemaining() {
-        return remainingBytes;
-    }
-    public long lastUpdate() {
-        return lastUpdate;
-    }
-
-  /** Comparable.
-   * Basis of compare is the String name (host:portNumber) only.
-   * @param o
-   * @return as specified by Comparable.
+  /**
    */
-    public int compareTo(Object o) {
-        DatanodeInfo d = (DatanodeInfo) o;
-        return name.compareTo(d.getName());
-    }
-
-    /////////////////////////////////////////////////
-    // Writable
-    /////////////////////////////////////////////////
-    /**
-     */
-    public void write(DataOutput out) throws IOException {
-        new UTF8( this.name ).write(out);
-        new UTF8( this.storageID ).write(out);
-        out.writeLong(capacityBytes);
-        out.writeLong(remainingBytes);
-        out.writeLong(lastUpdate);
-
-        /**
-        out.writeInt(blocks.length);
-        for (int i = 0; i < blocks.length; i++) {
-            blocks[i].write(out);
-        }
-        **/
-    }
-
-    /**
-     */
-    public void readFields(DataInput in) throws IOException {
-        UTF8 uStr = new UTF8();
-        uStr.readFields(in);
-        this.name = uStr.toString();
-        uStr.readFields(in);
-        this.storageID = uStr.toString();
-        this.capacityBytes = in.readLong();
-        this.remainingBytes = in.readLong();
-        this.lastUpdate = in.readLong();
-
-        /**
-        int numBlocks = in.readInt();
-        this.blocks = new Block[numBlocks];
-        for (int i = 0; i < blocks.length; i++) {
-            blocks[i] = new Block();
-            blocks[i].readFields(in);
-        }
-        **/
-    }
-}
+  public void write(DataOutput out) throws IOException {
+    new UTF8( this.name ).write(out);
+    new UTF8( this.storageID ).write(out);
+    out.writeLong(capacity);
+    out.writeLong(remaining);
+    out.writeLong(lastUpdate);
+  }
 
+  /**
+   */
+  public void readFields(DataInput in) throws IOException {
+    UTF8 uStr = new UTF8();
+    uStr.readFields(in);
+    this.name = uStr.toString();
+    uStr.readFields(in);
+    this.storageID = uStr.toString();
+    this.capacity = in.readLong();
+    this.remaining = in.readLong();
+    this.lastUpdate = in.readLong();
+  }
+}

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java?rev=417566&r1=417565&r2=417566&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java Tue Jun 27 12:32:11 2006
@@ -242,18 +242,7 @@
     }
 
     /** Return statistics for each datanode.*/
-    public DataNodeReport[] getDataNodeStats() throws IOException {
-      DatanodeInfo[]  dnReport = dfs.datanodeReport();
-      DataNodeReport[] reports = new DataNodeReport[dnReport.length];
-
-      for (int i = 0; i < dnReport.length; i++) {
-        reports[i] = new DataNodeReport();
-        reports[i].name = dnReport[i].getName();
-        reports[i].host = dnReport[i].getHost();
-        reports[i].capacity = dnReport[i].getCapacity();
-        reports[i].remaining = dnReport[i].getRemaining();
-        reports[i].lastUpdate = dnReport[i].lastUpdate();
-      }
-      return reports;
+    public DatanodeInfo[] getDataNodeStats() throws IOException {
+      return dfs.datanodeReport();
     }
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=417566&r1=417565&r2=417566&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Tue Jun 27 12:32:11 2006
@@ -49,6 +49,7 @@
     //
     // Stores the block-->datanode(s) map.  Updated only in response
     // to client-sent information.
+    // Mapping: Block -> TreeSet<DatanodeDescriptor>
     //
     TreeMap blocksMap = new TreeMap();
 
@@ -56,10 +57,10 @@
     // Stores the datanode-->block map.  Done by storing a 
     // set of datanode info objects, sorted by name.  Updated only in
     // response to client-sent information.
+    // Mapping: StorageID -> DatanodeDescriptor
     //
     TreeMap datanodeMap = new TreeMap();
 
-    
     //
     // Stores the set of dead datanodes
     TreeMap deaddatanodeMap = new TreeMap();
@@ -68,6 +69,7 @@
     // Keeps a Vector for every named machine.  The Vector contains
     // blocks that have recently been invalidated and are thought to live
     // on the machine in question.
+    // Mapping: StorageID -> Vector<Block>
     //
     TreeMap recentInvalidateSets = new TreeMap();
 
@@ -75,19 +77,20 @@
     // Keeps a TreeSet for every named node.  Each treeset contains
     // a list of the blocks that are "extra" at that location.  We'll
     // eventually remove these extras.
+    // Mapping: Block -> TreeSet<DatanodeDescriptor>
     //
     TreeMap excessReplicateMap = new TreeMap();
 
     //
     // Keeps track of files that are being created, plus the
     // blocks that make them up.
-    //
-    // Maps file names to FileUnderConstruction objects
+    // Mapping: fileName -> FileUnderConstruction
     //
     TreeMap pendingCreates = new TreeMap();
 
     //
     // Keeps track of the blocks that are part of those pending creates
+    // Set of: Block
     //
     TreeSet pendingCreateBlocks = new TreeSet();
 
@@ -108,14 +111,14 @@
     Random r = new Random();
 
     //
-    // Stores a set of datanode info objects, sorted by heartbeat
+    // Stores a set of DatanodeDescriptor objects, sorted by heartbeat
     //
     TreeSet heartbeats = new TreeSet(new Comparator() {
         public int compare(Object o1, Object o2) {
-            DatanodeInfo d1 = (DatanodeInfo) o1;
-            DatanodeInfo d2 = (DatanodeInfo) o2;            
-            long lu1 = d1.lastUpdate();
-            long lu2 = d2.lastUpdate();
+            DatanodeDescriptor d1 = (DatanodeDescriptor) o1;
+            DatanodeDescriptor d2 = (DatanodeDescriptor) o2;            
+            long lu1 = d1.getLastUpdate();
+            long lu2 = d2.getLastUpdate();
             if (lu1 < lu2) {
                 return -1;
             } else if (lu1 > lu2) {
@@ -129,14 +132,17 @@
     //
     // Store set of Blocks that need to be replicated 1 or more times.
     // We also store pending replication-orders.
+    // Set of: Block
     //
     private TreeSet neededReplications = new TreeSet();
     private TreeSet pendingReplications = new TreeSet();
 
     //
     // Used for handling lock-leases
+    // Mapping: leaseHolder -> Lease
     //
     private TreeMap leases = new TreeMap();
+    // Set of: Lease
     private TreeSet sortedLeases = new TreeSet();
 
     //
@@ -246,17 +252,17 @@
         Block blocks[] = dir.getFile(src);
         if (blocks != null) {
             results = new Object[2];
-            DatanodeInfo machineSets[][] = new DatanodeInfo[blocks.length][];
+            DatanodeDescriptor machineSets[][] = new DatanodeDescriptor[blocks.length][];
 
             for (int i = 0; i < blocks.length; i++) {
                 TreeSet containingNodes = (TreeSet) blocksMap.get(blocks[i]);
                 if (containingNodes == null) {
-                    machineSets[i] = new DatanodeInfo[0];
+                    machineSets[i] = new DatanodeDescriptor[0];
                 } else {
-                    machineSets[i] = new DatanodeInfo[containingNodes.size()];
+                    machineSets[i] = new DatanodeDescriptor[containingNodes.size()];
                     int j = 0;
                     for (Iterator it = containingNodes.iterator(); it.hasNext(); j++) {
-                        machineSets[i][j] = (DatanodeInfo) it.next();
+                        machineSets[i][j] = (DatanodeDescriptor) it.next();
                     }
                 }
             }
@@ -382,7 +388,7 @@
         }
 
         // Get the array of replication targets 
-        DatanodeInfo targets[] = chooseTargets(replication, null, 
+        DatanodeDescriptor targets[] = chooseTargets(replication, null, 
                                                clientMachine, blockSize);
         if (targets.length < this.minReplication) {
             throw new IOException("failed to create file "+src
@@ -464,7 +470,7 @@
         }
         
         // Get the array of replication targets 
-        DatanodeInfo targets[] = chooseTargets(pendingFile.getReplication(), 
+        DatanodeDescriptor targets[] = chooseTargets(pendingFile.getReplication(), 
             null, pendingFile.getClientMachine(), pendingFile.getBlockSize());
         if (targets.length < this.minReplication) {
           throw new IOException("File " + src + " could only be replicated to " +
@@ -568,7 +574,7 @@
         for (int i = 0; i < nrBlocks; i++) {
             Block b = (Block)pendingBlocks[i];
             TreeSet containingNodes = (TreeSet) blocksMap.get(b);
-            DatanodeInfo node = (DatanodeInfo) containingNodes.first();
+            DatanodeDescriptor node = (DatanodeDescriptor) containingNodes.first();
             for (Iterator it = node.getBlockIterator(); it.hasNext(); ) {
                 Block cur = (Block) it.next();
                 if (b.getBlockId() == cur.getBlockId()) {
@@ -700,7 +706,7 @@
                 TreeSet containingNodes = (TreeSet) blocksMap.get(b);
                 if (containingNodes != null) {
                     for (Iterator it = containingNodes.iterator(); it.hasNext(); ) {
-                        DatanodeInfo node = (DatanodeInfo) it.next();
+                        DatanodeDescriptor node = (DatanodeDescriptor) it.next();
                         Vector invalidateSet = (Vector) recentInvalidateSets.get(node.getStorageID());
                         if (invalidateSet == null) {
                             invalidateSet = new Vector();
@@ -795,7 +801,7 @@
                 Vector v = new Vector();
                 if (containingNodes != null) {
                   for (Iterator it =containingNodes.iterator(); it.hasNext();) {
-                    DatanodeInfo cur = (DatanodeInfo) it.next();
+                    DatanodeDescriptor cur = (DatanodeDescriptor) it.next();
                     v.add(new UTF8( cur.getHost() ));
                   }
                 }
@@ -1045,8 +1051,8 @@
           + " storage " + nodeReg.getStorageID() );
 
       nodeReg.registrationID = getRegistrationID();
-      DatanodeInfo nodeS = (DatanodeInfo)datanodeMap.get(nodeReg.getStorageID());
-      DatanodeInfo nodeN = getDatanodeByName( nodeReg.getName() );
+      DatanodeDescriptor nodeS = (DatanodeDescriptor)datanodeMap.get(nodeReg.getStorageID());
+      DatanodeDescriptor nodeN = getDatanodeByName( nodeReg.getName() );
       
       if( nodeN != null && nodeS != null && nodeN == nodeS ) {
         // The same datanode has been just restarted to serve the same data 
@@ -1078,7 +1084,7 @@
         }
         // register new datanode
         datanodeMap.put(nodeReg.getStorageID(), 
-                        new DatanodeInfo( nodeReg ) ) ;
+                        new DatanodeDescriptor( nodeReg ) ) ;
         NameNode.stateChangeLog.debug(
             "BLOCK* NameSystem.registerDatanode: "
             + "node registered." );
@@ -1137,13 +1143,13 @@
         synchronized (datanodeMap) {
           long capacityDiff = 0;
           long remainingDiff = 0;
-          DatanodeInfo nodeinfo = getDatanode( nodeID );
+          DatanodeDescriptor nodeinfo = getDatanode( nodeID );
           deaddatanodeMap.remove(nodeID.getName());
 
           if (nodeinfo == null) {
             NameNode.stateChangeLog.debug("BLOCK* NameSystem.gotHeartbeat: "
                     +"brand-new heartbeat from "+nodeID.getName() );
-            nodeinfo = new DatanodeInfo(nodeID, capacity, remaining);
+            nodeinfo = new DatanodeDescriptor(nodeID, capacity, remaining);
             datanodeMap.put(nodeinfo.getStorageID(), nodeinfo);
             capacityDiff = capacity;
             remainingDiff = remaining;
@@ -1179,12 +1185,12 @@
 
     /**
      * remove a datanode info
-     * @param name: datanode name
+     * @param nodeID datanode ID
      * @author hairong
      */
     synchronized public void removeDatanode( DatanodeID nodeID ) 
     throws IOException {
-      DatanodeInfo nodeInfo = getDatanode( nodeID );
+      DatanodeDescriptor nodeInfo = getDatanode( nodeID );
       if (nodeInfo != null) {
         removeDatanode( nodeInfo );
       } else {
@@ -1195,10 +1201,10 @@
   
   /**
    * remove a datanode info
-   * @param nodeInfo: datanode info
+   * @param nodeInfo datanode info
    * @author hairong
    */
-    private void removeDatanode( DatanodeInfo nodeInfo ) {
+    private void removeDatanode( DatanodeDescriptor nodeInfo ) {
       heartbeats.remove(nodeInfo);
       datanodeMap.remove(nodeInfo.getStorageID());
       deaddatanodeMap.put(nodeInfo.getName(), nodeInfo);
@@ -1219,17 +1225,19 @@
      */
     synchronized void heartbeatCheck() {
       synchronized (heartbeats) {
-        DatanodeInfo nodeInfo = null;
+        DatanodeDescriptor nodeInfo = null;
 
         while ((heartbeats.size() > 0) &&
-               ((nodeInfo = (DatanodeInfo) heartbeats.first()) != null) &&
-               (nodeInfo.lastUpdate() < System.currentTimeMillis() - EXPIRE_INTERVAL)) {
+               ((nodeInfo = (DatanodeDescriptor) heartbeats.first()) != null) &&
+               (nodeInfo.isDead())) {
           NameNode.stateChangeLog.info("BLOCK* NameSystem.heartbeatCheck: "
               + "lost heartbeat from " + nodeInfo.getName());
           removeDatanode( nodeInfo );
+          /* SHV
           if (heartbeats.size() > 0) {
-              nodeInfo = (DatanodeInfo) heartbeats.first();
+              nodeInfo = (DatanodeDescriptor) heartbeats.first();
           }
+          */
         }
       }
     }
@@ -1243,7 +1251,7 @@
                                             ) throws IOException {
         NameNode.stateChangeLog.debug("BLOCK* NameSystem.processReport: "
           +"from "+nodeID.getName()+" "+newReport.length+" blocks" );
-        DatanodeInfo node = getDatanode( nodeID );
+        DatanodeDescriptor node = getDatanode( nodeID );
 
         //
         // Modify the (block-->datanode) map, according to the difference
@@ -1313,7 +1321,7 @@
      * Modify (block-->datanode) map.  Remove block from set of 
      * needed replications if this takes care of the problem.
      */
-    synchronized void addStoredBlock(Block block, DatanodeInfo node) {
+    synchronized void addStoredBlock(Block block, DatanodeDescriptor node) {
         TreeSet containingNodes = (TreeSet) blocksMap.get(block);
         if (containingNodes == null) {
             containingNodes = new TreeSet();
@@ -1369,7 +1377,7 @@
         return;
       Vector nonExcess = new Vector();
       for (Iterator it = containingNodes.iterator(); it.hasNext(); ) {
-          DatanodeInfo cur = (DatanodeInfo) it.next();
+          DatanodeDescriptor cur = (DatanodeDescriptor) it.next();
           TreeSet excessBlocks = (TreeSet) excessReplicateMap.get(cur.getStorageID());
           if (excessBlocks == null || ! excessBlocks.contains(block)) {
               nonExcess.add(cur);
@@ -1390,7 +1398,7 @@
     void chooseExcessReplicates(Vector nonExcess, Block b, short replication) {
         while (nonExcess.size() - replication > 0) {
             int chosenNode = r.nextInt(nonExcess.size());
-            DatanodeInfo cur = (DatanodeInfo) nonExcess.elementAt(chosenNode);
+            DatanodeDescriptor cur = (DatanodeDescriptor) nonExcess.elementAt(chosenNode);
             nonExcess.removeElementAt(chosenNode);
 
             TreeSet excessBlocks = (TreeSet) excessReplicateMap.get(cur.getStorageID());
@@ -1426,7 +1434,7 @@
      * Modify (block-->datanode) map.  Possibly generate 
      * replication tasks, if the removed block is still valid.
      */
-    synchronized void removeStoredBlock(Block block, DatanodeInfo node) {
+    synchronized void removeStoredBlock(Block block, DatanodeDescriptor node) {
         NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
                 +block.getBlockName() + " from "+node.getName() );
         TreeSet containingNodes = (TreeSet) blocksMap.get(block);
@@ -1471,7 +1479,7 @@
     public synchronized void blockReceived( DatanodeID nodeID,  
                                             Block block
                                           ) throws IOException {
-        DatanodeInfo node = getDatanode( nodeID );
+        DatanodeDescriptor node = getDatanode( nodeID );
         if (node == null) {
             NameNode.stateChangeLog.warn("BLOCK* NameSystem.blockReceived: "
              + block.getBlockName() + " is received from an unrecorded node " 
@@ -1509,14 +1517,14 @@
 
     /**
      */
-    public DatanodeInfo[] datanodeReport() {
-        DatanodeInfo results[] = null;
+    public DatanodeDescriptor[] datanodeReport() {
+        DatanodeDescriptor results[] = null;
         synchronized (heartbeats) {
             synchronized (datanodeMap) {
-                results = new DatanodeInfo[datanodeMap.size()];
+                results = new DatanodeDescriptor[datanodeMap.size()];
                 int i = 0;
                 for (Iterator it = datanodeMap.values().iterator(); it.hasNext(); ) {
-                    DatanodeInfo cur = (DatanodeInfo) it.next();
+                    DatanodeDescriptor cur = (DatanodeDescriptor) it.next();
                     results[i++] = cur;
                 }
             }
@@ -1537,9 +1545,9 @@
     }
     /** 
      */
-    public DatanodeInfo getDataNodeInfo(String name) {
+    public DatanodeDescriptor getDataNodeInfo(String name) {
         UTF8 src = new UTF8(name);
-        return (DatanodeInfo)datanodeMap.get(src);
+        return (DatanodeDescriptor)datanodeMap.get(src);
     }
     /** 
      */
@@ -1591,11 +1599,11 @@
      *
      * The Array that we return consists of two objects:
      * The 1st elt is an array of Blocks.
-     * The 2nd elt is a 2D array of DatanodeInfo objs, identifying the
+     * The 2nd elt is a 2D array of DatanodeDescriptor objs, identifying the
      *     target sequence for the Block at the appropriate index.
      *
      */
-    public synchronized Object[] pendingTransfers(DatanodeInfo srcNode,
+    public synchronized Object[] pendingTransfers(DatanodeDescriptor srcNode,
                                                   int xmitsInProgress) {
     synchronized (neededReplications) {
       Object results[] = null;
@@ -1630,7 +1638,7 @@
             // not be scheduled for removal on that node
             if (containingNodes.contains(srcNode)
                 && (excessBlocks == null || ! excessBlocks.contains(block))) {
-              DatanodeInfo targets[] = chooseTargets(
+              DatanodeDescriptor targets[] = chooseTargets(
                   Math.min( fileINode.getReplication() - containingNodes.size(),
                             this.maxReplicationStreams - xmitsInProgress), 
                   containingNodes, null, blockSize);
@@ -1654,8 +1662,8 @@
           int i = 0;
           for (Iterator it = replicateBlocks.iterator(); it.hasNext(); i++) {
             Block block = (Block) it.next();
-            DatanodeInfo targets[] = 
-                      (DatanodeInfo[]) replicateTargetSets.elementAt(i);
+            DatanodeDescriptor targets[] = 
+                      (DatanodeDescriptor[]) replicateTargetSets.elementAt(i);
             TreeSet containingNodes = (TreeSet) blocksMap.get(block);
 
             if (containingNodes.size() + targets.length >= 
@@ -1684,10 +1692,10 @@
           //
           // Build returned objects from above lists
           //
-          DatanodeInfo targetMatrix[][] = 
-                        new DatanodeInfo[replicateTargetSets.size()][];
+          DatanodeDescriptor targetMatrix[][] = 
+                        new DatanodeDescriptor[replicateTargetSets.size()][];
           for (i = 0; i < targetMatrix.length; i++) {
-            targetMatrix[i] = (DatanodeInfo[]) replicateTargetSets.elementAt(i);
+            targetMatrix[i] = (DatanodeDescriptor[]) replicateTargetSets.elementAt(i);
           }
 
           results = new Object[2];
@@ -1705,10 +1713,10 @@
      * @param desiredReplicates
      *          number of duplicates wanted.
      * @param forbiddenNodes
-     *          of DatanodeInfo instances that should not be considered targets.
-     * @return array of DatanodeInfo instances uses as targets.
+     *          of DatanodeDescriptor instances that should not be considered targets.
+     * @return array of DatanodeDescriptor instances uses as targets.
      */
-    DatanodeInfo[] chooseTargets(int desiredReplicates, TreeSet forbiddenNodes,
+    DatanodeDescriptor[] chooseTargets(int desiredReplicates, TreeSet forbiddenNodes,
                                  UTF8 clientMachine, long blockSize) {
         if (desiredReplicates > datanodeMap.size()) {
           LOG.warn("Replication requested of "+desiredReplicates
@@ -1721,14 +1729,14 @@
         Vector targets = new Vector();
 
         for (int i = 0; i < desiredReplicates; i++) {
-            DatanodeInfo target = chooseTarget(forbiddenNodes, alreadyChosen, 
+            DatanodeDescriptor target = chooseTarget(forbiddenNodes, alreadyChosen, 
                                                clientMachine, blockSize);
             if (target == null)
               break; // calling chooseTarget again won't help
             targets.add(target);
             alreadyChosen.add(target);
         }
-        return (DatanodeInfo[]) targets.toArray(new DatanodeInfo[targets.size()]);
+        return (DatanodeDescriptor[]) targets.toArray(new DatanodeDescriptor[targets.size()]);
     }
 
     /**
@@ -1738,12 +1746,12 @@
      * Right now it chooses randomly from available boxes.  In future could 
      * choose according to capacity and load-balancing needs (or even 
      * network-topology, to avoid inter-switch traffic).
-     * @param forbidden1 DatanodeInfo targets not allowed, null allowed.
-     * @param forbidden2 DatanodeInfo targets not allowed, null allowed.
-     * @return DatanodeInfo instance to use or null if something went wrong
+     * @param forbidden1 DatanodeDescriptor targets not allowed, null allowed.
+     * @param forbidden2 DatanodeDescriptor targets not allowed, null allowed.
+     * @return DatanodeDescriptor instance to use or null if something went wrong
      * (a log message is emitted if null is returned).
      */
-    DatanodeInfo chooseTarget(TreeSet forbidden1, TreeSet forbidden2, 
+    DatanodeDescriptor chooseTarget(TreeSet forbidden1, TreeSet forbidden2, 
                               UTF8 clientMachine, long blockSize) {
         //
         // Check if there are any available targets at all
@@ -1760,13 +1768,13 @@
         TreeSet forbiddenMachines = new TreeSet();
         if (forbidden1 != null) {
             for (Iterator it = forbidden1.iterator(); it.hasNext(); ) {
-                DatanodeInfo cur = (DatanodeInfo) it.next();
+                DatanodeDescriptor cur = (DatanodeDescriptor) it.next();
                 forbiddenMachines.add(cur.getHost());
             }
         }
         if (forbidden2 != null) {
             for (Iterator it = forbidden2.iterator(); it.hasNext(); ) {
-                DatanodeInfo cur = (DatanodeInfo) it.next();
+                DatanodeDescriptor cur = (DatanodeDescriptor) it.next();
                 forbiddenMachines.add(cur.getHost());
             }
         }
@@ -1776,7 +1784,7 @@
         //
         Vector targetList = new Vector();
         for (Iterator it = datanodeMap.values().iterator(); it.hasNext(); ) {
-            DatanodeInfo node = (DatanodeInfo) it.next();
+            DatanodeDescriptor node = (DatanodeDescriptor) it.next();
             if (! forbiddenMachines.contains(node.getHost())) {
                 targetList.add(node);
             }
@@ -1793,7 +1801,7 @@
             //
             if (clientMachine != null && clientMachine.getLength() > 0) {
                 for (Iterator it = targetList.iterator(); it.hasNext(); ) {
-                    DatanodeInfo node = (DatanodeInfo) it.next();
+                    DatanodeDescriptor node = (DatanodeDescriptor) it.next();
                     if (clientMachine.equals(node.getHost())) {
                         if (node.getRemaining() > blockSize * MIN_BLOCKS_FOR_WRITE) {
                             return node;
@@ -1806,7 +1814,7 @@
             // Otherwise, choose node according to target capacity
             //
             for (Iterator it = targetList.iterator(); it.hasNext(); ) {
-                DatanodeInfo node = (DatanodeInfo) it.next();
+                DatanodeDescriptor node = (DatanodeDescriptor) it.next();
                 if (node.getRemaining() > blockSize * MIN_BLOCKS_FOR_WRITE) {
                     return node;
                 }
@@ -1818,7 +1826,7 @@
             // a last resort, pick the first valid one we can find.
             //
             for (Iterator it = targetList.iterator(); it.hasNext(); ) {
-                DatanodeInfo node = (DatanodeInfo) it.next();
+                DatanodeDescriptor node = (DatanodeDescriptor) it.next();
                 if (node.getRemaining() > blockSize) {
                     return node;
                 }
@@ -1886,12 +1894,12 @@
      * Get data node by storage ID.
      * 
      * @param nodeID
-     * @return DatanodeInfo or null if the node is not found.
+     * @return DatanodeDescriptor or null if the node is not found.
      * @throws IOException
      */
-    public DatanodeInfo getDatanode( DatanodeID nodeID ) throws IOException {
+    public DatanodeDescriptor getDatanode( DatanodeID nodeID ) throws IOException {
       UnregisteredDatanodeException e = null;
-      DatanodeInfo node = (DatanodeInfo) datanodeMap.get(nodeID.getStorageID());
+      DatanodeDescriptor node = (DatanodeDescriptor) datanodeMap.get(nodeID.getStorageID());
       if (node == null) 
         return null;
       if (!node.getName().equals(nodeID.getName())) {
@@ -1911,12 +1919,12 @@
      * Otherwise an additional tree-like structure will be required.
      * 
      * @param name
-     * @return DatanodeInfo if found or null otherwise 
+     * @return DatanodeDescriptor if found or null otherwise 
      * @throws IOException
      */
-    public DatanodeInfo getDatanodeByName( String name ) throws IOException {
+    public DatanodeDescriptor getDatanodeByName( String name ) throws IOException {
       for (Iterator it = datanodeMap.values().iterator(); it.hasNext(); ) {
-        DatanodeInfo node = (DatanodeInfo) it.next();
+        DatanodeDescriptor node = (DatanodeDescriptor) it.next();
         if( node.getName().equals(name) )
            return node;
       }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?rev=417566&r1=417565&r2=417566&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Tue Jun 27 12:32:11 2006
@@ -397,7 +397,7 @@
         // Ask to perform pending transfers, if any
         //
         Object xferResults[] = namesystem.pendingTransfers(
-                       new DatanodeInfo( nodeReg ), xmitsInProgress );
+                       new DatanodeDescriptor( nodeReg ), xmitsInProgress );
         if (xferResults != null) {
             return new BlockCommand((Block[]) xferResults[0], (DatanodeInfo[][]) xferResults[1]);
         }

Modified: lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp?rev=417566&r1=417565&r2=417566&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp (original)
+++ lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp Tue Jun 27 12:32:11 2006
@@ -20,9 +20,9 @@
     String uGb = DFSShell.limitDecimal((1.0 * u)/(1024*1024*1024), 2);
     String percentUsed = DFSShell.limitDecimal(((1.0 * u)/c)*100, 2);
     out.print("<td style=\"vertical-align: top;\"> <b>" + 
-              d.getName().toString() +
+              d.getName() +
               "</b>&nbsp;<br><i><b>LastContact:</b>" +

-              new Date(d.lastUpdate())+ ";&nbsp;");
+              new Date(d.getLastUpdate())+ ";&nbsp;");
     out.print("<b>Total raw bytes:</b>&nbsp;" + c + "(" + cGb + 
               "&nbsp;GB);&nbsp;");
     out.print("<b>Percent used:</b>&nbsp;" + percentUsed);
@@ -53,7 +53,7 @@
         out.print("<tr>");
         generateLiveNodeData(out, l);
         out.print("<td style=\"vertical-align: top;\">" + 
-                  d.getName().toString() +
+                  d.getName() +
                   "<br></td>");
         out.print("</tr>");
       }
@@ -69,7 +69,7 @@
           DatanodeInfo d = (DatanodeInfo)dead.elementAt(i);
           out.print("<td style=\"vertical-align: top;\"><br></td>");
           out.print("<td style=\"vertical-align: top;\">" + 
-                    d.getName().toString() +
+                    d.getName() +
                     "<br></td>");
         }
         out.print("</tr>");



Mime
View raw message