hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cutt...@apache.org
Subject svn commit: r410635 [2/2] - in /lucene/hadoop/trunk: ./ src/java/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/fs/ src/test/org/apache/hadoop/test/
Date Wed, 31 May 2006 18:52:50 GMT
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?rev=410635&r1=410634&r2=410635&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Wed May 31 11:52:49 2006
@@ -145,37 +145,6 @@
     }
 
     /**
-     * The exception that happens when you ask to create a file that already
-     * is being created, but is not closed yet.
-     * @author Owen O'Malley
-     */
-    public static class AlreadyBeingCreatedException extends IOException {
-      public AlreadyBeingCreatedException(String msg) {
-        super(msg);
-      }
-    }
-    
-    /**
-     * The lease that was being used to create this file has expired.
-     * @author Owen O'Malley
-     */
-    public static class LeaseExpiredException extends IOException {
-      public LeaseExpiredException(String msg) {
-        super(msg);
-      }
-    }
-    
-    /**
-     * The file has not finished being written to enough datanodes yet.
-     * @author Owen O'Malley
-     */
-    public static class NotReplicatedYetException extends IOException {
-      public NotReplicatedYetException(String msg) {
-        super(msg);
-      }
-    }
-    
-    /**
      */
     public LocatedBlock create(String src, 
                                String clientName, 
@@ -230,7 +199,7 @@
                 +targets.length + " locations" );
 
         for (int i = 0; i < targets.length; i++) {
-            namesystem.blockReceived(b, targets[i].getName());
+            namesystem.blockReceived( targets[i], b );
         }
     }
 
@@ -379,14 +348,26 @@
     ////////////////////////////////////////////////////////////////
     // DatanodeProtocol
     ////////////////////////////////////////////////////////////////
+    /** 
+     */
+    public DatanodeRegistration register( DatanodeRegistration nodeReg
+                                        ) throws IOException {
+      verifyVersion( nodeReg.getVersion() );
+      namesystem.registerDatanode( nodeReg );
+      return nodeReg;
+    }
+    
     /**
     * Data node notifies the name node that it is alive.
      * Return a block-oriented command for the datanode to execute.
      * This will be either a transfer or a delete operation.
      */
-    public BlockCommand sendHeartbeat(String sender, long capacity, long remaining,
-            int xmitsInProgress) {
-        namesystem.gotHeartbeat(new UTF8(sender), capacity, remaining);        
+    public BlockCommand sendHeartbeat(DatanodeRegistration nodeReg,
+                                      long capacity, 
+                                      long remaining,
+                                      int xmitsInProgress) throws IOException {
+        verifyRequest( nodeReg );
+        namesystem.gotHeartbeat( nodeReg, capacity, remaining );
         
         //
         // Only ask datanodes to perform block operations (transfer, delete) 
@@ -408,7 +389,8 @@
         //
         // Ask to perform pending transfers, if any
         //
-        Object xferResults[] = namesystem.pendingTransfers(new DatanodeInfo(new UTF8(sender)),
xmitsInProgress);
+        Object xferResults[] = namesystem.pendingTransfers(
+                       new DatanodeInfo( nodeReg ), xmitsInProgress );
         if (xferResults != null) {
             return new BlockCommand((Block[]) xferResults[0], (DatanodeInfo[][]) xferResults[1]);
         }
@@ -419,39 +401,70 @@
         // a block report.  This is just a small fast removal of blocks that have
         // just been removed.
         //
-        Block blocks[] = namesystem.blocksToInvalidate(new UTF8(sender));
+        Block blocks[] = namesystem.blocksToInvalidate( nodeReg );
         if (blocks != null) {
             return new BlockCommand(blocks);
         }
         return null;
     }
 
-    public Block[] blockReport(String sender, Block blocks[]) {
+    public Block[] blockReport( DatanodeRegistration nodeReg,
+                                Block blocks[]) throws IOException {
+        verifyRequest( nodeReg );
         stateChangeLog.fine("*BLOCK* NameNode.blockReport: "
-                +"from "+sender+" "+blocks.length+" blocks" );
+                +"from "+nodeReg.getName()+" "+blocks.length+" blocks" );
         if( firstBlockReportTime==0)
               firstBlockReportTime=System.currentTimeMillis();
 
-        return namesystem.processReport(blocks, new UTF8(sender));
+        return namesystem.processReport( nodeReg, blocks );
      }
 
-    public void blockReceived(String sender, Block blocks[]) {
+    public void blockReceived(DatanodeRegistration nodeReg, 
+                              Block blocks[]) throws IOException {
+        verifyRequest( nodeReg );
         stateChangeLog.fine("*BLOCK* NameNode.blockReceived: "
-                +"from "+sender+" "+blocks.length+" blocks." );
+                +"from "+nodeReg.getName()+" "+blocks.length+" blocks." );
         for (int i = 0; i < blocks.length; i++) {
-            namesystem.blockReceived(blocks[i], new UTF8(sender));
+            namesystem.blockReceived( nodeReg, blocks[i] );
         }
     }
 
     /**
      */
-    public void errorReport(String sender, int errorCode, String msg) {
-        // Log error message from datanode
-        LOG.warning("Report from " + sender + ": " + msg);
-        if( errorCode == DatanodeProtocol.DISK_ERROR ) {
-            namesystem.rmDataNodeByName(new UTF8(sender));            
-        }
-            
+    public void errorReport(DatanodeRegistration nodeReg,
+                            int errorCode, 
+                            String msg) throws IOException {
+      // Log error message from datanode
+      verifyRequest( nodeReg );
+      LOG.warning("Report from " + nodeReg.getName() + ": " + msg);
+      if( errorCode == DatanodeProtocol.DISK_ERROR ) {
+          namesystem.removeDatanode( nodeReg );            
+      }
+    }
+
+    /** 
+     * Verify request.
+     * 
+     * Verifies correctness of the datanode version and registration ID.
+     * 
+     * @param nodeReg data node registration
+     * @throws IOException
+     */
+    public void verifyRequest( DatanodeRegistration nodeReg ) throws IOException {
+      verifyVersion( nodeReg.getVersion() );
+      if( ! namesystem.getRegistrationID().equals( nodeReg.getRegistrationID() ))
+          throw new UnregisteredDatanodeException( nodeReg );
+    }
+    
+    /**
+     * Verify version.
+     * 
+     * @param version
+     * @throws IOException
+     */
+    public void verifyVersion( int version ) throws IOException {
+      if( version != DFS_CURRENT_VERSION )
+        throw new IncorrectVersionException( version, "data node" );
     }
 
     /**

Added: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NotReplicatedYetException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NotReplicatedYetException.java?rev=410635&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NotReplicatedYetException.java (added)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NotReplicatedYetException.java Wed
May 31 11:52:49 2006
@@ -0,0 +1,13 @@
+package org.apache.hadoop.dfs;
+
+import java.io.IOException;
+
+/**
+ * The file has not finished being written to enough datanodes yet.
+ * @author Owen O'Malley
+ */
+public class NotReplicatedYetException extends IOException {
+  public NotReplicatedYetException(String msg) {
+    super(msg);
+  }
+}

Added: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java?rev=410635&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java
(added)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java
Wed May 31 11:52:49 2006
@@ -0,0 +1,25 @@
+package org.apache.hadoop.dfs;
+
+import java.io.IOException;
+
+
+/**
+ * This exception is thrown when a datanode that has not previously 
+ * registered is trying to access the name node.
+ * 
+ * @author Konstantin Shvachko
+ */
+class UnregisteredDatanodeException extends IOException {
+
+  public UnregisteredDatanodeException( DatanodeID nodeID ) {
+    super("Unregistered data node: " + nodeID.getName() );
+  }
+
+  public UnregisteredDatanodeException( DatanodeID nodeID, 
+                                        DatanodeInfo storedNode ) {
+    super("Data node " + nodeID.getName() 
+        + " is attempting to report storage ID "
+        + nodeID.getStorageID() + ". Expecting " 
+        + storedNode.getStorageID() + ".");
+  }
+}

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java?rev=410635&r1=410634&r2=410635&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java Wed May 31 11:52:49 2006
@@ -51,7 +51,7 @@
  * <li>total number of bytes processed</li>
  * <li>throughput in mb/sec (total number of bytes / sum of processing times)</li>
  * <li>average i/o rate in mb/sec per file</li>
- * <li>standard i/o rate deviation</li>
+ * <li>standard deviation of i/o rate</li>
  * </ul>
  *
  * @author Konstantin Shvachko
@@ -309,11 +309,11 @@
       System.exit(-1);
     }
     for (int i = 0; i < args.length; i++) {       // parse command line
-      if (args[i].startsWith("-r")) {
+      if (args[i].startsWith("-read")) {
         testType = TEST_TYPE_READ;
-      } else if (args[i].startsWith("-w")) {
+      } else if (args[i].equals("-write")) {
         testType = TEST_TYPE_WRITE;
-      } else if (args[i].startsWith("-clean")) {
+      } else if (args[i].equals("-clean")) {
         testType = TEST_TYPE_CLEANUP;
       } else if (args[i].startsWith("-seq")) {
         isSequential = true;
@@ -410,7 +410,7 @@
       "Total MBytes processed: " + size/MEGA,
       "     Throughput mb/sec: " + size * 1000.0 / (time * MEGA),
       "Average IO rate mb/sec: " + med,
-      " Std IO rate deviation: " + stdDev,
+      " IO rate std deviation: " + stdDev,
       "    Test exec time sec: " + (float)execTime / 1000,
       "" };
 

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/test/AllTestDriver.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/test/AllTestDriver.java?rev=410635&r1=410634&r2=410635&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/test/AllTestDriver.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/test/AllTestDriver.java Wed May 31 11:52:49
2006
@@ -50,6 +50,7 @@
 	    pgd.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test
for sequence file input format.");
 	    pgd.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input
format.");
       pgd.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
+      pgd.addClass("DistributedFSCheck", TestDFSIO.class, "Distributed checkup of the file
system consistency.");
 	    pgd.driver(argv);
 	}
 	catch(Throwable e){



Mime
View raw message