hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From tomwh...@apache.org
Subject svn commit: r527401 - in /lucene/hadoop/trunk: ./ src/java/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/dfs/
Date Wed, 11 Apr 2007 07:08:33 GMT
Author: tomwhite
Date: Wed Apr 11 00:08:31 2007
New Revision: 527401

URL: http://svn.apache.org/viewvc?view=rev&rev=527401
Log:
HADOOP-1211.  Remove deprecated constructor and unused static members in DataNode class. 
Contributed by Konstantin Shvachko.

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeRegistration.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestCheckpoint.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=527401&r1=527400&r2=527401
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Wed Apr 11 00:08:31 2007
@@ -144,6 +144,9 @@
     unit test configuration.  Using the default is more robust and
     has almost the same run time.  (Arun C Murthy via tomwhite)
 
+45. HADOOP-1211.  Remove deprecated constructor and unused static 
+    members in DataNode class.  (Konstantin Shvachko via tomwhite)
+
 
 Release 0.12.3 - 2007-04-06
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?view=diff&rev=527401&r1=527400&r2=527401
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Wed Apr 11 00:08:31 2007
@@ -277,8 +277,8 @@
     }
 
     private NamespaceInfo handshake() throws IOException {
-      NamespaceInfo nsInfo;
-      while( true ) {
+      NamespaceInfo nsInfo = new NamespaceInfo();
+      while (shouldRun) {
         try {
           nsInfo = namenode.versionRequest();
           break;

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeRegistration.java?view=diff&rev=527401&r1=527400&r2=527401
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeRegistration.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeRegistration.java Wed Apr 11 00:08:31 2007
@@ -46,20 +46,6 @@
   }
 
   /**
-   * Create DatanodeRegistration
-   * @deprecated 
-   * use {@link #DatanodeRegistration(String, int, DataStorage)} instead
-   */
-  public DatanodeRegistration(int version, 
-                              String nodeName, 
-                              String storageID,
-                              int infoPort,
-                              String registrationID ) {
-    super( nodeName, storageID, infoPort );
-    this.storageInfo = new StorageInfo();
-  }
-
-  /**
    */
   public int getVersion() {
     return storageInfo.getLayoutVersion();

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java?view=diff&rev=527401&r1=527400&r2=527401
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java Wed Apr 11 00:08:31 2007
@@ -256,7 +256,6 @@
     class FSVolume {
       static final double USABLE_DISK_PCT_DEFAULT = 0.98f; 
 
-      private File dir; // TODO this field is redundant equals this.dataDir.dir.getParent()
       private FSDir dataDir;
       private File tmpDir;
       private DF usage;
@@ -267,9 +266,9 @@
         this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
         this.usableDiskPct = conf.getFloat("dfs.datanode.du.pct",
             (float) USABLE_DISK_PCT_DEFAULT);
-        this.dir = currentDir.getParentFile();
+        File parent = currentDir.getParentFile();
         this.dataDir = new FSDir( currentDir );
-        this.tmpDir = new File(dir, "tmp");
+        this.tmpDir = new File(parent, "tmp");
         if (tmpDir.exists()) {
           FileUtil.fullyDelete(tmpDir);
         }
@@ -278,7 +277,7 @@
             throw new IOException("Mkdirs failed to create " + tmpDir.toString());
           }
         }
-        this.usage = new DF(dir, conf);
+        this.usage = new DF(parent, conf);
       }
       
       long getCapacity() throws IOException {
@@ -342,7 +341,7 @@
       }
       
       public String toString() {
-        return dir.getAbsolutePath();
+        return dataDir.dir.getAbsolutePath();
       }
     }
     

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?view=diff&rev=527401&r1=527400&r2=527401
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Wed Apr 11 00:08:31 2007
@@ -1102,7 +1102,8 @@
      */
     private synchronized void dumpRecentInvalidateSets(PrintWriter out) {
       Collection<Collection<Block>> values = recentInvalidateSets.values();
-      Iterator it = recentInvalidateSets.entrySet().iterator();
+      Iterator<Map.Entry<String,Collection<Block>>> it = 
+                                recentInvalidateSets.entrySet().iterator();
       if (values.size() == 0) {
         out.println("Metasave: Blocks waiting deletion: 0");
         return;
@@ -1110,10 +1111,10 @@
       out.println("Metasave: Blocks waiting deletion from " +
                    values.size() + " datanodes.");
       while (it.hasNext()) {
-        Map.Entry entry = (Map.Entry) it.next();
+        Map.Entry<String,Collection<Block>> entry = it.next();
         String storageId = (String) entry.getKey();
         DatanodeDescriptor node = datanodeMap.get(storageId);
-        Collection<Block> blklist = (Collection<Block>) entry.getValue();
+        Collection<Block> blklist = entry.getValue();
         if (blklist.size() > 0) {
           out.print(node.getName());
           for (Iterator jt = blklist.iterator(); jt.hasNext();) {

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?view=diff&rev=527401&r1=527400&r2=527401
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Wed Apr 11 00:08:31 2007
@@ -224,19 +224,6 @@
       init( bindAddress, port, conf );
     }
 
-    /** Return the configured directories where name data is stored. 
-     * @deprecated
-     */
-    static File[] getDirs(Configuration conf) {
-      String[] dirNames = conf.getStrings("dfs.name.dir");
-      if (dirNames == null) { dirNames = new String[] {"/tmp/hadoop/dfs/name"}; }
-      File[] dirs = new File[dirNames.length];
-      for (int idx = 0; idx < dirs.length; idx++) {
-        dirs[idx] = new File(dirNames[idx]);
-      }
-      return dirs;
-    }
-
     /**
      * Wait for service to finish.
      * (Normally, it runs forever.)

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java?view=diff&rev=527401&r1=527400&r2=527401
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java Wed Apr 11 00:08:31 2007
@@ -23,8 +23,6 @@
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileLock;
-import java.util.AbstractList;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Iterator;
 import java.util.Properties;

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java?view=diff&rev=527401&r1=527400&r2=527401
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java Wed Apr 11 00:08:31 2007
@@ -20,6 +20,8 @@
 import java.io.*;
 import java.net.*;
 import java.util.ArrayList;
+import java.util.Collection;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.dfs.FSConstants.StartupOption;
 import org.apache.hadoop.fs.*;
@@ -300,11 +302,11 @@
   /**
    * Get the directories where the namenode stores its state.
    */
-  public File[] getNameDirs() {
-    return NameNode.getDirs(conf);
+  public Collection<File> getNameDirs() {
+    return FSNamesystem.getNamespaceDirs(conf);
   }
 
-   /**
+  /**
    * Wait until the cluster is active and running.
    */
   public void waitActive() throws IOException {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestCheckpoint.java?view=diff&rev=527401&r1=527400&r2=527401
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestCheckpoint.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestCheckpoint.java Wed Apr 11 00:08:31 2007
@@ -19,6 +19,7 @@
 
 import junit.framework.TestCase;
 import java.io.*;
+import java.util.Collection;
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -68,12 +69,12 @@
   /**
    * put back the old namedir
    */
-  private void resurrectNameDir(File[] namedirs) 
+  private void resurrectNameDir(File namedir) 
   throws IOException {
-    String parentdir = namedirs[0].getParent();
-    String name = namedirs[0].getName();
+    String parentdir = namedir.getParent();
+    String name = namedir.getName();
     File oldname =  new File(parentdir, name + ".old");
-    if (!oldname.renameTo(namedirs[0])) {
+    if (!oldname.renameTo(namedir)) {
       assertTrue(false);
     }
   }
@@ -81,12 +82,12 @@
   /**
    * remove one namedir
    */
-  private void removeOneNameDir(File[] namedirs) 
+  private void removeOneNameDir(File namedir) 
   throws IOException {
-    String parentdir = namedirs[0].getParent();
-    String name = namedirs[0].getName();
+    String parentdir = namedir.getParent();
+    String name = namedir.getName();
     File newname =  new File(parentdir, name + ".old");
-    if (!namedirs[0].renameTo(newname)) {
+    if (!namedir.renameTo(newname)) {
       assertTrue(false);
     }
   }
@@ -94,27 +95,28 @@
   /*
    * Verify that namenode does not startup if one namedir is bad.
    */
-  private void testNamedirError(Configuration conf, File[] namedirs) 
+  private void testNamedirError(Configuration conf, Collection<File> namedirs) 
   throws IOException {
     System.out.println("Starting testNamedirError");
-    Path file1 = new Path("checkpoint.dat");
     MiniDFSCluster cluster = null;
 
-    if (namedirs.length <= 1) {
+    if (namedirs.size() <= 1) {
       return;
     }
     
     //
     // Remove one namedir & Restart cluster. This should fail.
     //
-    removeOneNameDir(namedirs);
+    File first = namedirs.iterator().next();
+    removeOneNameDir(first);
     try {
       cluster = new MiniDFSCluster(conf, 0, false, null);
+      cluster.shutdown();
       assertTrue(false);
     } catch (Throwable t) {
       // no nothing
     }
-    resurrectNameDir(namedirs); // put back namedir
+    resurrectNameDir(first); // put back namedir
   }
 
   /*
@@ -241,7 +243,7 @@
   public void testCheckpoint() throws IOException {
     Path file1 = new Path("checkpoint.dat");
     Path file2 = new Path("checkpoint2.dat");
-    File[] namedirs = null;
+    Collection<File> namedirs = null;
 
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);



Mime
View raw message