hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1124376 - in /hadoop/hdfs/branches/HDFS-1073: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Date: Wed, 18 May 2011 18:48:32 GMT
Author: todd
Date: Wed May 18 18:48:31 2011
New Revision: 1124376

URL: http://svn.apache.org/viewvc?rev=1124376&view=rev
Log:
HDFS-1725. Set storage directories only at FSImage construction. Contributed by Ivan Kelly.
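
In short, the image and edits directories now flow into FSImage and NNStorage through their
constructors instead of through a later setStorageDirectories() call. Below is a minimal
sketch of the new wiring (not part of the patch), assuming caller code sitting in the
org.apache.hadoop.hdfs.server.namenode package so the package-private methods are visible,
and assuming the StartupOption import path used elsewhere in this branch:

    package org.apache.hadoop.hdfs.server.namenode;

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;

    class FsImageWiringSketch {
      static void load(Configuration conf) throws IOException {
        // Old flow (removed by this patch):
        //   FSImage image = new FSImage();
        //   image.getStorage().setStorageDirectories(imageDirs, editsDirs);
        //   image.recoverTransitionRead(imageDirs, editsDirs, startOpt);

        // New flow: the FSImage(Configuration) constructor resolves the directories via
        // FSNamesystem.getNamespaceDirs()/getNamespaceEditsDirs(), so later calls no
        // longer pass them around.
        FSImage image = new FSImage(conf);
        boolean needsSave = image.recoverTransitionRead(StartupOption.REGULAR);
        if (needsSave) {
          image.saveNamespace(true);
        }
      }
    }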

Modified:
    hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
    hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java

Modified: hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt (original)
+++ hadoop/hdfs/branches/HDFS-1073/CHANGES.HDFS-1073.txt Wed May 18 18:48:31 2011
@@ -25,3 +25,5 @@ HDFS-1801. Remove use of timestamps to i
 HDFS-1930. TestDFSUpgrade failing in HDFS-1073 branch (todd)
 HDFS-1800. Extend image checksumming to function with multiple fsimage files
            per directory. (todd)
+HDFS-1725. Set storage directories only at FSImage construction (Ivan Kelly
+           via todd)

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java Wed May 18 18:48:31 2011
@@ -21,8 +21,6 @@ import java.io.BufferedInputStream;
 import java.io.DataInputStream;
 import java.io.File;
 import java.io.IOException;
-import java.net.URI;
-import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.zip.CheckedInputStream;
@@ -34,13 +32,12 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
-import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.conf.Configuration;
 
 /**
  * Extension of FSImage for the backup node.
@@ -68,9 +65,12 @@ public class BackupImage extends FSImage
   }
 
   /**
+   * Construct a backup image.
+   * @param conf Configuration
+   * @throws IOException if storage cannot be initialised.
    */
-  BackupImage() {
-    super();
+  BackupImage(Configuration conf) throws IOException {
+    super(conf);
     storage.setDisablePreUpgradableLayoutCheck(true);
     jsState = JSpoolState.OFF;
   }
@@ -81,13 +81,9 @@ public class BackupImage extends FSImage
    * Read VERSION and fstime files if exist.<br>
    * Do not load image or edits.
    *
-   * @param imageDirs list of image directories as URI.
-   * @param editsDirs list of edits directories URI.
    * @throws IOException if the node should shutdown.
    */
-  void recoverCreateRead(Collection<URI> imageDirs,
-                         Collection<URI> editsDirs) throws IOException {
-    storage.setStorageDirectories(imageDirs, editsDirs);
+  void recoverCreateRead() throws IOException {
     for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
       StorageState curState;
@@ -135,9 +131,9 @@ public class BackupImage extends FSImage
 
     // unlock, close and rename storage directories
     storage.unlockAll();
+    
     // recover from unsuccessful checkpoint if necessary
-    recoverCreateRead(storage.getImageDirectories(),
-                      storage.getEditsDirectories());
+    recoverCreateRead();
     // rename and recreate
     for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Wed May 18 18:48:31 2011
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
-import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.RPC;
@@ -119,10 +118,9 @@ public class BackupNode extends NameNode
 
   @Override // NameNode
   protected void loadNamesystem(Configuration conf) throws IOException {
-    BackupImage bnImage = new BackupImage();
+    BackupImage bnImage = new BackupImage(conf);
     this.namesystem = new FSNamesystem(conf, bnImage);
-    bnImage.recoverCreateRead(FSNamesystem.getNamespaceDirs(conf),
-                              FSNamesystem.getNamespaceEditsDirs(conf));
+    bnImage.recoverCreateRead();
   }
 
   @Override // NameNode

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed May 18 18:48:31 2011
@@ -20,9 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.net.URI;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.Condition;
@@ -147,18 +145,22 @@ class FSDirectory implements Closeable {
     return getFSNamesystem().blockManager;
   }
 
-  void loadFSImage(Collection<URI> dataDirs,
-                   Collection<URI> editsDirs,
-                   StartupOption startOpt) 
+  /**
+   * Load the filesystem image into memory.
+   *
+   * @param startOpt Startup type as specified by the user.
+   * @throws IOException If image or editlog cannot be read.
+   */
+  void loadFSImage(StartupOption startOpt) 
       throws IOException {
     // format before starting up if requested
     if (startOpt == StartupOption.FORMAT) {
-      fsImage.getStorage().setStorageDirectories(dataDirs, editsDirs);
-      fsImage.getStorage().format(fsImage.getStorage().determineClusterId()); // reuse current id
+      fsImage.getStorage().format(fsImage.getStorage().determineClusterId());// reuse current id
+
       startOpt = StartupOption.REGULAR;
     }
     try {
-      if (fsImage.recoverTransitionRead(dataDirs, editsDirs, startOpt)) {
+      if (fsImage.recoverTransitionRead(startOpt)) {
         fsImage.saveNamespace(true);
       }
       FSEditLog editLog = fsImage.getEditLog();

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Wed May 18 18:48:31 2011
@@ -77,7 +77,7 @@ public class FSImage implements NNStorag
   private boolean isUpgradeFinalized = false;
   protected MD5Hash newImageDigest = null;
 
-  protected NNStorage storage = null;
+  protected NNStorage storage;
 
   /**
    * URIs for importing an image from a checkpoint. In the default case,
@@ -86,7 +86,7 @@ public class FSImage implements NNStorag
   private Collection<URI> checkpointDirs;
   private Collection<URI> checkpointEditsDirs;
 
-  private Configuration conf;
+  final private Configuration conf;
 
   /**
    * Can fs-image be rolled?
@@ -94,67 +94,63 @@ public class FSImage implements NNStorag
   volatile protected CheckpointStates ckptState = FSImage.CheckpointStates.START; 
 
   /**
+   * Construct an FSImage.
+   * @param conf Configuration
+   * @see #FSImage(Configuration conf, FSNamesystem ns, 
+   *               Collection imageDirs, Collection editsDirs) 
+   * @throws IOException if default directories are invalid.
    */
-  FSImage() {
-    this((FSNamesystem)null);
+  public FSImage(Configuration conf) throws IOException {
+    this(conf, (FSNamesystem)null);
   }
 
   /**
-   * Constructor
+   * Construct an FSImage
    * @param conf Configuration
+   * @param ns The FSNamesystem using this image.
+   * @see #FSImage(Configuration conf, FSNamesystem ns, 
+   *               Collection imageDirs, Collection editsDirs) 
+   * @throws IOException if default directories are invalid.
    */
-  FSImage(Configuration conf) throws IOException {
-    this();
-    this.conf = conf; // TODO we have too many constructors, this is a mess
-
-    if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, 
-        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT)) {
-      NameNode.LOG.info("set FSImage.restoreFailedStorage");
-      storage.setRestoreFailedStorage(true);
-    }
-    setCheckpointDirectories(FSImage.getCheckpointDirs(conf, null),
-        FSImage.getCheckpointEditsDirs(conf, null));
+  private FSImage(Configuration conf, FSNamesystem ns) throws IOException {
+    this(conf, ns,
+         FSNamesystem.getNamespaceDirs(conf),
+         FSNamesystem.getNamespaceEditsDirs(conf));
   }
 
-  private FSImage(FSNamesystem ns) {
-    this.conf = new Configuration();
-    
-    storage = new NNStorage(conf);
+  /**
+   * Construct the FSImage. Set the default checkpoint directories.
+   *
+   * Setup storage and initialize the edit log.
+   *
+   * @param conf Configuration
+   * @param ns The FSNamesystem using this image.
+   * @param imageDirs Directories the image can be stored in.
+   * @param editsDirs Directories the editlog can be stored in.
+   * @throws IOException if directories are invalid.
+   */
+  protected FSImage(Configuration conf, FSNamesystem ns,
+                    Collection<URI> imageDirs, Collection<URI> editsDirs)
+      throws IOException {
+    this.conf = conf;
+    setCheckpointDirectories(FSImage.getCheckpointDirs(conf, null),
+                             FSImage.getCheckpointEditsDirs(conf, null));
+
+    storage = new NNStorage(conf, imageDirs, editsDirs);
     if (ns != null) {
       storage.setUpgradeManager(ns.upgradeManager);
     }
     storage.registerListener(this);
 
+    if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,
+                       DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT)) {
+      storage.setRestoreFailedStorage(true);
+    }
+
     this.editLog = new FSEditLog(storage);
     setFSNamesystem(ns);
   }
 
-  /**
-   * @throws IOException 
-   */
-  FSImage(Collection<URI> fsDirs, Collection<URI> fsEditsDirs) 
-      throws IOException {
-    this();
-    storage.setStorageDirectories(fsDirs, fsEditsDirs);
-  }
-
-  public FSImage(StorageInfo storageInfo, String bpid) {
-    storage = new NNStorage(storageInfo, bpid);
-  }
-
-  /**
-   * Represents an Image (image and edit file).
-   * @throws IOException 
-   */
-  FSImage(URI imageDir) throws IOException {
-    this();
-    ArrayList<URI> dirs = new ArrayList<URI>(1);
-    ArrayList<URI> editsDirs = new ArrayList<URI>(1);
-    dirs.add(imageDir);
-    editsDirs.add(imageDir);
-    storage.setStorageDirectories(dirs, editsDirs);
-  }
-  
   protected FSNamesystem getFSNamesystem() {
     return namesystem;
   }
@@ -178,20 +174,19 @@ public class FSImage implements NNStorag
    * Perform fs state transition if necessary depending on the namespace info.
    * Read storage info. 
    * 
-   * @param dataDirs
-   * @param startOpt startup option
    * @throws IOException
    * @return true if the image needs to be saved or false otherwise
    */
-  boolean recoverTransitionRead(Collection<URI> dataDirs,
-                                Collection<URI> editsDirs,
-                                StartupOption startOpt)
+  boolean recoverTransitionRead(StartupOption startOpt)
       throws IOException {
     assert startOpt != StartupOption.FORMAT : 
       "NameNode formatting should be performed before reading the image";
     
+    Collection<URI> imageDirs = storage.getImageDirectories();
+    Collection<URI> editsDirs = storage.getEditsDirectories();
+
     // none of the data dirs exist
-    if((dataDirs.size() == 0 || editsDirs.size() == 0) 
+    if((imageDirs.size() == 0 || editsDirs.size() == 0) 
                              && startOpt != StartupOption.IMPORT)  
       throw new IOException(
           "All specified directories are not accessible or do not exist.");
@@ -206,7 +201,6 @@ public class FSImage implements NNStorag
       throw new IOException("Cannot import image from a checkpoint. "
                             + "\"dfs.namenode.checkpoint.dir\" is not set." );
     
-    storage.setStorageDirectories(dataDirs, editsDirs);
     // 1. For each data directory calculate its state and 
     // check whether all is consistent before transitioning.
     Map<StorageDirectory, StorageState> dataDirStates = 
@@ -431,7 +425,7 @@ public class FSImage implements NNStorag
     // a previous fs states in at least one of the storage directories.
     // Directories that don't have previous state do not rollback
     boolean canRollback = false;
-    FSImage prevState = new FSImage(getFSNamesystem());
+    FSImage prevState = new FSImage(conf, getFSNamesystem());
     prevState.getStorage().layoutVersion = FSConstants.LAYOUT_VERSION;
     for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
@@ -509,15 +503,15 @@ public class FSImage implements NNStorag
    */
   void doImportCheckpoint() throws IOException {
     FSNamesystem fsNamesys = getFSNamesystem();
-    FSImage ckptImage = new FSImage(fsNamesys);
+    FSImage ckptImage = new FSImage(conf, fsNamesys,
+                                    checkpointDirs, checkpointEditsDirs);
     // replace real image with the checkpoint image
     FSImage realImage = fsNamesys.getFSImage();
     assert realImage == this;
     fsNamesys.dir.fsImage = ckptImage;
     // load from the checkpoint dirs
     try {
-      ckptImage.recoverTransitionRead(checkpointDirs, checkpointEditsDirs,
-                                              StartupOption.REGULAR);
+      ckptImage.recoverTransitionRead(StartupOption.REGULAR);
     } finally {
       ckptImage.close();
     }

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed May 18 18:48:31 2011
@@ -330,8 +330,7 @@ public class FSNamesystem implements FSC
     if(fsImage == null) {
       this.dir = new FSDirectory(this, conf);
       StartupOption startOpt = NameNode.getStartupOption(conf);
-      this.dir.loadFSImage(getNamespaceDirs(conf),
-                           getNamespaceEditsDirs(conf), startOpt);
+      this.dir.loadFSImage(startOpt);
       long timeTakenToLoadFSImage = now() - systemStart;
       LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
       NameNode.getNameNodeMetrics().fsImageLoadTime.set(
@@ -422,8 +421,9 @@ public class FSNamesystem implements FSC
           + propertyName + "\" in hdfs-site.xml;" +
           "\n\t\t- use Backup Node as a persistent and up-to-date storage " +
           "of the file system meta-data.");
-    } else if (dirNames.isEmpty())
-      dirNames.add("file:///tmp/hadoop/dfs/name");
+    } else if (dirNames.isEmpty()) {
+      dirNames = Collections.singletonList("file:///tmp/hadoop/dfs/name");
+    }
     return Util.stringCollectionAsURIs(dirNames);
   }
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java Wed May 18 18:48:31 2011
@@ -62,6 +62,7 @@ import org.apache.hadoop.net.DNS;
 
 import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * NNStorage is responsible for management of the StorageDirectories used by
@@ -174,12 +175,19 @@ public class NNStorage extends Storage i
   /**
    * Construct the NNStorage.
    * @param conf Namenode configuration.
+   * @param imageDirs Directories the image can be stored in.
+   * @param editsDirs Directories the editlog can be stored in.
+   * @throws IOException if any directories are inaccessible.
    */
-  public NNStorage(Configuration conf) {
+  public NNStorage(Configuration conf, 
+                   Collection<URI> imageDirs, Collection<URI> editsDirs) 
+      throws IOException {
     super(NodeType.NAME_NODE);
 
     storageDirs = new CopyOnWriteArrayList<StorageDirectory>();
     this.listeners = new CopyOnWriteArrayList<NNStorageListener>();
+    
+    setStorageDirectories(imageDirs, editsDirs);
   }
 
   /**
@@ -297,9 +305,11 @@ public class NNStorage extends Storage i
   }
 
   /**
-   * Set the storage directories which will be used. NNStorage.close() should
-   * be called before this to ensure any previous storage directories have been
-   * freed.
+   * Set the storage directories which will be used. This should only ever be
+   * called from inside NNStorage. However, it must remain package private for
+   * testing: StorageDirectories need to be reinitialised after Mockito.spy()
+   * is used on this class, because Mockito does not work well with inner
+   * classes such as StorageDirectory.
    *
    * Synchronized due to initialization of storageDirs and removedStorageDirs.
    *
@@ -307,6 +317,7 @@ public class NNStorage extends Storage i
    * @param fsEditsDirs Locations to store edit logs.
    * @throws IOException
    */
+  @VisibleForTesting
   synchronized void setStorageDirectories(Collection<URI> fsNameDirs,
                                           Collection<URI> fsEditsDirs)
       throws IOException {
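
The javadoc above references a test-only pattern: after Mockito.spy() wraps storage, the
spy's StorageDirectory instances have to be reinitialised, which is why setStorageDirectories
stays package private behind @VisibleForTesting. Here is a sketch of that pattern, mirroring
the spy setup removed from TestSaveNamespace later in this diff; the wrapper class and
method names are hypothetical, not from the patch:

    package org.apache.hadoop.hdfs.server.namenode;

    import static org.mockito.Mockito.spy;

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;

    class SpyStorageSketch {
      static NNStorage spyOnStorage(NNStorage real, Configuration conf) throws IOException {
        NNStorage spied = spy(real);
        // Mockito's proxying does not cope well with the inner StorageDirectory class,
        // so the spy's directories are rebuilt from the same configured URIs.
        spied.setStorageDirectories(FSNamesystem.getNamespaceDirs(conf),
                                    FSNamesystem.getNamespaceEditsDirs(conf));
        return spied;
      }
    }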

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed May 18 18:48:31 2011
@@ -1479,7 +1479,7 @@ public class NameNode implements Namenod
       }
     }
 
-    FSImage fsImage = new FSImage(dirsToFormat, editDirsToFormat);
+    FSImage fsImage = new FSImage(conf, null, dirsToFormat, editDirsToFormat);
     FSNamesystem nsys = new FSNamesystem(fsImage, conf);
     
     // if clusterID is not provided - see if you can find the current one
@@ -1505,11 +1505,7 @@ public class NameNode implements Namenod
   private static boolean finalize(Configuration conf,
                                boolean isConfirmationNeeded
                                ) throws IOException {
-    Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
-    Collection<URI> editDirsToFormat = 
-                               FSNamesystem.getNamespaceEditsDirs(conf);
-    FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat,
-                                         editDirsToFormat), conf);
+    FSNamesystem nsys = new FSNamesystem(new FSImage(conf), conf);
     System.err.print(
         "\"finalize\" will remove the previous state of the files system.\n"
         + "Recent upgrade will become permanent.\n"

Modified: hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Wed May 18 18:48:31 2011
@@ -23,7 +23,6 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Date;
 import java.util.Iterator;
@@ -172,8 +171,8 @@ public class SecondaryNameNode implement
                                   "/tmp/hadoop/dfs/namesecondary");
     checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, 
                                   "/tmp/hadoop/dfs/namesecondary");    
-    checkpointImage = new CheckpointStorage(conf);
-    checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);
+    checkpointImage = new CheckpointStorage(conf, checkpointDirs, checkpointEditsDirs);
+    checkpointImage.recoverCreate();
 
     // Initialize other scheduling parameters from the configuration
     checkpointPeriod = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
@@ -468,9 +467,8 @@ public class SecondaryNameNode implement
   }
 
   private void startCheckpoint() throws IOException {
-    checkpointImage.getStorage().unlockAll();
     checkpointImage.getEditLog().close();
-    checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);
+    checkpointImage.recoverCreate();
     checkpointImage.startCheckpoint();
   }
 
@@ -609,26 +607,29 @@ public class SecondaryNameNode implement
 
   static class CheckpointStorage extends FSImage {
     /**
+     * Construct a checkpoint image.
+     * @param conf Node configuration.
+     * @param imageDirs URIs of storage for image.
+     * @param editsDirs URIs of storage for edit logs.
+     * @throws IOException If storage cannot be accessed.
      */
-    CheckpointStorage(Configuration conf) throws IOException {
-      super(conf);
+    CheckpointStorage(Configuration conf, 
+                      Collection<URI> imageDirs,
+                      Collection<URI> editsDirs) throws IOException {
+      super(conf, (FSNamesystem)null, imageDirs, editsDirs);
     }
 
     /**
      * Analyze checkpoint directories.
      * Create directories if they do not exist.
-     * Recover from an unsuccessful checkpoint is necessary. 
-     * 
-     * @param dataDirs
-     * @param editsDirs
+     * Recover from an unsuccessful checkpoint if necessary.
+     *
      * @throws IOException
      */
-    void recoverCreate(Collection<URI> dataDirs,
-                       Collection<URI> editsDirs) throws IOException {
-      Collection<URI> tempDataDirs = new ArrayList<URI>(dataDirs);
-      Collection<URI> tempEditsDirs = new ArrayList<URI>(editsDirs);
-      storage.close();
-      storage.setStorageDirectories(tempDataDirs, tempEditsDirs);
+    void recoverCreate() throws IOException {
+      storage.attemptRestoreRemovedStorage();
+      storage.unlockAll();
+
       for (Iterator<StorageDirectory> it = 
                    storage.dirIterator(); it.hasNext();) {
         StorageDirectory sd = it.next();

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Wed May 18 18:48:31 2011
@@ -25,8 +25,10 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
+import java.net.URI;
 import java.util.Arrays;
 import java.util.Random;
+import java.util.Collections;
 import java.util.zip.CRC32;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -379,8 +381,10 @@ public class UpgradeUtilities {
     for (int i = 0; i < parent.length; i++) {
       File versionFile = new File(parent[i], "VERSION");
       FileUtil.fullyDelete(versionFile);
-      storage = new NNStorage(conf);
-      storage.setStorageInfo(version);
+      storage = new NNStorage(conf, 
+                              Collections.<URI>emptyList(), 
+                              Collections.<URI>emptyList());
+
       StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
       sd.write(versionFile);
       versionFiles[i] = versionFile;

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Wed May 18 18:48:31 2011
@@ -19,6 +19,10 @@ package org.apache.hadoop.hdfs.server.na
 
 import java.io.File;
 import java.io.IOException;
+import java.util.List;
+import java.util.Collections;
+import java.net.URI;
+import org.apache.hadoop.conf.Configuration;
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -189,7 +193,12 @@ public class CreateEditsLog {
       }
     }
     
-    FSImage fsImage = new FSImage(editsLogDir.getAbsoluteFile().toURI());
+    List<URI> imagedirs = Collections.singletonList(
+        editsLogDir.getAbsoluteFile().toURI());
+    List<URI> editsdirs = Collections.singletonList(
+        editsLogDir.getAbsoluteFile().toURI());
+    FSImage fsImage = new FSImage(new Configuration(), 
+                                  (FSNamesystem)null, imagedirs, editsdirs);
     FileNameGenerator nameGenerator = new FileNameGenerator(BASE_PATH, 100);
 
     FSEditLog editLog = fsImage.getEditLog();

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Wed May 18 18:48:31 2011
@@ -89,7 +89,7 @@ public class TestClusterId {
     // see if cluster id not empty.
     Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
     Collection<URI> editsToFormat = new ArrayList<URI>(0);
-    FSImage fsImage = new FSImage(dirsToFormat, editsToFormat);
+    FSImage fsImage = new FSImage(config, null, dirsToFormat, editsToFormat);
     
     Iterator<StorageDirectory> sdit = 
       fsImage.getStorage().dirIterator(NNStorage.NameNodeDirType.IMAGE);

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Wed May 18 18:48:31 2011
@@ -23,6 +23,7 @@ import static org.mockito.Matchers.anyOb
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +34,8 @@ import org.apache.hadoop.hdfs.protocol.F
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -59,7 +62,7 @@ public class TestFsLimits {
   
   private static class TestFSDirectory extends FSDirectory {
     public TestFSDirectory() throws IOException {
-      super(new FSImage(), getMockNamesystem(), conf);
+      super(new FSImage(conf), getMockNamesystem(), conf);
       setReady(fsIsReady);
     }
     
@@ -71,8 +74,12 @@ public class TestFsLimits {
   }
 
   @Before
-  public void setUp() {
+  public void setUp() throws IOException {
     conf = new Configuration();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+             fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
+                                "namenode")).toString());
+
     rootInode = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME, perms, 0L, 0L);
     inodes = new INode[]{ rootInode, null };
     fs = null;

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1124376&r1=1124375&r2=1124376&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Wed May 18 18:48:31 2011
@@ -275,16 +275,6 @@ public class TestSaveNamespace {
     GenericTestUtils.formatNamenode(conf);
     FSNamesystem fsn = new FSNamesystem(conf);
 
-    // Replace the FSImage with a spy
-    final FSImage originalImage = fsn.dir.fsImage;
-    originalImage.getStorage().close();
-
-    FSImage spyImage = spy(originalImage);
-    spyImage.getStorage().setStorageDirectories(
-        FSNamesystem.getNamespaceDirs(conf), 
-        FSNamesystem.getNamespaceEditsDirs(conf));
-    fsn.dir.fsImage = spyImage;
-
     try {
       doAnEdit(fsn, 1);
       CheckpointSignature sig = fsn.rollEditLog();
@@ -297,7 +287,6 @@ public class TestSaveNamespace {
       fsn.saveNamespace();
 
       // Now shut down and restart the NN
-      originalImage.close();
       fsn.close();
       fsn = null;
 


