hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From szets...@apache.org
Subject svn commit: r761006 - in /hadoop/core/trunk: ./ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/org/apache/hadoop/hdfs/server/namenode/
Date Wed, 01 Apr 2009 18:42:50 GMT
Author: szetszwo
Date: Wed Apr  1 18:42:49 2009
New Revision: 761006

URL: http://svn.apache.org/viewvc?rev=761006&view=rev
Log:
HADOOP-2413. Remove the static variable FSNamesystem.fsNamesystemObject.  (Konstantin Shvachko
via szetszwo)

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=761006&r1=761005&r2=761006&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Apr  1 18:42:49 2009
@@ -353,6 +353,9 @@
     HADOOP-5464. DFSClient did not treat write timeout of 0 properly.
     (Raghu Angadi)
 
+    HADOOP-2413. Remove the static variable FSNamesystem.fsNamesystemObject.
+    (Konstantin Shvachko via szetszwo)
+
 Release 0.20.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java?rev=761006&r1=761005&r2=761006&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java Wed Apr  1 18:42:49 2009
@@ -188,11 +188,6 @@
     renameCheckpoint();
   }
 
-  private FSNamesystem getFSNamesystem() {
-    // HADOOP-5119 should get rid of this.
-    return FSNamesystem.getFSNamesystem();
-  }
-
   private Object getFSDirectoryRootLock() {
     return getFSNamesystem().dir.rootDir;
   }
@@ -232,7 +227,7 @@
           waitSpoolEnd();
           // update NameSpace in memory
           backupInputStream.setBytes(data);
-          FSEditLog.loadEditRecords(getLayoutVersion(),
+          editLog.loadEditRecords(getLayoutVersion(),
                     backupInputStream.getDataInputStream(), true);
           getFSNamesystem().dir.updateCountForINodeWithQuota(); // inefficient!
           break;
@@ -352,11 +347,11 @@
       // load edits.new
       EditLogFileInputStream edits = new EditLogFileInputStream(jSpoolFile);
       DataInputStream in = edits.getDataInputStream();
-      numEdits += FSEditLog.loadFSEdits(in, false);
+      numEdits += editLog.loadFSEdits(in, false);
   
       // first time reached the end of spool
       jsState = JSpoolState.WAIT;
-      numEdits += FSEditLog.loadEditRecords(getLayoutVersion(), in, true);
+      numEdits += editLog.loadEditRecords(getLayoutVersion(), in, true);
       getFSNamesystem().dir.updateCountForINodeWithQuota();
       edits.close();
     }

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=761006&r1=761005&r2=761006&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Apr  1 18:42:49 2009
@@ -45,7 +45,6 @@
  *************************************************/
 class FSDirectory implements Closeable {
 
-  final FSNamesystem namesystem;
   INodeDirectoryWithQuota rootDir;
   FSImage fsImage;  
   private boolean ready = false;
@@ -64,14 +63,18 @@
   }
 
   FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
+    fsImage.setFSNamesystem(ns);
     rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
         ns.createFsOwnerPermissions(new FsPermission((short)0755)),
         Integer.MAX_VALUE, -1);
     this.fsImage = fsImage;
-    namesystem = ns;
     initialize(conf);
   }
     
+  private FSNamesystem getFSNamesystem() {
+    return fsImage.getFSNamesystem();
+  }
+
   private void initialize(Configuration conf) {
     MetricsContext metricsContext = MetricsUtil.getContext("dfs");
     directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
@@ -199,7 +202,7 @@
           // Add file->block mapping
           INodeFile newF = (INodeFile)newNode;
           for (int i = 0; i < nrBlocks; i++) {
-            newF.setBlock(i, namesystem.blocksMap.addINode(blocks[i], newF));
+            newF.setBlock(i, getFSNamesystem().blocksMap.addINode(blocks[i], newF));
           }
         }
       } catch (IOException e) {
@@ -247,7 +250,7 @@
         // Add file->block mapping
         INodeFile newF = (INodeFile)newNode;
         for (int i = 0; i < nrBlocks; i++) {
-          newF.setBlock(i, namesystem.blocksMap.addINode(blocks[i], newF));
+          newF.setBlock(i, getFSNamesystem().blocksMap.addINode(blocks[i], newF));
         }
       }
     }
@@ -269,8 +272,8 @@
                   fileNode.getPreferredBlockSize()*fileNode.getReplication());
       
       // associate the new list of blocks with this file
-      namesystem.blocksMap.addINode(block, fileNode);
-      BlockInfo blockInfo = namesystem.blocksMap.getStoredBlock(block);
+      getFSNamesystem().blocksMap.addINode(block, fileNode);
+      BlockInfo blockInfo = getFSNamesystem().blocksMap.getStoredBlock(block);
       fileNode.addBlock(blockInfo);
 
       NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
@@ -321,9 +324,9 @@
     synchronized (rootDir) {
       // modify file-> block and blocksMap
       fileNode.removeBlock(block);
-      namesystem.blocksMap.removeINode(block);
+      getFSNamesystem().blocksMap.removeINode(block);
       // If block is removed from blocksMap remove it from corruptReplicasMap
-      namesystem.corruptReplicas.removeFromCorruptReplicasMap(block);
+      getFSNamesystem().corruptReplicas.removeFromCorruptReplicasMap(block);
 
       // write modified block locations to log
       fsImage.getEditLog().logOpenFile(path, fileNode);
@@ -631,7 +634,7 @@
           ArrayList<Block> v = new ArrayList<Block>();
           int filesRemoved = targetNode.collectSubtreeBlocksAndClear(v);
           incrDeletedFileCount(filesRemoved);
-          namesystem.removePathAndBlocks(src, v);
+          getFSNamesystem().removePathAndBlocks(src, v);
           if (NameNode.stateChangeLog.isDebugEnabled()) {
             NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
               +src+" is removed");
@@ -692,7 +695,7 @@
       
       int index = 0;
       for (Block b : newnode.getBlocks()) {
-        BlockInfo info = namesystem.blocksMap.addINode(b, newnode);
+        BlockInfo info = getFSNamesystem().blocksMap.addINode(b, newnode);
         newnode.setBlock(index, info); // inode refers to the block in BlocksMap
         index++;
       }
@@ -940,7 +943,7 @@
         }
         // Directory creation also count towards FilesCreated
         // to match count of files_deleted metric. 
-        if (namesystem != null)
+        if (getFSNamesystem() != null)
           NameNode.getNameNodeMetrics().numFilesCreated.inc();
         fsImage.getEditLog().logMkDir(cur, inodes[i]);
         NameNode.stateChangeLog.debug(
@@ -1244,7 +1247,7 @@
 
       // if the last access time update was within the last precision interval, then
       // no need to store access time
-      if (atime <= inodeTime + namesystem.getAccessTimePrecision() && !force) {
+      if (atime <= inodeTime + getFSNamesystem().getAccessTimePrecision() && !force) {
         status =  false;
       } else {
         inode.setAccessTime(atime);
@@ -1259,7 +1262,7 @@
    */
   void reset() {
     rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
-        namesystem.createFsOwnerPermissions(new FsPermission((short)0755)),
+        getFSNamesystem().createFsOwnerPermissions(new FsPermission((short)0755)),
         Integer.MAX_VALUE, -1);
   }
 

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=761006&r1=761005&r2=761006&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Wed Apr  1 18:42:49 2009
@@ -529,7 +529,7 @@
    * This is where we apply edits that we've been writing to disk all
    * along.
    */
-  static int loadFSEdits(EditLogInputStream edits) throws IOException {
+  int loadFSEdits(EditLogInputStream edits) throws IOException {
     DataInputStream in = edits.getDataInputStream();
     long startTime = FSNamesystem.now();
     int numEdits = loadFSEdits(in, true);
@@ -539,8 +539,7 @@
     return numEdits;
   }
 
-  static int loadFSEdits(DataInputStream in,
-                         boolean closeOnExit) throws IOException {
+  int loadFSEdits(DataInputStream in, boolean closeOnExit) throws IOException {
     int numEdits = 0;
     int logVersion = 0;
 
@@ -576,9 +575,9 @@
     return numEdits;
   }
 
-  static int loadEditRecords(int logVersion, DataInputStream in,
+  int loadEditRecords(int logVersion, DataInputStream in,
                              boolean closeOnExit) throws IOException {
-    FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
+    FSNamesystem fsNamesys = fsimage.getFSNamesystem();
     FSDirectory fsDir = fsNamesys.dir;
     int numEdits = 0;
     String clientName = null;
@@ -773,7 +772,7 @@
         case OP_SET_GENSTAMP: {
           numOpSetGenStamp++;
           long lw = in.readLong();
-          fsDir.namesystem.setGenerationStamp(lw);
+          fsNamesys.setGenerationStamp(lw);
           break;
         } 
         case OP_DATANODE_ADD: {
@@ -883,8 +882,8 @@
     }
   }
   
-  static short adjustReplication(short replication) {
-    FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
+  short adjustReplication(short replication) {
+    FSNamesystem fsNamesys = fsimage.getFSNamesystem();
     short minReplication = fsNamesys.getMinReplication();
     if (replication<minReplication) {
       replication = minReplication;

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=761006&r1=761005&r2=761006&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java Wed Apr  1 18:42:49 2009
@@ -114,6 +114,7 @@
     }
   }
 
+  protected FSNamesystem namesystem = null;
   protected long checkpointTime = -1L;  // The age of the image
   protected FSEditLog editLog = null;
   private boolean isUpgradeFinalized = false;
@@ -122,15 +123,7 @@
    * flag that controls if we try to restore failed storages
    */
   private boolean restoreFailedStorage = false;
-  public void setRestoreFailedStorage(boolean val) {
-    LOG.info("set restore failed storage to " + val);
-    restoreFailedStorage=val;
-  }
-  
-  public boolean getRestoreFailedStorage() {
-    return restoreFailedStorage;
-  }
-  
+
   /**
    * list of failed (and thus removed) storages
    */
@@ -156,8 +149,13 @@
   /**
    */
   FSImage() {
+    this((FSNamesystem)null);
+  }
+
+  FSImage(FSNamesystem ns) {
     super(NodeType.NAME_NODE);
     this.editLog = new FSEditLog(this);
+    setFSNamesystem(ns);
   }
 
   /**
@@ -183,6 +181,23 @@
     setStorageDirectories(dirs, editsDirs);
   }
   
+  protected FSNamesystem getFSNamesystem() {
+    return namesystem;
+  }
+
+  void setFSNamesystem(FSNamesystem ns) {
+    namesystem = ns;
+  }
+
+  public void setRestoreFailedStorage(boolean val) {
+    LOG.info("set restore failed storage to " + val);
+    restoreFailedStorage=val;
+  }
+  
+  public boolean getRestoreFailedStorage() {
+    return restoreFailedStorage;
+  }
+  
   void setStorageDirectories(Collection<File> fsNameDirs,
                              Collection<File> fsEditsDirs) {
     this.storageDirs = new ArrayList<StorageDirectory>();
@@ -488,7 +503,7 @@
     // a previous fs states in at least one of the storage directories.
     // Directories that don't have previous state do not rollback
     boolean canRollback = false;
-    FSImage prevState = new FSImage();
+    FSImage prevState = new FSImage(getFSNamesystem());
     prevState.layoutVersion = FSConstants.LAYOUT_VERSION;
     for (Iterator<StorageDirectory> it = 
                        dirIterator(); it.hasNext();) {
@@ -564,8 +579,8 @@
    * @throws IOException
    */
   void doImportCheckpoint() throws IOException {
-    FSImage ckptImage = new FSImage();
-    FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
+    FSNamesystem fsNamesys = getFSNamesystem();
+    FSImage ckptImage = new FSImage(fsNamesys);
     // replace real image with the checkpoint image
     FSImage realImage = fsNamesys.getFSImage();
     assert realImage == this;
@@ -903,7 +918,7 @@
     assert this.getLayoutVersion() < 0 : "Negative layout version is expected.";
     assert curFile != null : "curFile is null";
 
-    FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
+    FSNamesystem fsNamesys = getFSNamesystem();
     FSDirectory fsDir = fsNamesys.dir;
 
     //
@@ -946,7 +961,7 @@
       needToSave = (imgVersion != FSConstants.LAYOUT_VERSION);
 
       // read file info
-      short replication = FSNamesystem.getFSNamesystem().getDefaultReplication();
+      short replication = fsNamesys.getDefaultReplication();
 
       LOG.info("Number of files = " + numFiles);
 
@@ -959,7 +974,7 @@
         long blockSize = 0;
         path = readString(in);
         replication = in.readShort();
-        replication = FSEditLog.adjustReplication(replication);
+        replication = editLog.adjustReplication(replication);
         modificationTime = in.readLong();
         if (imgVersion <= -17) {
           atime = in.readLong();
@@ -1069,16 +1084,16 @@
     int numEdits = 0;
     EditLogFileInputStream edits = 
       new EditLogFileInputStream(getImageFile(sd, NameNodeFile.EDITS));
-    numEdits = FSEditLog.loadFSEdits(edits);
+    numEdits = editLog.loadFSEdits(edits);
     edits.close();
     File editsNew = getImageFile(sd, NameNodeFile.EDITS_NEW);
     if (editsNew.exists() && editsNew.length() > 0) {
       edits = new EditLogFileInputStream(editsNew);
-      numEdits += FSEditLog.loadFSEdits(edits);
+      numEdits += editLog.loadFSEdits(edits);
       edits.close();
     }
     // update the counts.
-    FSNamesystem.getFSNamesystem().dir.updateCountForINodeWithQuota();    
+    getFSNamesystem().dir.updateCountForINodeWithQuota();    
     return numEdits;
   }
 
@@ -1086,7 +1101,7 @@
    * Save the contents of the FS image to the file.
    */
   void saveFSImage(File newFile) throws IOException {
-    FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
+    FSNamesystem fsNamesys = getFSNamesystem();
     FSDirectory fsDir = fsNamesys.dir;
     long startTime = FSNamesystem.now();
     //
@@ -1737,22 +1752,24 @@
   }
 
   private boolean getDistributedUpgradeState() {
-    return FSNamesystem.getFSNamesystem().getDistributedUpgradeState();
+    FSNamesystem ns = getFSNamesystem();
+    return ns == null ? false : ns.getDistributedUpgradeState();
   }
 
   private int getDistributedUpgradeVersion() {
-    return FSNamesystem.getFSNamesystem().getDistributedUpgradeVersion();
+    FSNamesystem ns = getFSNamesystem();
+    return ns == null ? 0 : ns.getDistributedUpgradeVersion();
   }
 
   private void setDistributedUpgradeState(boolean uState, int uVersion) {
-    FSNamesystem.getFSNamesystem().upgradeManager.setUpgradeState(uState, uVersion);
+    getFSNamesystem().upgradeManager.setUpgradeState(uState, uVersion);
   }
 
   private void verifyDistributedUpgradeProgress(StartupOption startOpt
                                                 ) throws IOException {
     if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
       return;
-    UpgradeManager um = FSNamesystem.getFSNamesystem().upgradeManager;
+    UpgradeManager um = getFSNamesystem().upgradeManager;
     assert um != null : "FSNameSystem.upgradeManager is null.";
     if(startOpt != StartupOption.UPGRADE) {
       if(um.getUpgradeState())
@@ -1767,11 +1784,11 @@
   }
 
   private void initializeDistributedUpgrade() throws IOException {
-    UpgradeManagerNamenode um = FSNamesystem.getFSNamesystem().upgradeManager;
+    UpgradeManagerNamenode um = getFSNamesystem().upgradeManager;
     if(! um.initializeUpgrade())
       return;
     // write new upgrade state into disk
-    FSNamesystem.getFSNamesystem().getFSImage().writeAll();
+    writeAll();
     NameNode.LOG.info("\n   Distributed upgrade for NameNode version " 
         + um.getUpgradeVersion() + " to current LV " 
         + FSConstants.LAYOUT_VERSION + " is initialized.");

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=761006&r1=761005&r2=761006&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Apr  1 18:42:49 2009
@@ -246,7 +246,6 @@
   private long missingBlocksInCurIter = 0;
   private long missingBlocksInPrevIter = 0; 
 
-  private static FSNamesystem fsNamesystemObject;
   private SafeModeInfo safeMode;  // safe mode information
   private Host2NodesMap host2DataNodeMap = new Host2NodesMap();
     
@@ -419,7 +418,6 @@
    */
   private void setConfigurationParameters(Configuration conf) 
                                           throws IOException {
-    fsNamesystemObject = this;
     try {
       fsOwner = UnixUserGroupInformation.login(conf);
     } catch (LoginException e) {
@@ -482,15 +480,6 @@
     return defaultPermission;
   }
   
-  /** Return the FSNamesystem object
-   * @deprecated FSNamesystem object should be obtained from the container
-   *             object such as a NameNode object. 
-   */
-  @Deprecated
-  public static FSNamesystem getFSNamesystem() {
-    return fsNamesystemObject;
-  } 
-
   NamespaceInfo getNamespaceInfo() {
     return new NamespaceInfo(dir.fsImage.getNamespaceID(),
                              dir.fsImage.getCTime(),

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=761006&r1=761005&r2=761006&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Apr  1 18:42:49 2009
@@ -19,17 +19,12 @@
 
 import junit.framework.TestCase;
 import java.io.*;
-import java.util.Collection;
 import java.util.Iterator;
-import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.permission.*;
 
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.io.ArrayWritable;
-import org.apache.hadoop.io.UTF8;
-import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
@@ -39,31 +34,32 @@
  * This class tests the creation and validation of a checkpoint.
  */
 public class TestEditLog extends TestCase {
-  static final int numDatanodes = 1;
+  static final int NUM_DATA_NODES = 1;
 
-  // This test creates numThreads threads and each thread does
-  // 2 * numberTransactions Transactions concurrently.
-  int numberTransactions = 100;
-  int numThreads = 100;
+  // This test creates NUM_THREADS threads and each thread does
+  // 2 * NUM_TRANSACTIONS Transactions concurrently.
+  static final int NUM_TRANSACTIONS = 100;
+  static final int NUM_THREADS = 100;
 
   //
   // an object that does a bunch of transactions
   //
   static class Transactions implements Runnable {
-    FSEditLog editLog;
+    FSNamesystem namesystem;
     int numTransactions;
     short replication = 3;
     long blockSize = 64;
 
-    Transactions(FSEditLog editlog, int num) {
-      editLog = editlog;
+    Transactions(FSNamesystem ns, int num) {
+      namesystem = ns;
       numTransactions = num;
     }
 
     // add a bunch of transactions.
     public void run() {
-      PermissionStatus p = FSNamesystem.getFSNamesystem(
-          ).createFsOwnerPermissions(new FsPermission((short)0777));
+      PermissionStatus p = namesystem.createFsOwnerPermissions(
+                                          new FsPermission((short)0777));
+      FSEditLog editLog = namesystem.getEditLog();
 
       for (int i = 0; i < numTransactions; i++) {
         try {
@@ -86,74 +82,71 @@
   public void testEditLog() throws IOException {
 
     // start a cluster 
-
-    Collection<File> namedirs = null;
-    Collection<File> editsdirs = null;
     Configuration conf = new Configuration();
-    MiniDFSCluster cluster = new MiniDFSCluster(0, conf, numDatanodes, 
-                                                true, true, null, null);
-    cluster.waitActive();
-    FileSystem fileSys = cluster.getFileSystem();
-    int numdirs = 0;
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
 
     try {
-      namedirs = cluster.getNameDirs();
-      editsdirs = cluster.getNameEditsDirs();
-    } finally {
-      fileSys.close();
-      cluster.shutdown();
-    }
-
-    for (Iterator it = namedirs.iterator(); it.hasNext(); ) {
-      File dir = (File)it.next();
-      System.out.println(dir);
-      numdirs++;
-    }
-
-    FSImage fsimage = new FSImage(namedirs, editsdirs);
-    FSEditLog editLog = fsimage.getEditLog();
-
-    // set small size of flush buffer
-    editLog.setBufferCapacity(2048);
-    editLog.close();
-    editLog.open();
-  
-    // Create threads and make them run transactions concurrently.
-    Thread threadId[] = new Thread[numThreads];
-    for (int i = 0; i < numThreads; i++) {
-      Transactions trans = new Transactions(editLog, numberTransactions);
-      threadId[i] = new Thread(trans, "TransactionThread-" + i);
-      threadId[i].start();
-    }
-
-    // wait for all transactions to get over
-    for (int i = 0; i < numThreads; i++) {
-      try {
-        threadId[i].join();
-      } catch (InterruptedException e) {
-        i--;      // retry 
+      cluster = new MiniDFSCluster(conf, NUM_DATA_NODES, true, null);
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
+      final FSNamesystem namesystem = cluster.getNamesystem();
+  
+      for (Iterator<File> it = cluster.getNameDirs().iterator(); it.hasNext(); ) {
+        File dir = it.next();
+        System.out.println(dir);
       }
-    } 
+  
+      FSImage fsimage = namesystem.getFSImage();
+      FSEditLog editLog = fsimage.getEditLog();
+  
+      // set small size of flush buffer
+      FSEditLog.setBufferCapacity(2048);
+      editLog.close();
+      editLog.open();
     
-    editLog.close();
-
-    // Verify that we can read in all the transactions that we have written.
-    // If there were any corruptions, it is likely that the reading in
-    // of these transactions will throw an exception.
-    //
-    for (Iterator<StorageDirectory> it = 
-            fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
-      File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
-      System.out.println("Verifying file: " + editFile);
-      int numEdits = FSEditLog.loadFSEdits(new EditLogFileInputStream(editFile));
-      int numLeases = FSNamesystem.getFSNamesystem().leaseManager.countLease();
-      System.out.println("Number of outstanding leases " + numLeases);
-      assertEquals(0, numLeases);
-      assertTrue("Verification for " + editFile + " failed. " +
-                 "Expected " + (numThreads * 2 * numberTransactions) + " transactions. "+
-                 "Found " + numEdits + " transactions.",
-                 numEdits == numThreads * 2 * numberTransactions);
-
+      // Create threads and make them run transactions concurrently.
+      Thread threadId[] = new Thread[NUM_THREADS];
+      for (int i = 0; i < NUM_THREADS; i++) {
+        Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
+        threadId[i] = new Thread(trans, "TransactionThread-" + i);
+        threadId[i].start();
+      }
+  
+      // wait for all transactions to get over
+      for (int i = 0; i < NUM_THREADS; i++) {
+        try {
+          threadId[i].join();
+        } catch (InterruptedException e) {
+          i--;      // retry 
+        }
+      } 
+      
+      editLog.close();
+      editLog.open();
+  
+      // Verify that we can read in all the transactions that we have written.
+      // If there were any corruptions, it is likely that the reading in
+      // of these transactions will throw an exception.
+      //
+      for (Iterator<StorageDirectory> it = 
+              fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
+        File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
+        System.out.println("Verifying file: " + editFile);
+        int numEdits = namesystem.getEditLog().loadFSEdits(
+                                  new EditLogFileInputStream(editFile));
+        int numLeases = namesystem.leaseManager.countLease();
+        System.out.println("Number of outstanding leases " + numLeases);
+        assertEquals(0, numLeases);
+        assertTrue("Verification for " + editFile + " failed. " +
+                   "Expected " + (NUM_THREADS * 2 * NUM_TRANSACTIONS) + " transactions. "+
+                   "Found " + numEdits + " transactions.",
+                   numEdits == NUM_THREADS * 2 * NUM_TRANSACTIONS);
+  
+      }
+    } finally {
+      if(fileSys != null) fileSys.close();
+      if(cluster != null) cluster.shutdown();
     }
   }
 }

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java?rev=761006&r1=761005&r2=761006&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java Wed Apr  1 18:42:49 2009
@@ -60,7 +60,7 @@
       // TODO Auto-generated catch block
       e.printStackTrace();
     }
-    FSNamesystem fsNamesystem = FSNamesystem.getFSNamesystem();
+    FSNamesystem fsNamesystem = namenode.getNamesystem();
     replicator = fsNamesystem.replicator;
     cluster = fsNamesystem.clusterMap;
     // construct network topology



Mime
View raw message