hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From s..@apache.org
Subject svn commit: r936130 - in /hadoop/hdfs/trunk: ./ .eclipse.templates/ ivy/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Date Wed, 21 Apr 2010 01:06:00 GMT
Author: shv
Date: Wed Apr 21 01:05:59 2010
New Revision: 936130

URL: http://svn.apache.org/viewvc?rev=936130&view=rev
Log:
HDFS-909. Revert commit 935770.

Removed:
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
Modified:
    hadoop/hdfs/trunk/.eclipse.templates/.classpath
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/ivy/libraries.properties
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java

Modified: hadoop/hdfs/trunk/.eclipse.templates/.classpath
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/.eclipse.templates/.classpath?rev=936130&r1=936129&r2=936130&view=diff
==============================================================================
--- hadoop/hdfs/trunk/.eclipse.templates/.classpath (original)
+++ hadoop/hdfs/trunk/.eclipse.templates/.classpath Wed Apr 21 01:05:59 2010
@@ -34,7 +34,7 @@
 	<classpathentry kind="lib" path="build/ivy/lib/Hadoop-Hdfs/common/slf4j-api-1.5.8.jar"/>
 	<classpathentry kind="lib" path="build/ivy/lib/Hadoop-Hdfs/test/slf4j-log4j12-1.4.3.jar"/>
 	<classpathentry kind="lib" path="build/ivy/lib/Hadoop-Hdfs/common/xmlenc-0.52.jar"/>
-	<classpathentry kind="lib" path="build/ivy/lib/Hadoop-Hdfs/test/mockito-all-1.8.2.jar"/>
+	<classpathentry kind="lib" path="build/ivy/lib/Hadoop-Hdfs/test/mockito-all-1.8.0.jar"/>

 	<classpathentry kind="lib" path="build/ivy/lib/Hadoop-Hdfs/common/aspectjrt-1.6.5.jar"/>
 	<classpathentry kind="lib" path="build/ivy/lib/hdfsproxy/common/cactus.core.framework.uberjar.javaEE.14-1.8.0.jar"/>
 	<classpathentry kind="lib" path="build/ivy/lib/hdfsproxy/common/cactus.integration.ant-1.8.0.jar"/>

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=936130&r1=936129&r2=936130&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Apr 21 01:05:59 2010
@@ -831,9 +831,6 @@ Release 0.20.3 - Unreleased
     HDFS-1041. DFSClient.getFileChecksum(..) should retry if connection to
     the first datanode fails.  (szetszwo)
 
-    HDFS-909. Wait until edits syncing finishes before purging edits.
-    (Todd Lipcon via shv)
-
 Release 0.20.2 - Unreleased
 
   IMPROVEMENTS

Modified: hadoop/hdfs/trunk/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/ivy/libraries.properties?rev=936130&r1=936129&r2=936130&view=diff
==============================================================================
--- hadoop/hdfs/trunk/ivy/libraries.properties (original)
+++ hadoop/hdfs/trunk/ivy/libraries.properties Wed Apr 21 01:05:59 2010
@@ -77,4 +77,4 @@ xerces.version=1.4.4
 
 aspectj.version=1.6.5
 
-mockito-all.version=1.8.2
+mockito-all.version=1.8.0

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=936130&r1=936129&r2=936130&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Wed Apr 21 01:05:59 2010
@@ -113,8 +113,7 @@ public class FSEditLog {
   private long lastPrintTime;
 
   // is a sync currently running?
-  private volatile boolean isSyncRunning;
-
+  private boolean isSyncRunning;
 
   // these are statistics counters.
   private long numTransactions;        // number of transactions
@@ -160,14 +159,6 @@ public class FSEditLog {
     return editStreams == null ? 0 : editStreams.size();
   }
 
-  /**
-   * Return the currently active edit streams.
-   * This should be used only by unit tests.
-   */
-  ArrayList<EditLogOutputStream> getEditStreams() {
-    return editStreams;
-  }
-
   boolean isOpen() {
     return getNumEditStreams() > 0;
   }
@@ -210,8 +201,6 @@ public class FSEditLog {
   }
 
   synchronized void createEditLogFile(File name) throws IOException {
-    waitForSyncToFinish();
-
     EditLogOutputStream eStream = new EditLogFileOutputStream(name,
         sizeOutputFlushBuffer);
     eStream.create();
@@ -222,7 +211,12 @@ public class FSEditLog {
    * Shutdown the file store.
    */
   synchronized void close() {
-    waitForSyncToFinish();
+    while (isSyncRunning) {
+      try {
+        wait(1000);
+      } catch (InterruptedException ie) { 
+      }
+    }
     if (editStreams == null || editStreams.isEmpty()) {
       return;
     }
@@ -889,52 +883,9 @@ public class FSEditLog {
       metrics.transactions.inc((end-start));
   }
 
-  /**
-   * Blocks until all ongoing edits have been synced to disk.
-   * This differs from logSync in that it waits for edits that have been
-   * written by other threads, not just edits from the calling thread.
-   *
-   * NOTE: this should be done while holding the FSNamesystem lock, or
-   * else more operations can start writing while this is in progress.
-   */
-  void logSyncAll() throws IOException {
-    // Record the most recent transaction ID as our own id
-    synchronized (this) {
-      TransactionId id = myTransactionId.get();
-      id.txid = txid;
-    }
-    // Then make sure we're synced up to this point
-    logSync();
-  }
-  
-  /**
-   * Sync all modifications done by this thread.
-   *
-   * The internal concurrency design of this class is as follows:
-   *   - Log items are written synchronized into an in-memory buffer,
-   *     and each assigned a transaction ID.
-   *   - When a thread (client) would like to sync all of its edits, logSync()
-   *     uses a ThreadLocal transaction ID to determine what edit number must
-   *     be synced to.
-   *   - The isSyncRunning volatile boolean tracks whether a sync is currently
-   *     under progress.
-   *
-   * The data is double-buffered within each edit log implementation so that
-   * in-memory writing can occur in parallel with the on-disk writing.
-   *
-   * Each sync occurs in three steps:
-   *   1. synchronized, it swaps the double buffer and sets the isSyncRunning
-   *      flag.
-   *   2. unsynchronized, it flushes the data to storage
-   *   3. synchronized, it resets the flag and notifies anyone waiting on the
-   *      sync.
-   *
-   * The lack of synchronization on step 2 allows other threads to continue
-   * to write into the memory buffer while the sync is in progress.
-   * Because this step is unsynchronized, actions that need to avoid
-   * concurrency with sync() should be synchronized and also call
-   * waitForSyncToFinish() before assuming they are running alone.
-   */
+  //
+  // Sync all modifications done by this thread.
+  //
   public void logSync() throws IOException {
     ArrayList<EditLogOutputStream> errorStreams = null;
     long syncStart = 0;
@@ -1268,7 +1219,6 @@ public class FSEditLog {
    * Closes the current edit log and opens edits.new. 
    */
   synchronized void rollEditLog() throws IOException {
-    waitForSyncToFinish();
     Iterator<StorageDirectory> it = fsimage.dirIterator(NameNodeDirType.EDITS);
     if(!it.hasNext()) 
       return;
@@ -1301,8 +1251,6 @@ public class FSEditLog {
    * @throws IOException
    */
   synchronized void divertFileStreams(String dest) throws IOException {
-    waitForSyncToFinish();
-
     assert getNumEditStreams() >= getNumEditsDirs() :
       "Inconsistent number of streams";
     ArrayList<EditLogOutputStream> errorStreams = null;
@@ -1339,25 +1287,10 @@ public class FSEditLog {
    * Reopens the edits file.
    */
   synchronized void purgeEditLog() throws IOException {
-    waitForSyncToFinish();
     revertFileStreams(
         Storage.STORAGE_DIR_CURRENT + "/" + NameNodeFile.EDITS_NEW.getName());
   }
 
-
-  /**
-   * The actual sync activity happens while not synchronized on this object.
-   * Thus, synchronized activities that require that they are not concurrent
-   * with file operations should wait for any running sync to finish.
-   */
-  synchronized void waitForSyncToFinish() {
-    while (isSyncRunning) {
-      try {
-        wait(1000);
-      } catch (InterruptedException ie) {}
-    }
-  }
-
   /**
    * Revert file streams from file edits.new back to file edits.<p>
    * Close file streams, which are currently writing into getRoot()/source.
@@ -1367,8 +1300,6 @@ public class FSEditLog {
    * @throws IOException
    */
   synchronized void revertFileStreams(String source) throws IOException {
-    waitForSyncToFinish();
-
     assert getNumEditStreams() >= getNumEditsDirs() :
       "Inconsistent number of streams";
     ArrayList<EditLogOutputStream> errorStreams = null;
@@ -1380,8 +1311,7 @@ public class FSEditLog {
       EditLogOutputStream eStream = itE.next();
       StorageDirectory sd = itD.next();
       if(!eStream.getName().startsWith(sd.getRoot().getPath()))
-        throw new IOException("Inconsistent order of edit streams: " + eStream +
-                              " does not start with " + sd.getRoot().getPath());
+        throw new IOException("Inconsistent order of edit streams: " + eStream);
       try {
         // close old stream
         closeStream(eStream);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=936130&r1=936129&r2=936130&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Apr 21 01:05:59 2010
@@ -3855,10 +3855,6 @@ public class FSNamesystem implements FSC
    * @throws IOException
    */
   synchronized void enterSafeMode() throws IOException {
-    // Ensure that any concurrent operations have been fully synced
-    // before entering safe mode. This ensures that the FSImage
-    // is entirely stable on disk as soon as we're in safe mode.
-    getEditLog().logSyncAll();
     if (!isInSafeMode()) {
       safeMode = new SafeModeInfo();
       return;

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=936130&r1=936129&r2=936130&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Apr 21 01:05:59 2010
@@ -256,7 +256,7 @@ public class MiniDFSCluster {
       
       System.out.println("HDFS using RPCEngine: "+rpcEngineName);
       try {
-        Class rpcEngine = conf.getClassByName(rpcEngineName);
+        Class<?> rpcEngine = conf.getClassByName(rpcEngineName);
         setRpcEngine(conf, NamenodeProtocols.class, rpcEngine);
         setRpcEngine(conf, NamenodeProtocol.class, rpcEngine);
         setRpcEngine(conf, ClientProtocol.class, rpcEngine);
@@ -318,7 +318,7 @@ public class MiniDFSCluster {
     }
   }
   
-  private void setRpcEngine(Configuration conf, Class protocol, Class engine) {
+  private void setRpcEngine(Configuration conf, Class<?> protocol, Class<?> engine) {
     conf.setClass("rpc.engine."+protocol.getName(), engine, Object.class);
   }
 
@@ -444,8 +444,9 @@ public class MiniDFSCluster {
           throw new IOException("Mkdirs failed to create directory for DataNode "
                                 + i + ": " + dir1 + " or " + dir2);
         }
-        dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-                    fileAsURI(dir1) + "," + fileAsURI(dir2));
+        String dirs = fileAsURI(dir1) + "," + fileAsURI(dir2);
+        dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
+        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
       }
       if (simulatedCapacities != null) {
         dnConf.setBoolean("dfs.datanode.simulateddatastorage", true);



Mime
View raw message