hadoop-hdfs-commits mailing list archives

From sur...@apache.org
Subject svn commit: r1078173 - in /hadoop/hdfs/branches/HDFS-1052: ./ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/...
Date Fri, 04 Mar 2011 22:17:41 GMT
Author: suresh
Date: Fri Mar  4 22:17:40 2011
New Revision: 1078173

URL: http://svn.apache.org/viewvc?rev=1078173&view=rev
Log:
Merging changes r1035795:r1035920 from trunk to federation

Added:
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
      - copied unchanged from r1035920, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
      - copied unchanged from r1035842, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
Modified:
    hadoop/hdfs/branches/HDFS-1052/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/build.xml   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/java/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/java/hdfs-default.xml
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary/   (props changed)

Propchange: hadoop/hdfs/branches/HDFS-1052/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 22:17:40 2011
@@ -1,4 +1,4 @@
 /hadoop/core/branches/branch-0.19/hdfs:713112
 /hadoop/hdfs/branches/HDFS-265:796829-820463
 /hadoop/hdfs/branches/branch-0.21:820487
-/hadoop/hdfs/trunk:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1036738,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1036738,1052823,1060619,1061067,1062020

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=1078173&r1=1078172&r2=1078173&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Fri Mar  4 22:17:40 2011
@@ -442,6 +442,18 @@ Release 0.22.0 - Unreleased
 
     HDFS-1387. Update HDFS permissions guide for security. (Todd Lipcon via eli)
 
+    HDFS-455. Make NN and DN handle in a intuitive way comma-separated 
+    configuration strings. (Michele Catasta via eli)
+
+    HDFS-1071. savenamespace should write the fsimage to all configured 
+    fs.name.dir in parallel (Dmytro Molkov via jghoman)
+ 
+    HDFS-1055. Improve thread naming for DataXceivers. 
+    (Todd Lipcon and Ramkumar Vadali via eli).
+
+    HDFS-718. Configuration parameter to prevent accidental formatting of 
+    HDFS filesystem. (Andrew Ryan via jghoman)
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)

Propchange: hadoop/hdfs/branches/HDFS-1052/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 22:17:40 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/build.xml:779102
 /hadoop/hdfs/branches/HDFS-265/build.xml:796829-820463
 /hadoop/hdfs/branches/branch-0.21/build.xml:820487
-/hadoop/hdfs/trunk/build.xml:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/build.xml:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1052823,1060619,1061067,1062020

Propchange: hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 22:17:40 2011
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
-/hadoop/hdfs/trunk/src/c++/libhdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/c++/libhdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1052823,1060619,1061067,1062020

Propchange: hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 22:17:40 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/contrib/hdfsproxy:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy:820487
-/hadoop/hdfs/trunk/src/contrib/hdfsproxy:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/contrib/hdfsproxy:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1052823,1060619,1061067,1062020

Propchange: hadoop/hdfs/branches/HDFS-1052/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 22:17:40 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
 /hadoop/hdfs/branches/HDFS-265/src/java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java:820487
-/hadoop/hdfs/trunk/src/java:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/java:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1052823,1060619,1061067,1062020

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/hdfs-default.xml?rev=1078173&r1=1078172&r2=1078173&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/hdfs-default.xml (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/hdfs-default.xml Fri Mar  4 22:17:40 2011
@@ -554,4 +554,13 @@ creations/deletions), or "all".</descrip
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.support.allow.format</name>
+  <value>true</value>
+  <description>Does HDFS namenode allow itself to be formatted?
+               You may consider setting this to false for any production
+               cluster, to avoid any possibility of formatting a running DFS.
+  </description>
+</property>
+
 </configuration>
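
The description above recommends disabling formatting on production clusters. A minimal sketch (not part of this commit; the class name is hypothetical) of the flag as seen through org.apache.hadoop.conf.Configuration; in a real deployment the override would live in hdfs-site.xml rather than in code:

    import org.apache.hadoop.conf.Configuration;

    public class AllowFormatFlagDemo {
      private static final String KEY = "dfs.namenode.support.allow.format";

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Equivalent of putting <value>false</value> in hdfs-site.xml on a production cluster.
        conf.setBoolean(KEY, false);
        // The NameNode reads the flag the same way before formatting; the default is true.
        if (!conf.getBoolean(KEY, true)) {
          System.err.println(KEY + " is false; refusing to format");
        }
      }
    }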

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1078173&r1=1078172&r2=1078173&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Mar  4 22:17:40 2011
@@ -93,6 +93,8 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";
   public static final String  DFS_NAMENODE_NAME_DIR_RESTORE_KEY = "dfs.namenode.name.dir.restore";
   public static final boolean DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT = false;
+  public static final String  DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY = "dfs.namenode.support.allow.format";
+  public static final boolean DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT = true;
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int     DFS_LIST_LIMIT_DEFAULT = 1000;
   public static final String  DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY = "dfs.datanode.failed.volumes.tolerated";

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1078173&r1=1078172&r2=1078173&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Mar  4 22:17:40 2011
@@ -2027,7 +2027,7 @@ public class DataNode extends Configured
 
   static Collection<URI> getStorageDirs(Configuration conf) {
     Collection<String> dirNames =
-      conf.getStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
+      conf.getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
     return Util.stringCollectionAsURIs(dirNames);
   }
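
For the HDFS-455 change above, a minimal sketch (not part of this commit; the class name is hypothetical) of why the trimmed variant matters: with whitespace after the commas, getStringCollection keeps the leading space on each entry, which can later yield unusable storage-directory entries, while getTrimmedStringCollection strips it:

    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;

    public class TrimmedCollectionDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // A value an operator might plausibly write, with spaces after the commas.
        conf.set("dfs.datanode.data.dir", "/data/1, /data/2, /data/3");

        Collection<String> raw = conf.getStringCollection("dfs.datanode.data.dir");
        Collection<String> trimmed = conf.getTrimmedStringCollection("dfs.datanode.data.dir");

        System.out.println(raw);      // entries for /data/2 and /data/3 keep their leading space
        System.out.println(trimmed);  // all entries are clean paths
      }
    }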
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1078173&r1=1078172&r2=1078173&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Fri Mar  4 22:17:40 2011
@@ -88,6 +88,20 @@ class DataXceiver extends DataTransferPr
     }
   }
 
+  /**
+   * Update the current thread's name to contain the current status.
+   * Use this only after this receiver has started on its thread, i.e.,
+   * outside the constructor.
+   */
+  private void updateCurrentThreadName(String status) {
+    StringBuilder sb = new StringBuilder();
+    sb.append("DataXceiver for client ").append(remoteAddress);
+    if (status != null) {
+      sb.append(" [").append(status).append("]");
+    }
+    Thread.currentThread().setName(sb.toString());
+  }
+
   /** Return the datanode object. */
   DataNode getDataNode() {return datanode;}
 
@@ -95,6 +109,8 @@ class DataXceiver extends DataTransferPr
    * Read/write data from/to the DataXceiveServer.
    */
   public void run() {
+    updateCurrentThreadName("Waiting for operation");
+
     DataInputStream in=null; 
     try {
       in = new DataInputStream(
@@ -119,6 +135,7 @@ class DataXceiver extends DataTransferPr
         LOG.debug(datanode.getMachineName() + ":Number of active connections is: "
             + datanode.getXceiverCount());
       }
+      updateCurrentThreadName("Cleaning up");
       IOUtils.closeStream(in);
       IOUtils.closeSocket(s);
       dataXceiverServer.childSockets.remove(s);
@@ -166,6 +183,8 @@ class DataXceiver extends DataTransferPr
             dnR.getStorageID(), block, "%d")
         : dnR + " Served block " + block + " to " +
             s.getInetAddress();
+
+    updateCurrentThreadName("Sending block " + block);
     try {
       try {
         blockSender = new BlockSender(block, startOffset, length,
@@ -212,6 +231,7 @@ class DataXceiver extends DataTransferPr
       long newGs, long minBytesRcvd, long maxBytesRcvd,
       String client, DatanodeInfo srcDataNode, DatanodeInfo[] targets,
       Token<BlockTokenIdentifier> blockToken) throws IOException {
+    updateCurrentThreadName("Receiving block " + block + " client=" + client);
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() +
@@ -427,11 +447,13 @@ class DataXceiver extends DataTransferPr
       }
     }
 
+    updateCurrentThreadName("Reading metadata for block " + block);
     final MetaDataInputStream metadataIn = 
       datanode.data.getMetaDataInputStream(block);
     final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
         metadataIn, BUFFER_SIZE));
 
+    updateCurrentThreadName("Getting checksum for block " + block);
     try {
       //read metadata file
       final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
@@ -470,6 +492,7 @@ class DataXceiver extends DataTransferPr
   @Override
   protected void opCopyBlock(DataInputStream in, ExtendedBlock block,
       Token<BlockTokenIdentifier> blockToken) throws IOException {
+    updateCurrentThreadName("Copying block " + block);
     // Read in the header
     if (datanode.isBlockTokenEnabled) {
       try {
@@ -545,6 +568,8 @@ class DataXceiver extends DataTransferPr
   protected void opReplaceBlock(DataInputStream in,
       ExtendedBlock block, String sourceID, DatanodeInfo proxySource,
       Token<BlockTokenIdentifier> blockToken) throws IOException {
+    updateCurrentThreadName("Replacing block " + block + " from " + sourceID);
+
     /* read header */
     block.setNumBytes(dataXceiverServer.estimateBlockSize);
     if (datanode.isBlockTokenEnabled) {
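
The updateCurrentThreadName helper added above follows a general pattern: a long-lived worker thread renames itself as it moves between operations so that a thread dump is self-describing. A generic sketch of that pattern outside the HDFS classes (not part of this commit; the class is hypothetical):

    public class SelfNamingWorker implements Runnable {
      private final String remoteAddress;

      SelfNamingWorker(String remoteAddress) {
        this.remoteAddress = remoteAddress;
      }

      // Call only from the worker's own thread (inside run()), never from the constructor.
      private void updateCurrentThreadName(String status) {
        StringBuilder sb = new StringBuilder("Worker for client ").append(remoteAddress);
        if (status != null) {
          sb.append(" [").append(status).append("]");
        }
        Thread.currentThread().setName(sb.toString());
      }

      public void run() {
        updateCurrentThreadName("Waiting for operation");
        // ... dispatch the operation, renaming again for each phase ...
        updateCurrentThreadName("Cleaning up");
      }
    }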

Propchange: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 22:17:40 2011
@@ -4,4 +4,4 @@
 /hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:776175-785643,785929-786278
 /hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:820487
-/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1052823,1060619,1061067,1062020

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1078173&r1=1078172&r2=1078173&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Fri Mar  4 22:17:40 2011
@@ -21,6 +21,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -268,7 +269,7 @@ public class FSEditLog {
    *  except fsimage.processIOError)
    */
   synchronized void processIOError(
-      ArrayList<EditLogOutputStream> errorStreams,
+      List<EditLogOutputStream> errorStreams,
       boolean propagate) {
     
     if (errorStreams == null || errorStreams.size() == 0) {
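
Broadening the parameter type from ArrayList to List lets callers pass any List implementation, for example the Collections.synchronizedList wrapper that the parallel image save below uses for its error list. A minimal sketch of the distinction (not part of this commit; names hypothetical):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class ListParameterDemo {
      // Accepts any List implementation, including synchronized wrappers.
      static void report(List<String> errors) {
        synchronized (errors) {   // iterating a synchronized list still needs external locking
          for (String e : errors) {
            System.out.println(e);
          }
        }
      }

      public static void main(String[] args) {
        List<String> errors = Collections.synchronizedList(new ArrayList<String>());
        errors.add("storage-dir-1");
        report(errors);   // compiles; a parameter typed ArrayList<String> could not accept this list
      }
    }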

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1078173&r1=1078172&r2=1078173&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Fri Mar  4 22:17:40 2011
@@ -40,6 +40,7 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -179,7 +180,13 @@ public class FSImage extends Storage {
   /**
    * Used for saving the image to disk
    */
-  static private final FsPermission FILE_PERM = new FsPermission((short)0);
+  static private final ThreadLocal<FsPermission> FILE_PERM =
+                          new ThreadLocal<FsPermission>() {
+                            @Override
+                            protected FsPermission initialValue() {
+                              return new FsPermission((short) 0);
+                            }
+                          };
   static private final byte[] PATH_SEPARATOR = DFSUtil.string2Bytes(Path.SEPARATOR);
 
   private static final Random R = new Random();
@@ -909,30 +916,34 @@ public class FSImage extends Storage {
    * @param propagate - flag, if set - then call corresponding EditLog stream's 
    * processIOError function.
    */
-  void processIOError(ArrayList<StorageDirectory> sds, boolean propagate) {
+  void processIOError(List<StorageDirectory> sds, boolean propagate) {
     ArrayList<EditLogOutputStream> al = null;
-    for(StorageDirectory sd:sds) {
-      // if has a stream assosiated with it - remove it too..
-      if (propagate && sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
-        EditLogOutputStream eStream = editLog.getEditsStream(sd);
-        if(al == null) al = new ArrayList<EditLogOutputStream>(1);
-        al.add(eStream);
-      }
-      
-      for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
-        StorageDirectory sd1 = it.next();
-        if (sd.equals(sd1)) {
-          //add storage to the removed list
-          LOG.warn("FSImage:processIOError: removing storage: "
-              + sd.getRoot().getPath());
-          try {
-            sd1.unlock(); //unlock before removing (in case it will be restored)
-          } catch (Exception e) {
-            // nothing
+    synchronized (sds) {
+      for (StorageDirectory sd : sds) {
+        // if has a stream assosiated with it - remove it too..
+        if (propagate && sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
+          EditLogOutputStream eStream = editLog.getEditsStream(sd);
+          if (al == null)
+            al = new ArrayList<EditLogOutputStream>(1);
+          al.add(eStream);
+        }
+
+        for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
+          StorageDirectory sd1 = it.next();
+          if (sd.equals(sd1)) {
+            // add storage to the removed list
+            LOG.warn("FSImage:processIOError: removing storage: "
+                + sd.getRoot().getPath());
+            try {
+              sd1.unlock(); // unlock before removing (in case it will be
+                            // restored)
+            } catch (Exception e) {
+              // nothing
+            }
+            removedStorageDirs.add(sd1);
+            it.remove();
+            break;
           }
-          removedStorageDirs.add(sd1);
-          it.remove();
-          break;
         }
       }
     }
@@ -1469,6 +1480,53 @@ public class FSImage extends Storage {
     this.imageDigest = digest;
   }
   /**
+   * FSImageSaver is being run in a separate thread when saving
+   * FSImage. There is one thread per each copy of the image.
+   *
+   * FSImageSaver assumes that it was launched from a thread that holds
+   * FSNamesystem lock and waits for the execution of FSImageSaver thread
+   * to finish.
+   * This way we are guraranteed that the namespace is not being updated
+   * while multiple instances of FSImageSaver are traversing it
+   * and writing it out.
+   */
+  private class FSImageSaver implements Runnable {
+    private StorageDirectory sd;
+    private List<StorageDirectory> errorSDs;
+    
+    FSImageSaver(StorageDirectory sd, List<StorageDirectory> errorSDs) {
+      this.sd = sd;
+      this.errorSDs = errorSDs;
+    }
+    
+    public void run() {
+      try {
+        saveCurrent(sd);
+      } catch (IOException ie) {
+        LOG.error("Unable to save image for " + sd.getRoot(), ie);
+        errorSDs.add(sd);              
+      }
+    }
+    
+    public String toString() {
+      return "FSImageSaver for " + sd.getRoot() +
+             " of type " + sd.getStorageDirType();
+    }
+  }
+  
+  private void waitForThreads(List<Thread> threads) {
+    for (Thread thread : threads) {
+      while (thread.isAlive()) {
+        try {
+          thread.join();
+        } catch (InterruptedException iex) {
+          LOG.error("Caught exception while waiting for thread " +
+                    thread.getName() + " to finish. Retrying join");
+        }        
+      }
+    }
+  }
+  /**
    * Save the contents of the FS image and create empty edits.
    * 
    * In order to minimize the recovery effort in case of failure during
@@ -1488,7 +1546,8 @@ public class FSImage extends Storage {
     editLog.close();
     if(renewCheckpointTime)
       this.checkpointTime = now();
-    ArrayList<StorageDirectory> errorSDs = new ArrayList<StorageDirectory>();
+    List<StorageDirectory> errorSDs =
+      Collections.synchronizedList(new ArrayList<StorageDirectory>());
 
     // mv current -> lastcheckpoint.tmp
     for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
@@ -1501,17 +1560,18 @@ public class FSImage extends Storage {
       }
     }
 
+    List<Thread> saveThreads = new ArrayList<Thread>();
     // save images into current
     for (Iterator<StorageDirectory> it = dirIterator(NameNodeDirType.IMAGE);
                                                               it.hasNext();) {
       StorageDirectory sd = it.next();
-      try {
-        saveCurrent(sd);
-      } catch(IOException ie) {
-        LOG.error("Unable to save image for " + sd.getRoot(), ie);
-        errorSDs.add(sd);
-      }
+      FSImageSaver saver = new FSImageSaver(sd, errorSDs);
+      Thread saveThread = new Thread(saver, saver.toString());
+      saveThreads.add(saveThread);
+      saveThread.start();
     }
+    waitForThreads(saveThreads);
+    saveThreads.clear();
 
     // -NOTE-
     // If NN has image-only and edits-only storage directories and fails here 
@@ -1522,18 +1582,17 @@ public class FSImage extends Storage {
     // to the old state contained in their lastcheckpoint.tmp.
     // The edits directories should be discarded during startup because their
     // checkpointTime is older than that of image directories.
-
     // recreate edits in current
     for (Iterator<StorageDirectory> it = dirIterator(NameNodeDirType.EDITS);
                                                               it.hasNext();) {
-      StorageDirectory sd = it.next();
-      try {
-        saveCurrent(sd);
-      } catch(IOException ie) {
-        LOG.error("Unable to save edits for " + sd.getRoot(), ie);
-        errorSDs.add(sd);
-      }
+      final StorageDirectory sd = it.next();
+      FSImageSaver saver = new FSImageSaver(sd, errorSDs);
+      Thread saveThread = new Thread(saver, saver.toString());
+      saveThreads.add(saveThread);
+      saveThread.start();
     }
+    waitForThreads(saveThreads);
+
     // mv lastcheckpoint.tmp -> previous.checkpoint
     for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
@@ -1711,6 +1770,7 @@ public class FSImage extends Storage {
     int nameLen = name.position();
     out.writeShort(nameLen);
     out.write(name.array(), name.arrayOffset(), nameLen);
+    FsPermission filePerm = FILE_PERM.get();
     if (node.isDirectory()) {
       out.writeShort(0);  // replication
       out.writeLong(node.getModificationTime());
@@ -1719,10 +1779,10 @@ public class FSImage extends Storage {
       out.writeInt(-1);   // # of blocks
       out.writeLong(node.getNsQuota());
       out.writeLong(node.getDsQuota());
-      FILE_PERM.fromShort(node.getFsPermissionShort());
+      filePerm.fromShort(node.getFsPermissionShort());
       PermissionStatus.write(out, node.getUserName(),
                              node.getGroupName(),
-                             FILE_PERM);
+                             filePerm);
     } else if (node.isLink()) {
       out.writeShort(0);  // replication
       out.writeLong(0);   // modification time
@@ -1730,10 +1790,10 @@ public class FSImage extends Storage {
       out.writeLong(0);   // preferred block size
       out.writeInt(-2);   // # of blocks
       Text.writeString(out, ((INodeSymlink)node).getLinkValue());
-      FILE_PERM.fromShort(node.getFsPermissionShort());
+      filePerm.fromShort(node.getFsPermissionShort());
       PermissionStatus.write(out, node.getUserName(),
                              node.getGroupName(),
-                             FILE_PERM);      
+                             filePerm);      
     } else {
       INodeFile fileINode = (INodeFile)node;
       out.writeShort(fileINode.getReplication());
@@ -1744,10 +1804,10 @@ public class FSImage extends Storage {
       out.writeInt(blocks.length);
       for (Block blk : blocks)
         blk.write(out);
-      FILE_PERM.fromShort(fileINode.getFsPermissionShort());
+      filePerm.fromShort(fileINode.getFsPermissionShort());
       PermissionStatus.write(out, fileINode.getUserName(),
                              fileINode.getGroupName(),
-                             FILE_PERM);
+                             filePerm);
     }
   }
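
The HDFS-1071 change above writes the image to every configured directory in parallel: one FSImageSaver thread per directory, a join on all of them before continuing (the caller still holds the FSNamesystem lock), and failures collected in a synchronized list; FILE_PERM also becomes a ThreadLocal so each saver thread gets its own mutable FsPermission scratch object. A stripped-down sketch of the fan-out/join pattern (not part of this commit; saveTo and the class name are hypothetical):

    import java.io.File;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class ParallelSaveDemo {
      // Hypothetical stand-in for FSImage.saveCurrent(sd).
      static void saveTo(File dir) throws IOException {
        // ... write an image file under dir ...
      }

      public static void main(String[] args) throws InterruptedException {
        List<File> dirs = new ArrayList<File>();                 // configured image directories
        final List<File> errorDirs =
            Collections.synchronizedList(new ArrayList<File>()); // safe to add from several threads

        List<Thread> savers = new ArrayList<Thread>();
        for (final File dir : dirs) {
          Thread t = new Thread(new Runnable() {
            public void run() {
              try {
                saveTo(dir);
              } catch (IOException e) {
                errorDirs.add(dir);                              // record the failed directory
              }
            }
          }, "Saver for " + dir);
          savers.add(t);
          t.start();
        }
        for (Thread t : savers) {
          t.join();                                              // wait for every copy to finish
        }
        // errorDirs now lists the directories that failed and should be removed from service.
      }
    }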
   

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1078173&r1=1078172&r2=1078173&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Mar  4 22:17:40 2011
@@ -376,7 +376,7 @@ public class FSNamesystem implements FSC
 
   public static Collection<URI> getStorageDirs(Configuration conf,
                                                 String propertyName) {
-    Collection<String> dirNames = conf.getStringCollection(propertyName);
+    Collection<String> dirNames = conf.getTrimmedStringCollection(propertyName);
     StartupOption startOpt = NameNode.getStartupOption(conf);
     if(startOpt == StartupOption.IMPORT) {
       // In case of IMPORT this will get rid of default directories 
@@ -387,7 +387,7 @@ public class FSNamesystem implements FSC
       cE.addResource("core-default.xml");
       cE.addResource("core-site.xml");
       cE.addResource("hdfs-default.xml");
-      Collection<String> dirNames2 = cE.getStringCollection(propertyName);
+      Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
       dirNames.removeAll(dirNames2);
       if(dirNames.isEmpty())
         LOG.warn("!!! WARNING !!!" +

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1078173&r1=1078172&r2=1078173&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Mar  4 22:17:40 2011
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -1403,6 +1406,15 @@ public class NameNode implements Namenod
   private static boolean format(Configuration conf,
                                 boolean isConfirmationNeeded)
       throws IOException {
+    if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, 
+                         DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
+      throw new IOException("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
+                             + " is set to false for this filesystem, so it "
+                             + "cannot be formatted. You will need to set "
+                             + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
+                             + "to true in order to format this filesystem");
+    }
+    
     Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
     Collection<URI> editDirsToFormat = 
                  FSNamesystem.getNamespaceEditsDirs(conf);

Propchange: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 22:17:40 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-265/src/test/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/test/hdfs:820487
-/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1052823,1060619,1061067,1062020

Propchange: hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 22:17:40 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/webapps/datanode:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
-/hadoop/hdfs/trunk/src/webapps/datanode:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/webapps/datanode:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1052823,1060619,1061067,1062020

Propchange: hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 22:17:40 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
-/hadoop/hdfs/trunk/src/webapps/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/webapps/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1052823,1060619,1061067,1062020

Propchange: hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Mar  4 22:17:40 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/webapps/secondary:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
-/hadoop/hdfs/trunk/src/webapps/secondary:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1052823,1060619,1061067,1062020
+/hadoop/hdfs/trunk/src/webapps/secondary:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1052823,1060619,1061067,1062020


