hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cnaur...@apache.org
Subject svn commit: r1562619 - in /hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/
Date Wed, 29 Jan 2014 22:41:20 GMT
Author: cnauroth
Date: Wed Jan 29 22:41:20 2014
New Revision: 1562619

URL: http://svn.apache.org/r1562619
Log:
HDFS-5771. Track progress when loading fsimage. Contributed by Haohui Mai.

Modified:
    hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5698.txt
    hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
    hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5698.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5698.txt?rev=1562619&r1=1562618&r2=1562619&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5698.txt
(original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5698.txt
Wed Jan 29 22:41:20 2014
@@ -29,3 +29,5 @@ HDFS-5698 subtasks
     HDFS-5698 branch. (Haohui Mai via jing9)
 
     HDFS-5797. Implement offline image viewer. (Haohui Mai via jing9)
+
+    HDFS-5771. Track progress when loading fsimage. (Haohui Mai via cnauroth)

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java?rev=1562619&r1=1562618&r2=1562619&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
(original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
Wed Jan 29 22:41:20 2014
@@ -26,6 +26,8 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -55,6 +57,7 @@ public final class FSImageFormatPBINode 
   private final static long USER_GROUP_STRID_MASK = (1 << 24) - 1;
   private final static int USER_STRID_OFFSET = 40;
   private final static int GROUP_STRID_OFFSET = 16;
+  private static final Log LOG = LogFactory.getLog(FSImageFormatProtobuf.class);
 
   public final static class Loader {
     public static PermissionStatus loadPermission(long id,
@@ -152,6 +155,7 @@ public final class FSImageFormatPBINode 
     void loadINodeSection(InputStream in) throws IOException {
       INodeSection s = INodeSection.parseDelimitedFrom(in);
       fsn.resetLastInodeId(s.getLastInodeId());
+      LOG.info("Loading " + s.getNumInodes() + " INodes.");
       for (int i = 0; i < s.getNumInodes(); ++i) {
         INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
         if (p.getId() == INodeId.ROOT_INODE_ID) {

Modified: hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java?rev=1562619&r1=1562618&r2=1562619&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
(original)
+++ hadoop/common/branches/HDFS-5698/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
Wed Jan 29 22:41:20 2014
@@ -52,6 +52,10 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FSImageFormatPBSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.compress.CompressionCodec;
@@ -101,11 +105,14 @@ public final class FSImageFormatProtobuf
     }
 
     void load(File file) throws IOException {
+      long start = System.currentTimeMillis();
       imgDigest = MD5FileUtils.computeMd5ForFile(file);
       RandomAccessFile raFile = new RandomAccessFile(file, "r");
       FileInputStream fin = new FileInputStream(file);
       try {
         loadInternal(raFile, fin);
+        long end = System.currentTimeMillis();
+        LOG.info("Loaded FSImage in " + (end - start) / 1000 + " seconds.");
       } finally {
         fin.close();
         raFile.close();
@@ -123,8 +130,8 @@ public final class FSImageFormatProtobuf
 
       FSImageFormatPBINode.Loader inodeLoader = new FSImageFormatPBINode.Loader(
           fsn, this);
-      FSImageFormatPBSnapshot.Loader snapshotLoader =
-          new FSImageFormatPBSnapshot.Loader(fsn, this);
+      FSImageFormatPBSnapshot.Loader snapshotLoader = new FSImageFormatPBSnapshot.Loader(
+          fsn, this);
 
       ArrayList<FileSummary.Section> sections = Lists.newArrayList(summary
           .getSectionsList());
@@ -143,6 +150,14 @@ public final class FSImageFormatProtobuf
         }
       });
 
+      StartupProgress prog = NameNode.getStartupProgress();
+      /*
+       * The beginStep() and endStep() calls do not match the boundaries of
+       * the sections. This is because the current implementation only allows
+       * a particular step to be started once.
+       */
+      Step currentStep = null;
+
       for (FileSummary.Section s : sections) {
         channel.position(s.getOffset());
         InputStream in = new BufferedInputStream(new LimitInputStream(fin,
@@ -152,6 +167,7 @@ public final class FSImageFormatProtobuf
             summary.getCodec(), in);
 
         String n = s.getName();
+
         switch (SectionName.fromString(n)) {
         case NS_INFO:
           loadNameSystemSection(in);
@@ -159,8 +175,11 @@ public final class FSImageFormatProtobuf
         case STRING_TABLE:
           loadStringTableSection(in);
           break;
-        case INODE:
+        case INODE: {
+          currentStep = new Step(StepType.INODES);
+          prog.beginStep(Phase.LOADING_FSIMAGE, currentStep);
           inodeLoader.loadINodeSection(in);
+        }
           break;
         case INODE_DIR:
           inodeLoader.loadINodeDirectorySection(in);
@@ -174,11 +193,20 @@ public final class FSImageFormatProtobuf
         case SNAPSHOT_DIFF:
           snapshotLoader.loadSnapshotDiffSection(in);
           break;
-        case CACHE_MANAGER:
-          loadCacheManagerSection(in);
-          break;
-        case SECRET_MANAGER:
+        case SECRET_MANAGER: {
+          prog.endStep(Phase.LOADING_FSIMAGE, currentStep);
+          Step step = new Step(StepType.DELEGATION_TOKENS);
+          prog.beginStep(Phase.LOADING_FSIMAGE, step);
           loadSecretManagerSection(in);
+          prog.endStep(Phase.LOADING_FSIMAGE, step);
+        }
+          break;
+        case CACHE_MANAGER: {
+          Step step = new Step(StepType.CACHE_POOLS);
+          prog.beginStep(Phase.LOADING_FSIMAGE, step);
+          loadCacheManagerSection(in);
+          prog.endStep(Phase.LOADING_FSIMAGE, step);
+        }
           break;
         default:
           LOG.warn("Unregconized section " + n);
@@ -291,7 +319,7 @@ public final class FSImageFormatProtobuf
       FileOutputStream fout = new FileOutputStream(file);
       fileChannel = fout.getChannel();
       try {
-        saveInternal(fout, compression);
+        saveInternal(fout, compression, file.getAbsolutePath().toString());
       } finally {
         fout.close();
       }
@@ -307,24 +335,27 @@ public final class FSImageFormatProtobuf
     }
 
     private void saveInodes(FileSummary.Builder summary) throws IOException {
-      FSImageFormatPBINode.Saver inodeSaver = new FSImageFormatPBINode.Saver(
-          this, summary);
-      inodeSaver.serializeINodeSection(sectionOutputStream);
-      inodeSaver.serializeINodeDirectorySection(sectionOutputStream);
-      inodeSaver.serializeFilesUCSection(sectionOutputStream);
+      FSImageFormatPBINode.Saver saver = new FSImageFormatPBINode.Saver(this,
+          summary);
+
+      saver.serializeINodeSection(sectionOutputStream);
+      saver.serializeINodeDirectorySection(sectionOutputStream);
+      saver.serializeFilesUCSection(sectionOutputStream);
     }
 
     private void saveSnapshots(FileSummary.Builder summary) throws IOException {
-      FSImageFormatPBSnapshot.Saver snapshotSaver =
-          new FSImageFormatPBSnapshot.Saver(this, summary,
-              context, context.getSourceNamesystem());
+      FSImageFormatPBSnapshot.Saver snapshotSaver = new FSImageFormatPBSnapshot.Saver(
+          this, summary, context, context.getSourceNamesystem());
+
       snapshotSaver.serializeSnapshotSection(sectionOutputStream);
       snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
     }
 
     private void saveInternal(FileOutputStream fout,
-        FSImageCompression compression) throws IOException {
+        FSImageCompression compression, String filePath) throws IOException {
+      StartupProgress prog = NameNode.getStartupProgress();
       MessageDigest digester = MD5Hash.getDigester();
+
       underlyingOutputStream = new DigestOutputStream(new BufferedOutputStream(
           fout), digester);
       underlyingOutputStream.write(FSImageUtil.MAGIC_HEADER);
@@ -348,14 +379,27 @@ public final class FSImageFormatProtobuf
       // Some unit tests, such as TestSaveNamespace#testCancelSaveNameSpace
       // depends on this behavior.
       context.checkCancelled();
+
+      Step step = new Step(StepType.INODES, filePath);
+      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
       saveInodes(b);
       saveSnapshots(b);
-      saveStringTableSection(b);
+      prog.endStep(Phase.SAVING_CHECKPOINT, step);
 
+      step = new Step(StepType.DELEGATION_TOKENS, filePath);
+      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
       saveSecretManagerSection(b);
+      prog.endStep(Phase.SAVING_CHECKPOINT, step);
+
+      step = new Step(StepType.CACHE_POOLS, filePath);
+      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
       saveCacheManagerSection(b);
+      prog.endStep(Phase.SAVING_CHECKPOINT, step);
 
-      // Flush the buffered data into the file before appending the header
+      saveStringTableSection(b);
+
+      // We use the underlyingOutputStream to write the header. Therefore flush
+      // the buffered stream (which is potentially compressed) first.
       flushSectionOutputStream();
 
       FileSummary summary = b.build();
@@ -379,7 +423,8 @@ public final class FSImageFormatProtobuf
       commitSection(summary, SectionName.SECRET_MANAGER);
     }
 
-    private void saveCacheManagerSection(FileSummary.Builder summary) throws IOException {
+    private void saveCacheManagerSection(FileSummary.Builder summary)
+        throws IOException {
       final FSNamesystem fsn = context.getSourceNamesystem();
       CacheManager.PersistState state = fsn.getCacheManager().saveState();
       state.section.writeDelimitedTo(sectionOutputStream);
@@ -393,8 +438,8 @@ public final class FSImageFormatProtobuf
       commitSection(summary, SectionName.CACHE_MANAGER);
     }
 
-    private void saveNameSystemSection(
-        FileSummary.Builder summary) throws IOException {
+    private void saveNameSystemSection(FileSummary.Builder summary)
+        throws IOException {
       final FSNamesystem fsn = context.getSourceNamesystem();
       OutputStream out = sectionOutputStream;
       NameSystemSection.Builder b = NameSystemSection.newBuilder()
@@ -416,7 +461,8 @@ public final class FSImageFormatProtobuf
       commitSection(summary, SectionName.NS_INFO);
     }
 
-    private void saveStringTableSection(FileSummary.Builder summary) throws IOException {
+    private void saveStringTableSection(FileSummary.Builder summary)
+        throws IOException {
       OutputStream out = sectionOutputStream;
       StringTableSection.Builder b = StringTableSection.newBuilder()
           .setNumEntry(stringMap.size());



Mime
View raw message