From: wheat9@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r1576513 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/security/token/delegation/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenod...
Date: Tue, 11 Mar 2014 21:44:39 -0000
Message-Id: <20140311214439.CA6C223889E3@eris.apache.org>
X-Mailer: svnmailer-1.0.9

Author: wheat9
Date: Tue Mar 11 21:44:38 2014
New Revision: 1576513

URL: http://svn.apache.org/r1576513
Log:
HDFS-6072. Clean up dead code of FSImage. Contributed by Haohui Mai.
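The dead code removed below is the legacy, hand-rolled fsimage writer: each subsystem (delegation tokens, cache pools, inodes, snapshot diffs) serialized its state directly onto a DataOutputStream as a record count followed by the records, as in the deleted saveCurrentTokens and saveAllKeys methods. The corresponding loaders are kept so old images can still be read; only the save path goes away, since the protobuf-defined format (see the new FSImageFormat class comment below, which points to src/main/proto/fsimage.proto) supersedes it. A minimal sketch of that legacy pattern, using invented names rather than the actual Hadoop types:

import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

// Simplified illustration of the legacy serialization style being deleted:
// write a count, then each record, straight onto the image stream, with no
// self-describing schema. Class and field names are made up for the example.
public class LegacyImageWriterSketch {

  // Mirrors the shape of the removed saveCurrentTokens(): count first,
  // then (identifier, renew date) pairs.
  static void saveTokens(DataOutputStream out, Map<Integer, Long> renewDates)
      throws IOException {
    out.writeInt(renewDates.size());
    for (Map.Entry<Integer, Long> e : renewDates.entrySet()) {
      out.writeInt(e.getKey());      // token identifier (simplified to an int)
      out.writeLong(e.getValue());   // renew date in milliseconds
    }
  }

  public static void main(String[] args) throws IOException {
    Map<Integer, Long> renewDates = new TreeMap<>();
    renewDates.put(1, 1394574278000L);
    renewDates.put(2, 1394660678000L);
    try (DataOutputStream out =
        new DataOutputStream(new FileOutputStream("fsimage-fragment.bin"))) {
      saveTokens(out, renewDates);
    }
  }
}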
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1576513&r1=1576512&r2=1576513&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Mar 11 21:44:38 2014
@@ -401,6 +401,8 @@ Release 2.4.0 - UNRELEASED
 
     HDFS-6085. Improve CacheReplicationMonitor log messages a bit (cmccabe)
 
+    HDFS-6072. Clean up dead code of FSImage. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
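The retained replacement is visible in the DelegationTokenSecretManager diff below: saveSecretManagerState() assembles a SecretManagerSection through a protobuf builder instead of writing raw fields onto the stream. The sketch below shows the general shape of that section-per-subsystem approach; the stub class is a hand-written stand-in for a generated protobuf message, not the real Hadoop type, and the framing is illustrative only.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;

// Sketch of a section-per-subsystem image layout. "SecretManagerSectionStub"
// stands in for a generated protobuf message; the real definitions live in
// src/main/proto/fsimage.proto.
public class SectionWriterSketch {

  static final class SecretManagerSectionStub {
    private final int currentId;
    private final int tokenSequenceNumber;

    SecretManagerSectionStub(int currentId, int tokenSequenceNumber) {
      this.currentId = currentId;
      this.tokenSequenceNumber = tokenSequenceNumber;
    }

    byte[] toByteArray() throws IOException {
      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      try (DataOutputStream out = new DataOutputStream(bos)) {
        out.writeInt(currentId);
        out.writeInt(tokenSequenceNumber);
      }
      return bos.toByteArray();
    }
  }

  // Each subsystem contributes one length-delimited section, so a reader can
  // locate or skip a section without knowing every writer's private layout.
  static void writeSection(DataOutputStream imageOut, byte[] section)
      throws IOException {
    imageOut.writeInt(section.length);
    imageOut.write(section);
  }

  public static void main(String[] args) throws IOException {
    SecretManagerSectionStub section = new SecretManagerSectionStub(7, 42);
    try (DataOutputStream imageOut =
        new DataOutputStream(new FileOutputStream("image-sections.bin"))) {
      writeSection(imageOut, section.toByteArray());
    }
  }
}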

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java?rev=1576513&r1=1576512&r2=1576513&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java Tue Mar 11 21:44:38 2014
@@ -18,16 +18,9 @@
 package org.apache.hadoop.hdfs.security.token.delegation;
 
-import java.io.DataInput;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -50,9 +43,13 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
 
 /**
  * A HDFS specific delegation token secret manager.
@@ -214,18 +211,6 @@ public class DelegationTokenSecretManage
     }
   }
 
-  /**
-   * Store the current state of the SecretManager for persistence
-   *
-   * @param out Output stream for writing into fsimage.
- * @param sdPath String storage directory path - * @throws IOException - */ - public synchronized void saveSecretManagerStateCompat(DataOutputStream out, - String sdPath) throws IOException { - serializerCompat.save(out, sdPath); - } - public synchronized SecretManagerState saveSecretManagerState() { SecretManagerSection s = SecretManagerSection.newBuilder() .setCurrentId(currentId) @@ -421,56 +406,6 @@ public class DelegationTokenSecretManage loadCurrentTokens(in); } - private void save(DataOutputStream out, String sdPath) throws IOException { - out.writeInt(currentId); - saveAllKeys(out, sdPath); - out.writeInt(delegationTokenSequenceNumber); - saveCurrentTokens(out, sdPath); - } - - /** - * Private helper methods to save delegation keys and tokens in fsimage - */ - private synchronized void saveCurrentTokens(DataOutputStream out, - String sdPath) throws IOException { - StartupProgress prog = NameNode.getStartupProgress(); - Step step = new Step(StepType.DELEGATION_TOKENS, sdPath); - prog.beginStep(Phase.SAVING_CHECKPOINT, step); - prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size()); - Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); - out.writeInt(currentTokens.size()); - Iterator iter = currentTokens.keySet() - .iterator(); - while (iter.hasNext()) { - DelegationTokenIdentifier id = iter.next(); - id.write(out); - DelegationTokenInformation info = currentTokens.get(id); - out.writeLong(info.getRenewDate()); - counter.increment(); - } - prog.endStep(Phase.SAVING_CHECKPOINT, step); - } - - /* - * Save the current state of allKeys - */ - private synchronized void saveAllKeys(DataOutputStream out, String sdPath) - throws IOException { - StartupProgress prog = NameNode.getStartupProgress(); - Step step = new Step(StepType.DELEGATION_KEYS, sdPath); - prog.beginStep(Phase.SAVING_CHECKPOINT, step); - prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size()); - Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); - out.writeInt(allKeys.size()); - Iterator iter = allKeys.keySet().iterator(); - while (iter.hasNext()) { - Integer key = iter.next(); - allKeys.get(key).write(out); - counter.increment(); - } - prog.endStep(Phase.SAVING_CHECKPOINT, step); - } - /** * Private helper methods to load Delegation tokens from fsimage */ Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java?rev=1576513&r1=1576512&r2=1576513&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java Tue Mar 11 21:44:38 2014 @@ -27,7 +27,6 @@ import static org.apache.hadoop.hdfs.DFS import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT; import java.io.DataInput; -import java.io.DataOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -62,10 +61,10 @@ import org.apache.hadoop.hdfs.protocol.C import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import 
org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor; @@ -952,18 +951,6 @@ public final class CacheManager { } } - /** - * Saves the current state of the CacheManager to the DataOutput. Used - * to persist CacheManager state in the FSImage. - * @param out DataOutput to persist state - * @param sdPath path of the storage directory - * @throws IOException - */ - public void saveStateCompat(DataOutputStream out, String sdPath) - throws IOException { - serializerCompat.save(out, sdPath); - } - public PersistState saveState() throws IOException { ArrayList pools = Lists .newArrayListWithCapacity(cachePools.size()); @@ -1083,12 +1070,6 @@ public final class CacheManager { } private final class SerializerCompat { - private void save(DataOutputStream out, String sdPath) throws IOException { - out.writeLong(nextDirectiveId); - savePools(out, sdPath); - saveDirectives(out, sdPath); - } - private void load(DataInput in) throws IOException { nextDirectiveId = in.readLong(); // pools need to be loaded first since directives point to their parent pool @@ -1097,42 +1078,6 @@ public final class CacheManager { } /** - * Save cache pools to fsimage - */ - private void savePools(DataOutputStream out, - String sdPath) throws IOException { - StartupProgress prog = NameNode.getStartupProgress(); - Step step = new Step(StepType.CACHE_POOLS, sdPath); - prog.beginStep(Phase.SAVING_CHECKPOINT, step); - prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size()); - Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); - out.writeInt(cachePools.size()); - for (CachePool pool: cachePools.values()) { - FSImageSerialization.writeCachePoolInfo(out, pool.getInfo(true)); - counter.increment(); - } - prog.endStep(Phase.SAVING_CHECKPOINT, step); - } - - /* - * Save cache entries to fsimage - */ - private void saveDirectives(DataOutputStream out, String sdPath) - throws IOException { - StartupProgress prog = NameNode.getStartupProgress(); - Step step = new Step(StepType.CACHE_ENTRIES, sdPath); - prog.beginStep(Phase.SAVING_CHECKPOINT, step); - prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size()); - Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); - out.writeInt(directivesById.size()); - for (CacheDirective directive : directivesById.values()) { - FSImageSerialization.writeCacheDirectiveInfo(out, directive.toInfo()); - counter.increment(); - } - prog.endStep(Phase.SAVING_CHECKPOINT, step); - } - - /** * Load cache pools from fsimage */ private void loadPools(DataInput in) Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1576513&r1=1576512&r2=1576513&view=diff ============================================================================== --- 
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Tue Mar 11 21:44:38 2014 @@ -21,20 +21,14 @@ import static org.apache.hadoop.util.Tim import java.io.DataInput; import java.io.DataInputStream; -import java.io.DataOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; -import java.io.FileOutputStream; import java.io.IOException; import java.security.DigestInputStream; -import java.security.DigestOutputStream; import java.security.MessageDigest; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.TreeMap; @@ -56,7 +50,6 @@ import org.apache.hadoop.hdfs.server.blo import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; -import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; @@ -67,7 +60,6 @@ import org.apache.hadoop.hdfs.server.nam import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType; -import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.Text; @@ -77,105 +69,8 @@ import com.google.common.base.Preconditi import com.google.common.annotations.VisibleForTesting; /** - * Contains inner classes for reading or writing the on-disk format for - * FSImages. - * - * In particular, the format of the FSImage looks like: - *
- * FSImage {
- *   layoutVersion: int, namespaceID: int, numberItemsInFSDirectoryTree: long,
- *   namesystemGenerationStampV1: long, namesystemGenerationStampV2: long,
- *   generationStampAtBlockIdSwitch:long, lastAllocatedBlockId:
- *   long transactionID: long, snapshotCounter: int, numberOfSnapshots: int,
- *   numOfSnapshottableDirs: int,
- *   {FSDirectoryTree, FilesUnderConstruction, SecretManagerState} (can be compressed)
- * }
- * 
- * FSDirectoryTree (if {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported) {
- *   INodeInfo of root, numberOfChildren of root: int
- *   [list of INodeInfo of root's children],
- *   [list of INodeDirectoryInfo of root's directory children]
- * }
- * 
- * FSDirectoryTree (if {@link Feature#FSIMAGE_NAME_OPTIMIZATION} not supported){
- *   [list of INodeInfo of INodes in topological order]
- * }
- * 
- * INodeInfo {
- *   {
- *     localName: short + byte[]
- *   } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported
- *   or 
- *   {
- *     fullPath: byte[]
- *   } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is not supported
- *   replicationFactor: short, modificationTime: long,
- *   accessTime: long, preferredBlockSize: long,
- *   numberOfBlocks: int (-1 for INodeDirectory, -2 for INodeSymLink),
- *   { 
- *     nsQuota: long, dsQuota: long, 
- *     {
- *       isINodeSnapshottable: byte,
- *       isINodeWithSnapshot: byte (if isINodeSnapshottable is false)
- *     } (when {@link Feature#SNAPSHOT} is supported), 
- *     fsPermission: short, PermissionStatus
- *   } for INodeDirectory
- *   or 
- *   {
- *     symlinkString, fsPermission: short, PermissionStatus
- *   } for INodeSymlink
- *   or
- *   {
- *     [list of BlockInfo]
- *     [list of FileDiff]
- *     {
- *       isINodeFileUnderConstructionSnapshot: byte, 
- *       {clientName: short + byte[], clientMachine: short + byte[]} (when 
- *       isINodeFileUnderConstructionSnapshot is true),
- *     } (when {@link Feature#SNAPSHOT} is supported and writing snapshotINode), 
- *     fsPermission: short, PermissionStatus
- *   } for INodeFile
- * }
- * 
- * INodeDirectoryInfo {
- *   fullPath of the directory: short + byte[],
- *   numberOfChildren: int, [list of INodeInfo of children INode],
- *   {
- *     numberOfSnapshots: int,
- *     [list of Snapshot] (when NumberOfSnapshots is positive),
- *     numberOfDirectoryDiffs: int,
- *     [list of DirectoryDiff] (NumberOfDirectoryDiffs is positive),
- *     number of children that are directories,
- *     [list of INodeDirectoryInfo of the directory children] (includes
- *     snapshot copies of deleted sub-directories)
- *   } (when {@link Feature#SNAPSHOT} is supported), 
- * }
- * 
- * Snapshot {
- *   snapshotID: int, root of Snapshot: INodeDirectoryInfo (its local name is 
- *   the name of the snapshot)
- * }
- * 
- * DirectoryDiff {
- *   full path of the root of the associated Snapshot: short + byte[], 
- *   childrenSize: int, 
- *   isSnapshotRoot: byte, 
- *   snapshotINodeIsNotNull: byte (when isSnapshotRoot is false),
- *   snapshotINode: INodeDirectory (when SnapshotINodeIsNotNull is true), Diff 
- * }
- * 
- * Diff {
- *   createdListSize: int, [Local name of INode in created list],
- *   deletedListSize: int, [INode in deleted list: INodeInfo]
- * }
- *
- * FileDiff {
- *   full path of the root of the associated Snapshot: short + byte[], 
- *   fileSize: long, 
- *   snapshotINodeIsNotNull: byte,
- *   snapshotINode: INodeFile (when SnapshotINodeIsNotNull is true), Diff 
- * }
- * 
+ * This class loads and stores the FSImage of the NameNode. The file + * src/main/proto/fsimage.proto describes the on-disk layout of the FSImage. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -683,11 +578,6 @@ public class FSImageFormat { } } - /** @return The FSDirectory of the namesystem where the fsimage is loaded */ - public FSDirectory getFSDirectoryInLoading() { - return namesystem.dir; - } - public INode loadINodeWithLocalName(boolean isSnapshotINode, DataInput in, boolean updateINodeMap) throws IOException { return loadINodeWithLocalName(isSnapshotINode, in, updateINodeMap, null); @@ -920,7 +810,7 @@ public class FSImageFormat { if (path != null && FSDirectory.isReservedName(path) && NameNodeLayoutVersion.supports( LayoutVersion.Feature.ADD_INODE_ID, getLayoutVersion())) { - // TODO: for HDFS-5428, we use reserved path for those INodeFileUC in + // TODO: for HDFS-5428, we use reserved path for those INodeFileUC in // snapshot. If we support INode ID in the layout version, we can use // the inode id to find the oldnode. oldnode = namesystem.dir.getInode(cons.getId()).asFile(); @@ -1117,7 +1007,7 @@ public class FSImageFormat { + " option to automatically rename these paths during upgrade."; /** - * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single + * Same as {@link #renameReservedPathsOnUpgrade}, but for a single * byte array path component. */ private static byte[] renameReservedComponentOnUpgrade(byte[] component, @@ -1138,7 +1028,7 @@ public class FSImageFormat { } /** - * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single + * Same as {@link #renameReservedPathsOnUpgrade}, but for a single * byte array path component. */ private static byte[] renameReservedRootComponentOnUpgrade(byte[] component, @@ -1160,266 +1050,4 @@ public class FSImageFormat { } return component; } - - /** - * A one-shot class responsible for writing an image file. - * The write() function should be called once, after which the getter - * functions may be used to retrieve information about the file that was written. - */ - static class Saver { - private final SaveNamespaceContext context; - /** Set to true once an image has been written */ - private boolean saved = false; - - /** The MD5 checksum of the file that was written */ - private MD5Hash savedDigest; - private final ReferenceMap referenceMap = new ReferenceMap(); - - private final Map snapshotUCMap = - new HashMap(); - - /** @throws IllegalStateException if the instance has not yet saved an image */ - private void checkSaved() { - if (!saved) { - throw new IllegalStateException("FSImageSaver has not saved an image"); - } - } - - /** @throws IllegalStateException if the instance has already saved an image */ - private void checkNotSaved() { - if (saved) { - throw new IllegalStateException("FSImageSaver has already saved an image"); - } - } - - - Saver(SaveNamespaceContext context) { - this.context = context; - } - - /** - * Return the MD5 checksum of the image file that was saved. 
- */ - MD5Hash getSavedDigest() { - checkSaved(); - return savedDigest; - } - - void save(File newFile, FSImageCompression compression) throws IOException { - checkNotSaved(); - - final FSNamesystem sourceNamesystem = context.getSourceNamesystem(); - final INodeDirectory rootDir = sourceNamesystem.dir.rootDir; - final long numINodes = rootDir.getDirectoryWithQuotaFeature() - .getSpaceConsumed().get(Quota.NAMESPACE); - String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath(); - Step step = new Step(StepType.INODES, sdPath); - StartupProgress prog = NameNode.getStartupProgress(); - prog.beginStep(Phase.SAVING_CHECKPOINT, step); - prog.setTotal(Phase.SAVING_CHECKPOINT, step, numINodes); - Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); - long startTime = now(); - // - // Write out data - // - MessageDigest digester = MD5Hash.getDigester(); - FileOutputStream fout = new FileOutputStream(newFile); - DigestOutputStream fos = new DigestOutputStream(fout, digester); - DataOutputStream out = new DataOutputStream(fos); - try { - out.writeInt(HdfsConstants.NAMENODE_LAYOUT_VERSION); - LayoutFlags.write(out); - // We use the non-locked version of getNamespaceInfo here since - // the coordinating thread of saveNamespace already has read-locked - // the namespace for us. If we attempt to take another readlock - // from the actual saver thread, there's a potential of a - // fairness-related deadlock. See the comments on HDFS-2223. - out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo() - .getNamespaceID()); - out.writeLong(numINodes); - out.writeLong(sourceNamesystem.getGenerationStampV1()); - out.writeLong(sourceNamesystem.getGenerationStampV2()); - out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch()); - out.writeLong(sourceNamesystem.getLastAllocatedBlockId()); - out.writeLong(context.getTxId()); - out.writeLong(sourceNamesystem.getLastInodeId()); - - - sourceNamesystem.getSnapshotManager().write(out); - - // write compression info and set up compressed stream - out = compression.writeHeaderAndWrapStream(fos); - LOG.info("Saving image file " + newFile + - " using " + compression); - - // save the root - saveINode2Image(rootDir, out, false, referenceMap, counter); - // save the rest of the nodes - saveImage(rootDir, out, true, false, counter); - prog.endStep(Phase.SAVING_CHECKPOINT, step); - // Now that the step is finished, set counter equal to total to adjust - // for possible under-counting due to reference inodes. - prog.setCount(Phase.SAVING_CHECKPOINT, step, numINodes); - // save files under construction - // TODO: for HDFS-5428, since we cannot break the compatibility of - // fsimage, we store part of the under-construction files that are only - // in snapshots in this "under-construction-file" section. As a - // temporary solution, we use "/.reserved/.inodes/" as their - // paths, so that when loading fsimage we do not put them into the lease - // map. In the future, we can remove this hack when we can bump the - // layout version. 
- sourceNamesystem.saveFilesUnderConstruction(out, snapshotUCMap); - - context.checkCancelled(); - sourceNamesystem.saveSecretManagerStateCompat(out, sdPath); - context.checkCancelled(); - sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath); - context.checkCancelled(); - out.flush(); - context.checkCancelled(); - fout.getChannel().force(true); - } finally { - out.close(); - } - - saved = true; - // set md5 of the saved image - savedDigest = new MD5Hash(digester.digest()); - - LOG.info("Image file " + newFile + " of size " + newFile.length() + - " bytes saved in " + (now() - startTime)/1000 + " seconds."); - } - - /** - * Save children INodes. - * @param children The list of children INodes - * @param out The DataOutputStream to write - * @param inSnapshot Whether the parent directory or its ancestor is in - * the deleted list of some snapshot (caused by rename or - * deletion) - * @param counter Counter to increment for namenode startup progress - * @return Number of children that are directory - */ - private int saveChildren(ReadOnlyList children, - DataOutputStream out, boolean inSnapshot, Counter counter) - throws IOException { - // Write normal children INode. - out.writeInt(children.size()); - int dirNum = 0; - int i = 0; - for(INode child : children) { - // print all children first - // TODO: for HDFS-5428, we cannot change the format/content of fsimage - // here, thus even if the parent directory is in snapshot, we still - // do not handle INodeUC as those stored in deleted list - saveINode2Image(child, out, false, referenceMap, counter); - if (child.isDirectory()) { - dirNum++; - } else if (inSnapshot && child.isFile() - && child.asFile().isUnderConstruction()) { - this.snapshotUCMap.put(child.getId(), child.asFile()); - } - if (i++ % 50 == 0) { - context.checkCancelled(); - } - } - return dirNum; - } - - /** - * Save file tree image starting from the given root. - * This is a recursive procedure, which first saves all children and - * snapshot diffs of a current directory and then moves inside the - * sub-directories. - * - * @param current The current node - * @param out The DataoutputStream to write the image - * @param toSaveSubtree Whether or not to save the subtree to fsimage. For - * reference node, its subtree may already have been - * saved before. - * @param inSnapshot Whether the current directory is in snapshot - * @param counter Counter to increment for namenode startup progress - */ - private void saveImage(INodeDirectory current, DataOutputStream out, - boolean toSaveSubtree, boolean inSnapshot, Counter counter) - throws IOException { - // write the inode id of the directory - out.writeLong(current.getId()); - - if (!toSaveSubtree) { - return; - } - - final ReadOnlyList children = current - .getChildrenList(Snapshot.CURRENT_STATE_ID); - int dirNum = 0; - List snapshotDirs = null; - DirectoryWithSnapshotFeature sf = current.getDirectoryWithSnapshotFeature(); - if (sf != null) { - snapshotDirs = new ArrayList(); - sf.getSnapshotDirectory(snapshotDirs); - dirNum += snapshotDirs.size(); - } - - // 2. Write INodeDirectorySnapshottable#snapshotsByNames to record all - // Snapshots - if (current instanceof INodeDirectorySnapshottable) { - INodeDirectorySnapshottable snapshottableNode = - (INodeDirectorySnapshottable) current; - SnapshotFSImageFormat.saveSnapshots(snapshottableNode, out); - } else { - out.writeInt(-1); // # of snapshots - } - - // 3. Write children INode - dirNum += saveChildren(children, out, inSnapshot, counter); - - // 4. 
Write DirectoryDiff lists, if there is any. - SnapshotFSImageFormat.saveDirectoryDiffList(current, out, referenceMap); - - // Write sub-tree of sub-directories, including possible snapshots of - // deleted sub-directories - out.writeInt(dirNum); // the number of sub-directories - for(INode child : children) { - if(!child.isDirectory()) { - continue; - } - // make sure we only save the subtree under a reference node once - boolean toSave = child.isReference() ? - referenceMap.toProcessSubtree(child.getId()) : true; - saveImage(child.asDirectory(), out, toSave, inSnapshot, counter); - } - if (snapshotDirs != null) { - for (INodeDirectory subDir : snapshotDirs) { - // make sure we only save the subtree under a reference node once - boolean toSave = subDir.getParentReference() != null ? - referenceMap.toProcessSubtree(subDir.getId()) : true; - saveImage(subDir, out, toSave, true, counter); - } - } - } - - /** - * Saves inode and increments progress counter. - * - * @param inode INode to save - * @param out DataOutputStream to receive inode - * @param writeUnderConstruction boolean true if this is under construction - * @param referenceMap ReferenceMap containing reference inodes - * @param counter Counter to increment for namenode startup progress - * @throws IOException thrown if there is an I/O error - */ - private void saveINode2Image(INode inode, DataOutputStream out, - boolean writeUnderConstruction, ReferenceMap referenceMap, - Counter counter) throws IOException { - FSImageSerialization.saveINode2Image(inode, out, writeUnderConstruction, - referenceMap); - // Intentionally do not increment counter for reference inodes, because it - // is too difficult at this point to assess whether or not this is a - // reference that counts toward quota. - if (!(inode instanceof INodeReference)) { - counter.increment(); - } - } - } } Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java?rev=1576513&r1=1576512&r2=1576513&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java Tue Mar 11 21:44:38 2014 @@ -17,11 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import java.io.DataInput; -import java.io.DataOutput; -import java.io.DataOutputStream; -import java.io.IOException; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; @@ -36,21 +31,20 @@ import org.apache.hadoop.hdfs.protocol.L import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; -import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat; -import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; import org.apache.hadoop.hdfs.util.XMLUtils; import 
org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; import org.apache.hadoop.hdfs.util.XMLUtils.Stanza; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.ShortWritable; -import org.apache.hadoop.io.Text; import org.apache.hadoop.io.WritableUtils; import org.xml.sax.ContentHandler; import org.xml.sax.SAXException; -import com.google.common.base.Preconditions; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; /** * Static utility functions for serializing various pieces of data in the correct @@ -88,26 +82,6 @@ public class FSImageSerialization { final ShortWritable U_SHORT = new ShortWritable(); final IntWritable U_INT = new IntWritable(); final LongWritable U_LONG = new LongWritable(); - final FsPermission FILE_PERM = new FsPermission((short) 0); - } - - private static void writePermissionStatus(INodeAttributes inode, - DataOutput out) throws IOException { - final FsPermission p = TL_DATA.get().FILE_PERM; - p.fromShort(inode.getFsPermissionShort()); - PermissionStatus.write(out, inode.getUserName(), inode.getGroupName(), p); - } - - private static void writeBlocks(final Block[] blocks, - final DataOutput out) throws IOException { - if (blocks == null) { - out.writeInt(0); - } else { - out.writeInt(blocks.length); - for (Block blk : blocks) { - blk.write(out); - } - } } // Helper function that reads in an INodeUnderConstruction @@ -153,183 +127,6 @@ public class FSImageSerialization { return file; } - // Helper function that writes an INodeUnderConstruction - // into the input stream - // - static void writeINodeUnderConstruction(DataOutputStream out, INodeFile cons, - String path) throws IOException { - writeString(path, out); - out.writeLong(cons.getId()); - out.writeShort(cons.getFileReplication()); - out.writeLong(cons.getModificationTime()); - out.writeLong(cons.getPreferredBlockSize()); - - writeBlocks(cons.getBlocks(), out); - cons.getPermissionStatus().write(out); - - FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature(); - writeString(uc.getClientName(), out); - writeString(uc.getClientMachine(), out); - - out.writeInt(0); // do not store locations of last block - } - - /** - * Serialize a {@link INodeFile} node - * @param node The node to write - * @param out The {@link DataOutputStream} where the fields are written - * @param writeBlock Whether to write block information - */ - public static void writeINodeFile(INodeFile file, DataOutput out, - boolean writeUnderConstruction) throws IOException { - writeLocalName(file, out); - out.writeLong(file.getId()); - out.writeShort(file.getFileReplication()); - out.writeLong(file.getModificationTime()); - out.writeLong(file.getAccessTime()); - out.writeLong(file.getPreferredBlockSize()); - - writeBlocks(file.getBlocks(), out); - SnapshotFSImageFormat.saveFileDiffList(file, out); - - if (writeUnderConstruction) { - if (file.isUnderConstruction()) { - out.writeBoolean(true); - final FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature(); - writeString(uc.getClientName(), out); - writeString(uc.getClientMachine(), out); - } else { - out.writeBoolean(false); - } - } - - writePermissionStatus(file, out); - } - - /** Serialize an {@link INodeFileAttributes}. 
*/ - public static void writeINodeFileAttributes(INodeFileAttributes file, - DataOutput out) throws IOException { - writeLocalName(file, out); - writePermissionStatus(file, out); - out.writeLong(file.getModificationTime()); - out.writeLong(file.getAccessTime()); - - out.writeShort(file.getFileReplication()); - out.writeLong(file.getPreferredBlockSize()); - } - - private static void writeQuota(Quota.Counts quota, DataOutput out) - throws IOException { - out.writeLong(quota.get(Quota.NAMESPACE)); - out.writeLong(quota.get(Quota.DISKSPACE)); - } - - /** - * Serialize a {@link INodeDirectory} - * @param node The node to write - * @param out The {@link DataOutput} where the fields are written - */ - public static void writeINodeDirectory(INodeDirectory node, DataOutput out) - throws IOException { - writeLocalName(node, out); - out.writeLong(node.getId()); - out.writeShort(0); // replication - out.writeLong(node.getModificationTime()); - out.writeLong(0); // access time - out.writeLong(0); // preferred block size - out.writeInt(-1); // # of blocks - - writeQuota(node.getQuotaCounts(), out); - - if (node instanceof INodeDirectorySnapshottable) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - out.writeBoolean(node.isWithSnapshot()); - } - - writePermissionStatus(node, out); - } - - /** - * Serialize a {@link INodeDirectory} - * @param a The node to write - * @param out The {@link DataOutput} where the fields are written - */ - public static void writeINodeDirectoryAttributes( - INodeDirectoryAttributes a, DataOutput out) throws IOException { - writeLocalName(a, out); - writePermissionStatus(a, out); - out.writeLong(a.getModificationTime()); - writeQuota(a.getQuotaCounts(), out); - } - - /** - * Serialize a {@link INodeSymlink} node - * @param node The node to write - * @param out The {@link DataOutput} where the fields are written - */ - private static void writeINodeSymlink(INodeSymlink node, DataOutput out) - throws IOException { - writeLocalName(node, out); - out.writeLong(node.getId()); - out.writeShort(0); // replication - out.writeLong(0); // modification time - out.writeLong(0); // access time - out.writeLong(0); // preferred block size - out.writeInt(-2); // # of blocks - - Text.writeString(out, node.getSymlinkString()); - writePermissionStatus(node, out); - } - - /** Serialize a {@link INodeReference} node */ - private static void writeINodeReference(INodeReference ref, DataOutput out, - boolean writeUnderConstruction, ReferenceMap referenceMap - ) throws IOException { - writeLocalName(ref, out); - out.writeLong(ref.getId()); - out.writeShort(0); // replication - out.writeLong(0); // modification time - out.writeLong(0); // access time - out.writeLong(0); // preferred block size - out.writeInt(-3); // # of blocks - - final boolean isWithName = ref instanceof INodeReference.WithName; - out.writeBoolean(isWithName); - - if (!isWithName) { - Preconditions.checkState(ref instanceof INodeReference.DstReference); - // dst snapshot id - out.writeInt(((INodeReference.DstReference) ref).getDstSnapshotId()); - } else { - out.writeInt(((INodeReference.WithName) ref).getLastSnapshotId()); - } - - final INodeReference.WithCount withCount - = (INodeReference.WithCount)ref.getReferredINode(); - referenceMap.writeINodeReferenceWithCount(withCount, out, - writeUnderConstruction); - } - - /** - * Save one inode's attributes to the image. 
- */ - public static void saveINode2Image(INode node, DataOutput out, - boolean writeUnderConstruction, ReferenceMap referenceMap) - throws IOException { - if (node.isReference()) { - writeINodeReference(node.asReference(), out, writeUnderConstruction, - referenceMap); - } else if (node.isDirectory()) { - writeINodeDirectory(node.asDirectory(), out); - } else if (node.isSymlink()) { - writeINodeSymlink(node.asSymlink(), out); - } else if (node.isFile()) { - writeINodeFile(node.asFile(), out, writeUnderConstruction); - } - } - // This should be reverted to package private once the ImageLoader // code is moved into this package. This method should not be called // by other code. @@ -429,12 +226,6 @@ public class FSImageSerialization { in.readFully(createdNodeName); return createdNodeName; } - - private static void writeLocalName(INodeAttributes inode, DataOutput out) - throws IOException { - final byte[] name = inode.getLocalNameBytes(); - writeBytes(name, out); - } public static void writeBytes(byte[] data, DataOutput out) throws IOException { Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1576513&r1=1576512&r2=1576513&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Mar 11 21:44:38 2014 @@ -6023,42 +6023,6 @@ public class FSNamesystem implements Nam } /** - * Serializes leases. - */ - void saveFilesUnderConstruction(DataOutputStream out, - Map snapshotUCMap) throws IOException { - // This is run by an inferior thread of saveNamespace, which holds a read - // lock on our behalf. If we took the read lock here, we could block - // for fairness if a writer is waiting on the lock. - synchronized (leaseManager) { - Map nodes = leaseManager.getINodesUnderConstruction(); - for (Map.Entry entry : nodes.entrySet()) { - // TODO: for HDFS-5428, because of rename operations, some - // under-construction files that are - // in the current fs directory can also be captured in the - // snapshotUCMap. We should remove them from the snapshotUCMap. 
- snapshotUCMap.remove(entry.getValue().getId()); - } - - out.writeInt(nodes.size() + snapshotUCMap.size()); // write the size - for (Map.Entry entry : nodes.entrySet()) { - FSImageSerialization.writeINodeUnderConstruction( - out, entry.getValue(), entry.getKey()); - } - for (Map.Entry entry : snapshotUCMap.entrySet()) { - // for those snapshot INodeFileUC, we use "/.reserved/.inodes/" - // as their paths - StringBuilder b = new StringBuilder(); - b.append(FSDirectory.DOT_RESERVED_PATH_PREFIX) - .append(Path.SEPARATOR).append(FSDirectory.DOT_INODES_STRING) - .append(Path.SEPARATOR).append(entry.getValue().getId()); - FSImageSerialization.writeINodeUnderConstruction( - out, entry.getValue(), b.toString()); - } - } - } - - /** * @return all the under-construction files in the lease map */ Map getFilesUnderConstruction() { @@ -6333,15 +6297,6 @@ public class FSNamesystem implements Nam } getEditLog().logSync(); } - - /** - * @param out save state of the secret manager - * @param sdPath String storage directory path - */ - void saveSecretManagerStateCompat(DataOutputStream out, String sdPath) - throws IOException { - dtSecretManager.saveSecretManagerStateCompat(out, sdPath); - } SecretManagerState saveSecretManagerState() { return dtSecretManager.saveSecretManagerState(); Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java?rev=1576513&r1=1576512&r2=1576513&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java Tue Mar 11 21:44:38 2014 @@ -17,17 +17,13 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import java.io.DataOutput; -import java.io.IOException; -import java.util.List; - +import com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; import org.apache.hadoop.hdfs.server.namenode.Quota; -import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; -import com.google.common.base.Preconditions; +import java.util.List; /** * The difference of an inode between in two snapshots. 
@@ -133,11 +129,4 @@ abstract class AbstractINodeDiff created = getList(ListType.CREATED); - out.writeInt(created.size()); - for (INode node : created) { - // For INode in created list, we only need to record its local name - byte[] name = node.getLocalNameBytes(); - out.writeShort(name.length); - out.write(name); - } - } - - /** Serialize {@link #deleted} */ - private void writeDeleted(DataOutput out, - ReferenceMap referenceMap) throws IOException { - final List deleted = getList(ListType.DELETED); - out.writeInt(deleted.size()); - for (INode node : deleted) { - FSImageSerialization.saveINode2Image(node, out, true, referenceMap); - } - } - - /** Serialize to out */ - private void write(DataOutput out, ReferenceMap referenceMap - ) throws IOException { - writeCreated(out); - writeDeleted(out, referenceMap); - } - /** Get the list of INodeDirectory contained in the deleted list */ private void getDirsInDeleted(List dirList) { for (INode node : getList(ListType.DELETED)) { @@ -348,25 +315,6 @@ public class DirectoryWithSnapshotFeatur } @Override - void write(DataOutput out, ReferenceMap referenceMap) throws IOException { - writeSnapshot(out); - out.writeInt(childrenSize); - - // Write snapshotINode - out.writeBoolean(isSnapshotRoot); - if (!isSnapshotRoot) { - if (snapshotINode != null) { - out.writeBoolean(true); - FSImageSerialization.writeINodeDirectoryAttributes(snapshotINode, out); - } else { - out.writeBoolean(false); - } - } - // Write diff. Node need to write poseriorDiff, since diffs is a list. - diff.write(out, referenceMap); - } - - @Override Quota.Counts destroyDiffAndCollectBlocks(INodeDirectory currentINode, BlocksMapUpdateInfo collectedBlocks, final List removedINodes) { // this diff has been deleted Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java?rev=1576513&r1=1576512&r2=1576513&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java Tue Mar 11 21:44:38 2014 @@ -17,17 +17,13 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import java.io.DataOutput; -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; import org.apache.hadoop.hdfs.server.namenode.Quota; -import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; + +import java.util.List; /** * The difference of an {@link INodeFile} between two snapshots. 
@@ -71,20 +67,6 @@ public class FileDiff extends } @Override - void write(DataOutput out, ReferenceMap referenceMap) throws IOException { - writeSnapshot(out); - out.writeLong(fileSize); - - // write snapshotINode - if (snapshotINode != null) { - out.writeBoolean(true); - FSImageSerialization.writeINodeFileAttributes(snapshotINode, out); - } else { - out.writeBoolean(false); - } - } - - @Override Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode, BlocksMapUpdateInfo collectedBlocks, final List removedINodes) { return currentINode.getFileWithSnapshotFeature() Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java?rev=1576513&r1=1576512&r2=1576513&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java Tue Mar 11 21:44:38 2014 @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot; import java.io.DataInput; -import java.io.DataOutput; import java.io.IOException; import java.text.SimpleDateFormat; import java.util.Arrays; @@ -31,7 +30,6 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.AclFeature; import org.apache.hadoop.hdfs.server.namenode.FSImageFormat; -import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.util.ReadOnlyList; @@ -216,11 +214,4 @@ public class Snapshot implements Compara public String toString() { return getClass().getSimpleName() + "." 
+ root.getLocalName() + "(id=" + id + ")"; } - - /** Serialize the fields to out */ - void write(DataOutput out) throws IOException { - out.writeInt(id); - // write root - FSImageSerialization.writeINodeDirectory(root, out); - } } Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java?rev=1576513&r1=1576512&r2=1576513&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java Tue Mar 11 21:44:38 2014 @@ -29,75 +29,21 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.namenode.FSImageFormat; import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; -import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes; -import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList; import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff; import org.apache.hadoop.hdfs.util.Diff.ListType; -import org.apache.hadoop.hdfs.util.ReadOnlyList; +import org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader; /** * A helper class defining static methods for reading/writing snapshot related * information from/to FSImage. */ public class SnapshotFSImageFormat { - /** - * Save snapshots and snapshot quota for a snapshottable directory. - * @param current The directory that the snapshots belongs to. - * @param out The {@link DataOutput} to write. - * @throws IOException - */ - public static void saveSnapshots(INodeDirectorySnapshottable current, - DataOutput out) throws IOException { - // list of snapshots in snapshotsByNames - ReadOnlyList snapshots = current.getSnapshotsByNames(); - out.writeInt(snapshots.size()); - for (Snapshot s : snapshots) { - // write the snapshot id - out.writeInt(s.getId()); - } - // snapshot quota - out.writeInt(current.getSnapshotQuota()); - } - - /** - * Save SnapshotDiff list for an INodeDirectoryWithSnapshot. - * @param sNode The directory that the SnapshotDiff list belongs to. - * @param out The {@link DataOutput} to write. 
- */ - private static > - void saveINodeDiffs(final AbstractINodeDiffList diffs, - final DataOutput out, ReferenceMap referenceMap) throws IOException { - // Record the diffs in reversed order, so that we can find the correct - // reference for INodes in the created list when loading the FSImage - if (diffs == null) { - out.writeInt(-1); // no diffs - } else { - final List list = diffs.asList(); - final int size = list.size(); - out.writeInt(size); - for (int i = size - 1; i >= 0; i--) { - list.get(i).write(out, referenceMap); - } - } - } - - public static void saveDirectoryDiffList(final INodeDirectory dir, - final DataOutput out, final ReferenceMap referenceMap - ) throws IOException { - saveINodeDiffs(dir.getDiffs(), out, referenceMap); - } - - public static void saveFileDiffList(final INodeFile file, - final DataOutput out) throws IOException { - saveINodeDiffs(file.getDiffs(), out, null); - } - public static FileDiffList loadFileDiffList(DataInput in, FSImageFormat.Loader loader) throws IOException { final int size = in.readInt(); @@ -250,12 +196,12 @@ public class SnapshotFSImageFormat { } } } - + /** * Load the snapshotINode field of {@link AbstractINodeDiff}. * @param snapshot The Snapshot associated with the {@link AbstractINodeDiff}. * @param in The {@link DataInput} to read. - * @param loader The {@link Loader} instance that this loading procedure is + * @param loader The {@link Loader} instance that this loading procedure is * using. * @return The snapshotINode. */ @@ -318,23 +264,6 @@ public class SnapshotFSImageFormat { * Used to record whether the subtree of the reference node has been saved */ private final Map dirMap = new HashMap(); - - public void writeINodeReferenceWithCount( - INodeReference.WithCount withCount, DataOutput out, - boolean writeUnderConstruction) throws IOException { - final INode referred = withCount.getReferredINode(); - final long id = withCount.getId(); - final boolean firstReferred = !referenceMap.containsKey(id); - out.writeBoolean(firstReferred); - - if (firstReferred) { - FSImageSerialization.saveINode2Image(referred, out, - writeUnderConstruction, this); - referenceMap.put(id, withCount); - } else { - out.writeLong(id); - } - } public boolean toProcessSubtree(long id) { if (dirMap.containsKey(id)) { Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1576513&r1=1576512&r2=1576513&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Tue Mar 11 21:44:38 2014 @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot; import java.io.DataInput; -import java.io.DataOutput; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -286,22 +285,6 @@ public class SnapshotManager implements return snapshottables.values().toArray( new INodeDirectorySnapshottable[snapshottables.size()]); } - - /** - * Write {@link #snapshotCounter}, {@link #numSnapshots}, - * and all snapshots to the DataOutput. 
- */ - public void write(DataOutput out) throws IOException { - out.writeInt(snapshotCounter); - out.writeInt(numSnapshots.get()); - - // write all snapshots. - for(INodeDirectorySnapshottable snapshottableDir : snapshottables.values()) { - for(Snapshot s : snapshottableDir.getSnapshotsByNames()) { - s.write(out); - } - } - } /** * Read values of {@link #snapshotCounter}, {@link #numSnapshots}, and