hadoop-common-commits mailing list archives

From: vinayakum...@apache.org
Subject: [16/50] hadoop git commit: HDFS-8559. Erasure Coding: fix non-protobuf fsimage for striped blocks. (Jing Zhao via yliu)
Date: Fri, 14 Aug 2015 10:54:30 GMT
HDFS-8559. Erasure Coding: fix non-protobuf fsimage for striped blocks. (Jing Zhao via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb0b12ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb0b12ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb0b12ce

Branch: refs/heads/HDFS-7285-REBASE
Commit: cb0b12ce603afc9ae6f50f7dfa505caa3149dc36
Parents: df589ab
Author: yliu <yliu@apache.org>
Authored: Sun Jun 14 15:39:19 2015 +0800
Committer: Vinayakumar B <vinayakumarb@apache.org>
Committed: Thu Aug 13 17:09:24 2015 +0530

----------------------------------------------------------------------
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt        |  3 +
 .../hdfs/server/namenode/FSImageFormat.java     | 58 +++------------
 .../server/namenode/FSImageSerialization.java   | 76 +++++---------------
 .../hdfs/server/namenode/TestFSImage.java       | 22 +-----
 4 files changed, 33 insertions(+), 126 deletions(-)
----------------------------------------------------------------------
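
In short, this change removes the striped-block special cases from the legacy (non-protobuf) fsimage loader and saver; as the TODO added to TestFSImage below notes, striped blocks are for now saved to and loaded from the legacy image as contiguous blocks. For context, the legacy format serializes each block with plain Writable calls. The sketch below is illustrative only (it is not code from this commit) and assumes the field order used by org.apache.hadoop.hdfs.protocol.Block:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Illustrative sketch of the per-block record the legacy fsimage reads/writes.
class LegacyBlockRecord {
  long blockId;
  long numBytes;
  long generationStamp;

  void write(DataOutput out) throws IOException {
    out.writeLong(blockId);
    out.writeLong(numBytes);
    out.writeLong(generationStamp);
  }

  void readFields(DataInput in) throws IOException {
    blockId = in.readLong();
    numBytes = in.readLong();
    generationStamp = in.readLong();
  }
}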


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb0b12ce/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 2eb8259..1ae3e9b 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -299,3 +299,6 @@
 
     HDFS-8585. Erasure Coding: Remove dataBlockNum and parityBlockNum from
     StripedBlockProto. (Yi Liu via jing9)
+
+    HDFS-8559. Erasure Coding: fix non-protobuf fsimage for striped blocks.
+    (Jing Zhao via yliu)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb0b12ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 3083952..d9a74e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -365,12 +365,6 @@ public class FSImageFormat {
           long maxSequentialBlockId = in.readLong();
           namesystem.getBlockIdManager().setLastAllocatedContiguousBlockId(
               maxSequentialBlockId);
-          if (NameNodeLayoutVersion.supports(
-              NameNodeLayoutVersion.Feature.ERASURE_CODING, imgVersion)) {
-            final long maxStripedBlockId = in.readLong();
-            namesystem.getBlockIdManager().setLastAllocatedStripedBlockId(
-                maxStripedBlockId);
-          }
         } else {
 
           long startingGenStamp = namesystem.getBlockIdManager()
@@ -759,31 +753,16 @@ public class FSImageFormat {
       atime = in.readLong();
     }
     final long blockSize = in.readLong();
-    final boolean isStriped = NameNodeLayoutVersion.supports(
-            NameNodeLayoutVersion.Feature.ERASURE_CODING, imgVersion)
-            && (in.readBoolean());
     final int numBlocks = in.readInt();
-    // TODO: ECSchema can be restored from persisted file (HDFS-7859).
-    final ECSchema schema = isStriped ?
-        ErasureCodingSchemaManager.getSystemDefaultSchema() : null;
 
     if (numBlocks >= 0) {
       // file
       
       // read blocks
-      Block[] blocks;
-      if (isStriped) {
-        blocks = new Block[numBlocks];
-        for (int j = 0; j < numBlocks; j++) {
-          blocks[j] = new BlockInfoStriped(new Block(), schema);
-          blocks[j].readFields(in);
-        }
-      } else {
-        blocks = new BlockInfoContiguous[numBlocks];
-        for (int j = 0; j < numBlocks; j++) {
-          blocks[j] = new BlockInfoContiguous(replication);
-          blocks[j].readFields(in);
-        }
+      Block[] blocks = new BlockInfoContiguous[numBlocks];
+      for (int j = 0; j < numBlocks; j++) {
+        blocks[j] = new BlockInfoContiguous(replication);
+        blocks[j].readFields(in);
       }
 
       String clientName = "";
@@ -803,16 +782,8 @@ public class FSImageFormat {
             // convert the last block to BlockUC
             if (blocks.length > 0) {
               Block lastBlk = blocks[blocks.length - 1];
-              if (isStriped){
-                BlockInfoStriped lastStripedBlk = (BlockInfoStriped) lastBlk;
-                blocks[blocks.length - 1]
-                        = new BlockInfoStripedUnderConstruction(lastBlk,
-                                lastStripedBlk.getSchema());
-              } else {
-                blocks[blocks.length - 1]
-                        = new BlockInfoContiguousUnderConstruction(lastBlk,
-                                replication);
-              }
+              blocks[blocks.length - 1] =
+                  new BlockInfoContiguousUnderConstruction(lastBlk, replication);
             }
           }
         }
@@ -825,19 +796,9 @@ public class FSImageFormat {
         counter.increment();
       }
 
-      INodeFile file;
-      if (isStriped) {
-        file = new INodeFile(inodeId, localName, permissions, modificationTime,
-            atime, new BlockInfoContiguous[0], (short) 0, blockSize);
-        file.addStripedBlocksFeature();
-        for (Block block : blocks) {
-          file.getStripedBlocksFeature().addBlock((BlockInfoStriped) block);
-        }
-      } else {
-        file = new INodeFile(inodeId, localName, permissions,
-            modificationTime, atime, (BlockInfoContiguous[]) blocks,
-            replication, blockSize);
-      }
+      INodeFile file = new INodeFile(inodeId, localName, permissions,
+          modificationTime, atime, (BlockInfoContiguous[]) blocks,
+          replication, blockSize);
       if (underConstruction) {
         file.toUnderConstruction(clientName, clientMachine);
       }
@@ -1315,7 +1276,6 @@ public class FSImageFormat {
         out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV2());
         out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampAtblockIdSwitch());
         out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedContiguousBlockId());
-        out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedStripedBlockId());
         out.writeLong(context.getTxId());
         out.writeLong(sourceNamesystem.dir.getLastInodeId());
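
Note that the removals in this file come in matched pairs: the saver no longer writes the last allocated striped block id, and the loader no longer reads it. The legacy fsimage is a positional binary stream, so writer and reader must consume exactly the same fields in the same order; dropping only one side would shift every field that follows. A minimal, self-contained demonstration of that constraint (illustrative, not HDFS code):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FieldOrderDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    out.writeLong(1000000L);  // e.g. last allocated contiguous block id
    out.writeLong(42L);       // e.g. transaction id
    out.close();

    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(bos.toByteArray()));
    long lastBlockId = in.readLong();
    long txId = in.readLong();  // only correct because reads mirror writes exactly
    System.out.println(lastBlockId + " " + txId);
  }
}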
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb0b12ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 39c4038..414b6a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -32,13 +32,10 @@ import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@@ -51,7 +48,6 @@ import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.ShortWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.xml.sax.ContentHandler;
 import org.xml.sax.SAXException;
 
@@ -129,45 +125,22 @@ public class FSImageSerialization {
     short blockReplication = in.readShort();
     long modificationTime = in.readLong();
     long preferredBlockSize = in.readLong();
-    final boolean isStriped = NameNodeLayoutVersion.supports(
-        NameNodeLayoutVersion.Feature.ERASURE_CODING, imgVersion)
-        && (in.readBoolean());
-
-    // TODO: ECSchema can be restored from persisted file (HDFS-7859).
-    final ECSchema schema = isStriped ?
-        ErasureCodingSchemaManager.getSystemDefaultSchema() : null;
 
     int numBlocks = in.readInt();
 
-    final BlockInfoContiguous[] blocksContiguous;
-    BlockInfoStriped[] blocksStriped = null;
-    if (isStriped) {
-      blocksContiguous = new BlockInfoContiguous[0];
-      blocksStriped = new BlockInfoStriped[numBlocks];
-      int i = 0;
-      for (; i < numBlocks - 1; i++) {
-        blocksStriped[i] = new BlockInfoStriped(new Block(), schema);
-        blocksStriped[i].readFields(in);
-      }
-      if (numBlocks > 0) {
-        blocksStriped[i] = new BlockInfoStripedUnderConstruction(new Block(),
-            schema, BlockUCState.UNDER_CONSTRUCTION, null);
-        blocksStriped[i].readFields(in);
-      }
-    } else {
-      blocksContiguous = new BlockInfoContiguous[numBlocks];
-      Block blk = new Block();
-      int i = 0;
-      for (; i < numBlocks-1; i++) {
-        blk.readFields(in);
-        blocksContiguous[i] = new BlockInfoContiguous(blk, blockReplication);
-      }
-      // last block is UNDER_CONSTRUCTION
-      if(numBlocks > 0) {
-        blk.readFields(in);
-        blocksContiguous[i] = new BlockInfoContiguousUnderConstruction(
-                blk, blockReplication, BlockUCState.UNDER_CONSTRUCTION, null);
-      }
+    final BlockInfoContiguous[] blocksContiguous =
+        new BlockInfoContiguous[numBlocks];
+    Block blk = new Block();
+    int i = 0;
+    for (; i < numBlocks - 1; i++) {
+      blk.readFields(in);
+      blocksContiguous[i] = new BlockInfoContiguous(blk, blockReplication);
+    }
+    // last block is UNDER_CONSTRUCTION
+    if(numBlocks > 0) {
+      blk.readFields(in);
+      blocksContiguous[i] = new BlockInfoContiguousUnderConstruction(
+          blk, blockReplication, BlockUCState.UNDER_CONSTRUCTION, null);
     }
 
     PermissionStatus perm = PermissionStatus.read(in);
@@ -181,19 +154,8 @@ public class FSImageSerialization {
 
     // Images in the pre-protobuf format will not have the lazyPersist flag,
     // so it is safe to pass false always.
-    INodeFile file;
-    if (isStriped) {
-      file = new INodeFile(inodeId, name, perm, modificationTime,
-          modificationTime, blocksContiguous, (short) 0, preferredBlockSize);
-      file.addStripedBlocksFeature();
-      for (int i = 0; i < numBlocks; i++) {
-        file.getStripedBlocksFeature().addBlock(blocksStriped[i]);
-      }
-    } else {
-      file = new INodeFile(inodeId, name, perm, modificationTime,
-          modificationTime, blocksContiguous, blockReplication,
-          preferredBlockSize);
-    }
+    INodeFile file = new INodeFile(inodeId, name, perm, modificationTime,
+        modificationTime, blocksContiguous, blockReplication, preferredBlockSize);
     file.toUnderConstruction(clientName, clientMachine);
     return file;
   }
@@ -208,8 +170,7 @@ public class FSImageSerialization {
     out.writeShort(cons.getFileReplication());
     out.writeLong(cons.getModificationTime());
     out.writeLong(cons.getPreferredBlockSize());
-    // whether the file has striped blocks
-    out.writeBoolean(cons.isStriped());
+
     writeBlocks(cons.getBlocks(), out);
     cons.getPermissionStatus().write(out);
 
@@ -234,8 +195,7 @@ public class FSImageSerialization {
     out.writeLong(file.getModificationTime());
     out.writeLong(file.getAccessTime());
     out.writeLong(file.getPreferredBlockSize());
-    // whether the file has striped blocks
-    out.writeBoolean(file.isStriped());
+
     writeBlocks(file.getBlocks(), out);
     SnapshotFSImageFormat.saveFileDiffList(file, out);
 
@@ -348,7 +308,7 @@ public class FSImageSerialization {
     if (!isWithName) {
       Preconditions.checkState(ref instanceof INodeReference.DstReference);
       // dst snapshot id
-      out.writeInt(((INodeReference.DstReference) ref).getDstSnapshotId());
+      out.writeInt(ref.getDstSnapshotId());
     } else {
       out.writeInt(((INodeReference.WithName) ref).getLastSnapshotId());
     }
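
With the striped branch gone, readINodeUnderConstruction above has a single contiguous-only path: read numBlocks, materialize the first numBlocks - 1 blocks as finalized, and read the last one as an under-construction block. A compact restatement of that pattern, using hypothetical stand-in types rather than the real BlockInfoContiguous classes:

import java.io.DataInput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-ins; this restates the reading pattern only and is not
// the actual BlockInfoContiguous / BlockInfoContiguousUnderConstruction code.
class SimpleBlock {
  long id;
  long bytes;
  long genStamp;
  boolean underConstruction;

  void readFields(DataInput in) throws IOException {
    id = in.readLong();
    bytes = in.readLong();
    genStamp = in.readLong();
  }
}

class UnderConstructionBlockListReader {
  static List<SimpleBlock> readBlocks(DataInput in) throws IOException {
    int numBlocks = in.readInt();
    List<SimpleBlock> blocks = new ArrayList<>(numBlocks);
    for (int i = 0; i < numBlocks; i++) {
      SimpleBlock b = new SimpleBlock();
      b.readFields(in);
      // only the final block of a file still being written is under construction
      b.underConstruction = (i == numBlocks - 1);
      blocks.add(b);
    }
    return blocks;
  }
}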

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb0b12ce/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index e35ea74..5ccb86b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -244,27 +244,11 @@ public class TestFSImage {
         fileByLoaded.getPermissionStatus().getPermission());
     assertEquals(mtime, fileByLoaded.getModificationTime());
     assertEquals(isUC ? mtime : atime, fileByLoaded.getAccessTime());
-    assertEquals(0, fileByLoaded.getContiguousBlocks().length);
-    assertEquals(0, fileByLoaded.getFileReplication());
+    // TODO for striped blocks, we currently save and load them as contiguous
+    // blocks to/from legacy fsimage
+    assertEquals(3, fileByLoaded.getContiguousBlocks().length);
     assertEquals(preferredBlockSize, fileByLoaded.getPreferredBlockSize());
 
-    //check the BlockInfoStriped
-    BlockInfoStriped[] stripedBlksByLoaded =
-        fileByLoaded.getStripedBlocksFeature().getBlocks();
-    assertEquals(3, stripedBlksByLoaded.length);
-    for (int i = 0; i < 3; i++) {
-      assertEquals(stripedBlks[i].getBlockId(),
-          stripedBlksByLoaded[i].getBlockId());
-      assertEquals(stripedBlks[i].getNumBytes(),
-          stripedBlksByLoaded[i].getNumBytes());
-      assertEquals(stripedBlks[i].getGenerationStamp(),
-          stripedBlksByLoaded[i].getGenerationStamp());
-      assertEquals(stripedBlks[i].getDataBlockNum(),
-          stripedBlksByLoaded[i].getDataBlockNum());
-      assertEquals(stripedBlks[i].getParityBlockNum(),
-          stripedBlksByLoaded[i].getParityBlockNum());
-    }
-
     if (isUC) {
       assertEquals(client,
           fileByLoaded.getFileUnderConstructionFeature().getClientName());

