From: jing9@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Subject: hadoop git commit: HDFS-7937. Erasure Coding: INodeFile quota computation unit tests. Contributed by Kai Sasaki.
Date: Sat, 18 Apr 2015 01:11:13 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 81fcf75a3 -> 64adbc02e


HDFS-7937. Erasure Coding: INodeFile quota computation unit tests. Contributed by Kai Sasaki.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64adbc02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64adbc02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64adbc02

Branch: refs/heads/HDFS-7285
Commit: 64adbc02ef9de8a1509ba70e4075d2613ee296c7
Parents: 81fcf75
Author: Jing Zhao
Authored: Fri Apr 17 18:07:07 2015 -0700
Committer: Jing Zhao
Committed: Fri Apr 17 18:07:07 2015 -0700

----------------------------------------------------------------------
 .../blockmanagement/BlockInfoStriped.java       |  23 +-
 .../server/namenode/TestStripedINodeFile.java   | 229 +++++++++++++++++++
 2 files changed, 250 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64adbc02/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 20b0c5c..9f2f5ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -203,8 +203,27 @@ public class BlockInfoStriped extends BlockInfo {
     // In the case of striped blocks, the total usage of this block group
     // should cover both the data blocks and the parity blocks, because
     // `getNumBytes` only gives the total size of the actual data.
-    return ((getNumBytes() - 1) / (dataBlockNum * BLOCK_STRIPED_CELL_SIZE) + 1)
-        * BLOCK_STRIPED_CELL_SIZE * parityBlockNum + getNumBytes();
+
+    // 0. Calculate the total bytes per stripe.
+    long numBytesPerStripe = dataBlockNum * BLOCK_STRIPED_CELL_SIZE;
+    if (getNumBytes() % numBytesPerStripe == 0) {
+      return getNumBytes() / dataBlockNum * getTotalBlockNum();
+    }
+    // 1. Calculate the number of stripes in this block group.
+    long numStripes = (getNumBytes() - 1) / numBytesPerStripe + 1;
+    // 2. Calculate the parity cell length in the last stripe. Note that the
+    //    size of a parity cell should equal the size of the first cell, if
+    //    that cell is not full.
+    long lastStripeParityCellLen = Math.min(getNumBytes() % numBytesPerStripe,
+        BLOCK_STRIPED_CELL_SIZE);
+    // 3. The total consumed space is the sum of
+    //    - the full cells of both the data blocks and the parity blocks,
+    //    - the remaining data bytes that do not make up a full stripe, and
+    //    - the parity cells of the last stripe, whose size should equal the
+    //      size of the first cell in that stripe.
+    return getTotalBlockNum() * (BLOCK_STRIPED_CELL_SIZE * (numStripes - 1))
+        + getNumBytes() % numBytesPerStripe
+        + lastStripeParityCellLen * parityBlockNum;
   }
 
   @Override
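
As a sanity check of the arithmetic above, the following self-contained sketch mirrors the patched spaceConsumed logic under the default RS-6-3 schema (6 data + 3 parity units). The class and constant names are illustrative only, not the HDFS API, and the 64 KiB cell size is an assumption; the small-file results below do not depend on it as long as the cell size exceeds the file length.

    // Illustrative sketch only -- mirrors the patched computation above;
    // the names and the 64 KiB cell size are assumptions, not the HDFS API.
    public class StripedSpaceSketch {
      static final int DATA_BLOCKS = 6;        // RS-6-3: data units
      static final int PARITY_BLOCKS = 3;      // RS-6-3: parity units
      static final long CELL_SIZE = 64 * 1024; // assumed striping cell size

      static long spaceConsumed(long numBytes) {
        int totalBlockNum = DATA_BLOCKS + PARITY_BLOCKS;
        long numBytesPerStripe = DATA_BLOCKS * CELL_SIZE;
        if (numBytes % numBytesPerStripe == 0) {
          // Full stripes only: every data byte carries parity at a fixed ratio.
          return numBytes / DATA_BLOCKS * totalBlockNum;
        }
        long numStripes = (numBytes - 1) / numBytesPerStripe + 1;
        long lastStripeParityCellLen =
            Math.min(numBytes % numBytesPerStripe, CELL_SIZE);
        // Full cells of all blocks + trailing data remainder
        // + parity cells of the partial last stripe.
        return totalBlockNum * (CELL_SIZE * (numStripes - 1))
            + numBytes % numBytesPerStripe
            + lastStripeParityCellLen * PARITY_BLOCKS;
      }

      public static void main(String[] args) {
        System.out.println(spaceConsumed(1));   // 0 + 1 + 1 * 3     = 4
        System.out.println(spaceConsumed(100)); // 0 + 100 + 100 * 3 = 400
      }
    }

These two values, 4 and 400, are exactly what the new tests below assert for 1-byte and 100-byte block groups.
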
http://git-wip-us.apache.org/repos/asf/hadoop/blob/64adbc02/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
new file mode 100644
index 0000000..d251c30
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+import org.junit.Test;
+
+/**
+ * This class tests INodeFile with the striped feature.
+ */
+public class TestStripedINodeFile {
+  public static final Log LOG = LogFactory.getLog(TestStripedINodeFile.class);
+
+  private static final PermissionStatus perm = new PermissionStatus(
+      "userName", null, FsPermission.getDefault());
+
+  private static INodeFile createStripedINodeFile() {
+    return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
+        null, (short)0, 1024L, HdfsConstants.COLD_STORAGE_POLICY_ID);
+  }
+
+  @Test
+  public void testBlockStripedFeature()
+      throws IOException, InterruptedException {
+    INodeFile inf = createStripedINodeFile();
+    inf.addStripedBlocksFeature();
+    assertTrue(inf.isStriped());
+  }
+
+  @Test
+  public void testBlockStripedTotalBlockCount() {
+    ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+    Block blk = new Block(1);
+    BlockInfoStriped blockInfoStriped
+        = new BlockInfoStriped(blk,
+            (short)defaultSchema.getNumDataUnits(),
+            (short)defaultSchema.getNumParityUnits());
+    assertEquals(9, blockInfoStriped.getTotalBlockNum());
+  }
+
+  @Test
+  public void testBlockStripedLength()
+      throws IOException, InterruptedException {
+    ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+    INodeFile inf = createStripedINodeFile();
+    inf.addStripedBlocksFeature();
+    Block blk = new Block(1);
+    BlockInfoStriped blockInfoStriped
+        = new BlockInfoStriped(blk,
+            (short)defaultSchema.getNumDataUnits(),
+            (short)defaultSchema.getNumParityUnits());
+    inf.addBlock(blockInfoStriped);
+    assertEquals(1, inf.getBlocks().length);
+  }
+
+  @Test
+  public void testBlockStripedConsumedSpace()
+      throws IOException, InterruptedException {
+    ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+    INodeFile inf = createStripedINodeFile();
+    inf.addStripedBlocksFeature();
+    Block blk = new Block(1);
+    BlockInfoStriped blockInfoStriped
+        = new BlockInfoStriped(blk,
+            (short)defaultSchema.getNumDataUnits(),
+            (short)defaultSchema.getNumParityUnits());
+    blockInfoStriped.setNumBytes(1);
+    inf.addBlock(blockInfoStriped);
+    // 0. Calculate the total bytes per stripe.
+    // 1. Calculate the number of stripes in this block group.
+    // 2. Calculate the remaining length that does not make up a full stripe.
+    // 3. The total consumed space is the sum of
+    //    a. the full cells of the data blocks and parity blocks,
+    //    b. the remaining data that does not make up a full stripe, and
+    //    c. the parity cells of the last stripe, whose size should equal
+    //       the size of the first cell in that stripe.
+    // So the total consumed space is the sum of
+    //    a. <cell size> * (<num stripes> - 1) * <total block num> = 0
+    //    b. <num bytes> % <num bytes per stripe> = 1
+    //    c. <last stripe length> * <parity block num> = 1 * 3
+    assertEquals(4, inf.storagespaceConsumedWithStriped());
+    assertEquals(4, inf.storagespaceConsumed());
+  }
+
+  @Test
+  public void testMultipleBlockStripedConsumedSpace()
+      throws IOException, InterruptedException {
+    ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+    INodeFile inf = createStripedINodeFile();
+    inf.addStripedBlocksFeature();
+    Block blk1 = new Block(1);
+    BlockInfoStriped blockInfoStriped1
+        = new BlockInfoStriped(blk1,
+            (short)defaultSchema.getNumDataUnits(),
+            (short)defaultSchema.getNumParityUnits());
+    blockInfoStriped1.setNumBytes(1);
+    Block blk2 = new Block(2);
+    BlockInfoStriped blockInfoStriped2
+        = new BlockInfoStriped(blk2,
+            (short)defaultSchema.getNumDataUnits(),
+            (short)defaultSchema.getNumParityUnits());
+    blockInfoStriped2.setNumBytes(1);
+    inf.addBlock(blockInfoStriped1);
+    inf.addBlock(blockInfoStriped2);
+    // This is double the size of the single block in the case above.
+    assertEquals(4 * 2, inf.storagespaceConsumedWithStriped());
+    assertEquals(4 * 2, inf.storagespaceConsumed());
+  }
+
+  @Test
+  public void testBlockStripedFileSize()
+      throws IOException, InterruptedException {
+    ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+    INodeFile inf = createStripedINodeFile();
+    inf.addStripedBlocksFeature();
+    Block blk = new Block(1);
+    BlockInfoStriped blockInfoStriped
+        = new BlockInfoStriped(blk,
+            (short)defaultSchema.getNumDataUnits(),
+            (short)defaultSchema.getNumParityUnits());
+    blockInfoStriped.setNumBytes(100);
+    inf.addBlock(blockInfoStriped);
+    // computeFileSize should return the actual data size
+    // retained by this file.
+    assertEquals(100, inf.computeFileSize());
+    assertEquals(100, inf.computeFileSize(false, false));
+  }
+
+  @Test
+  public void testBlockStripedUCFileSize()
+      throws IOException, InterruptedException {
+    ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+    INodeFile inf = createStripedINodeFile();
+    inf.addStripedBlocksFeature();
+    Block blk = new Block(1);
+    BlockInfoStripedUnderConstruction bInfoStripedUC
+        = new BlockInfoStripedUnderConstruction(blk,
+            (short)defaultSchema.getNumDataUnits(),
+            (short)defaultSchema.getNumParityUnits());
+    bInfoStripedUC.setNumBytes(100);
+    inf.addBlock(bInfoStripedUC);
+    assertEquals(100, inf.computeFileSize());
+    assertEquals(0, inf.computeFileSize(false, false));
+  }
+
+  @Test
+  public void testBlockStripedComputeQuotaUsage()
+      throws IOException, InterruptedException {
+    ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+    INodeFile inf = createStripedINodeFile();
+    inf.addStripedBlocksFeature();
+    Block blk = new Block(1);
+    BlockInfoStriped blockInfoStriped
+        = new BlockInfoStriped(blk,
+            (short)defaultSchema.getNumDataUnits(),
+            (short)defaultSchema.getNumParityUnits());
+    blockInfoStriped.setNumBytes(100);
+    inf.addBlock(blockInfoStriped);
+
+    BlockStoragePolicySuite suite =
+        BlockStoragePolicySuite.createDefaultSuite();
+    QuotaCounts counts =
+        inf.computeQuotaUsageWithStriped(suite,
+            new QuotaCounts.Builder().build());
+    assertEquals(1, counts.getNameSpace());
+    // The total consumed space is the sum of
+    //    a. <cell size> * (<num stripes> - 1) * <total block num> = 0
+    //    b. <num bytes> % <num bytes per stripe> = 100
+    //    c. <last stripe length> * <parity block num> = 100 * 3
+    assertEquals(400, counts.getStorageSpace());
+  }
+
+  @Test
+  public void testBlockStripedUCComputeQuotaUsage()
+      throws IOException, InterruptedException {
+    ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+    INodeFile inf = createStripedINodeFile();
+    inf.addStripedBlocksFeature();
+    Block blk = new Block(1);
+    BlockInfoStripedUnderConstruction bInfoStripedUC
+        = new BlockInfoStripedUnderConstruction(blk,
+            (short)defaultSchema.getNumDataUnits(),
+            (short)defaultSchema.getNumParityUnits());
+    bInfoStripedUC.setNumBytes(100);
+    inf.addBlock(bInfoStripedUC);
+
+    BlockStoragePolicySuite suite
+        = BlockStoragePolicySuite.createDefaultSuite();
+    QuotaCounts counts
+        = inf.computeQuotaUsageWithStriped(suite,
+            new QuotaCounts.Builder().build());
+    assertEquals(1024, inf.getPreferredBlockSize());
+    assertEquals(1, counts.getNameSpace());
+    // The consumed space of a BlockInfoStripedUnderConstruction block is
+    // computed from the preferred block size. That is 1024 here, and the
+    // total block num is 9 (= 6 + 3), so the consumed storage space should
+    // be 1024 * 9 = 9216.
+    assertEquals(9216, counts.getStorageSpace());
+  }
+}
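
The 9216 figure in that last test can also be reproduced by hand: an under-construction striped block is charged at the file's preferred block size for every block in the group, data and parity alike. A minimal sketch, again with illustrative names rather than the HDFS API:

    // Illustrative sketch: UC striped blocks are charged at the preferred
    // block size for every block (data + parity) in the group.
    public class StripedUCQuotaSketch {
      public static void main(String[] args) {
        long preferredBlockSize = 1024L; // matches createStripedINodeFile()
        int totalBlockNum = 6 + 3;       // default RS-6-3 schema
        System.out.println(preferredBlockSize * totalBlockNum); // 9216
      }
    }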