From: zhz@apache.org
To: common-commits@hadoop.apache.org
Date: Mon, 04 May 2015 17:57:45 -0000
Subject: [11/50] hadoop git commit: HDFS-8104 Make hard-coded values consistent with the system default schema first before removing them. Contributed by Kai Zheng

HDFS-8104 Make hard-coded values consistent with the system default schema first before removing them. Contributed by Kai Zheng

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/515deb9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/515deb9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/515deb9e

Branch: refs/heads/HDFS-7285
Commit: 515deb9e72f670d05bf69da7f96ab715e1783d02
Parents: 3e4c3dd
Author: Kai Zheng
Authored: Fri Apr 10 00:16:28 2015 +0800
Committer: Zhe Zhang
Committed: Mon May 4 10:13:17 2015 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt        |   4 +-
 .../hadoop/hdfs/TestPlanReadPortions.java       | 142 +++++++++++++++++++
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 112 ---------------
 3 files changed, 145 insertions(+), 113 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/515deb9e/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 5078a15..1e695c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -54,4 +54,6 @@
     HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from
     NameNode (vinayakumarb)
 
-    HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
\ No newline at end of file
+    HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
+
+    HDFS-8104. Make hard-coded values consistent with the system default schema first before removing them. (Kai Zheng)
\ No newline at end of file
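The new TestPlanReadPortions class added below exercises DFSStripedInputStream.planReadPortions, which splits a logical read over a striped block group into per-block read portions. As orientation for the expected values in those tests, here is a minimal, self-contained sketch of the round-robin striping arithmetic they encode. It is an illustration only, written against the test's hard-coded values (3 data blocks, 128 KB cells); the class and method names are invented and this is not code from the patch.

    // Sketch of round-robin striping: logical bytes fill cell 0 of block 0,
    // then cell 0 of block 1, ..., then cell 1 of block 0, and so on.
    public class StripingMathSketch {
      static final int GROUP_SIZE = 3;        // data blocks per group (test value)
      static final int CELLSIZE = 128 * 1024; // striping cell size (test value)

      // Which data block holds the byte at this logical offset in the group?
      static int blockIndex(long offsetInGroup) {
        long cellIdx = offsetInGroup / CELLSIZE;
        return (int) (cellIdx % GROUP_SIZE);  // cells rotate across the blocks
      }

      // At what offset inside that block does the byte live?
      static long offsetInBlock(long offsetInGroup) {
        long cellIdx = offsetInGroup / CELLSIZE;
        return (cellIdx / GROUP_SIZE) * CELLSIZE + offsetInGroup % CELLSIZE;
      }

      public static void main(String[] args) {
        // Matches the "start block offset is cellSize * 2 - 1" test case below:
        long start = 2L * CELLSIZE - 1;
        System.out.println(blockIndex(start));    // 1
        System.out.println(offsetInBlock(start)); // 131071, i.e. CELLSIZE - 1
      }
    }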
http://git-wip-us.apache.org/repos/asf/hadoop/blob/515deb9e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
new file mode 100644
index 0000000..cf84b30
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.junit.Test;
+
+import static org.apache.hadoop.hdfs.DFSStripedInputStream.ReadPortion;
+import static org.junit.Assert.*;
+
+public class TestPlanReadPortions {
+
+  // We only support 3 as the number of data blocks for now. That is good
+  // enough for the purpose of these tests, though not yet flexible enough
+  // for an arbitrary number in a schema.
+  private final short GROUP_SIZE = 3;
+  private final int CELLSIZE = 128 * 1024;
+
+  private void testPlanReadPortions(int startInBlk, int length,
+      int bufferOffset, int[] readLengths, int[] offsetsInBlock,
+      int[][] bufferOffsets, int[][] bufferLengths) {
+    ReadPortion[] results = DFSStripedInputStream.planReadPortions(GROUP_SIZE,
+        CELLSIZE, startInBlk, length, bufferOffset);
+    assertEquals(GROUP_SIZE, results.length);
+
+    for (int i = 0; i < GROUP_SIZE; i++) {
+      assertEquals(readLengths[i], results[i].getReadLength());
+      assertEquals(offsetsInBlock[i], results[i].getStartOffsetInBlock());
+      final int[] bOffsets = results[i].getOffsets();
+      assertArrayEquals(bufferOffsets[i], bOffsets);
+      final int[] bLengths = results[i].getLengths();
+      assertArrayEquals(bufferLengths[i], bLengths);
+    }
+  }
+
+  /**
+   * Test {@link DFSStripedInputStream#planReadPortions}
+   */
+  @Test
+  public void testPlanReadPortions() {
+    /**
+     * start block offset is 0, read cellSize - 10
+     */
+    testPlanReadPortions(0, CELLSIZE - 10, 0,
+        new int[]{CELLSIZE - 10, 0, 0}, new int[]{0, 0, 0},
+        new int[][]{new int[]{0}, new int[]{}, new int[]{}},
+        new int[][]{new int[]{CELLSIZE - 10}, new int[]{}, new int[]{}});
+
+    /**
+     * start block offset is 0, read 3 * cellSize
+     */
+    testPlanReadPortions(0, GROUP_SIZE * CELLSIZE, 0,
+        new int[]{CELLSIZE, CELLSIZE, CELLSIZE}, new int[]{0, 0, 0},
+        new int[][]{new int[]{0}, new int[]{CELLSIZE}, new int[]{CELLSIZE * 2}},
+        new int[][]{new int[]{CELLSIZE}, new int[]{CELLSIZE}, new int[]{CELLSIZE}});
+
+    /**
+     * start block offset is 0, read cellSize + 10
+     */
+    testPlanReadPortions(0, CELLSIZE + 10, 0,
+        new int[]{CELLSIZE, 10, 0}, new int[]{0, 0, 0},
+        new int[][]{new int[]{0}, new int[]{CELLSIZE}, new int[]{}},
+        new int[][]{new int[]{CELLSIZE}, new int[]{10}, new int[]{}});
+
+    /**
+     * start block offset is 0, read 5 * cellSize + 10, buffer start offset is 100
+     */
+    testPlanReadPortions(0, 5 * CELLSIZE + 10, 100,
+        new int[]{CELLSIZE * 2, CELLSIZE * 2, CELLSIZE + 10}, new int[]{0, 0, 0},
+        new int[][]{new int[]{100, 100 + CELLSIZE * GROUP_SIZE},
+            new int[]{100 + CELLSIZE, 100 + CELLSIZE * 4},
+            new int[]{100 + CELLSIZE * 2, 100 + CELLSIZE * 5}},
+        new int[][]{new int[]{CELLSIZE, CELLSIZE},
+            new int[]{CELLSIZE, CELLSIZE},
+            new int[]{CELLSIZE, 10}});
+
+    /**
+     * start block offset is 2, read 3 * cellSize
+     */
+    testPlanReadPortions(2, GROUP_SIZE * CELLSIZE, 100,
+        new int[]{CELLSIZE, CELLSIZE, CELLSIZE},
+        new int[]{2, 0, 0},
+        new int[][]{new int[]{100, 100 + GROUP_SIZE * CELLSIZE - 2},
+            new int[]{100 + CELLSIZE - 2},
+            new int[]{100 + CELLSIZE * 2 - 2}},
+        new int[][]{new int[]{CELLSIZE - 2, 2},
+            new int[]{CELLSIZE},
+            new int[]{CELLSIZE}});
+
+    /**
+     * start block offset is 2, read 3 * cellSize + 10
+     */
+    testPlanReadPortions(2, GROUP_SIZE * CELLSIZE + 10, 0,
+        new int[]{CELLSIZE + 10, CELLSIZE, CELLSIZE},
+        new int[]{2, 0, 0},
+        new int[][]{new int[]{0, GROUP_SIZE * CELLSIZE - 2},
+            new int[]{CELLSIZE - 2},
+            new int[]{CELLSIZE * 2 - 2}},
+        new int[][]{new int[]{CELLSIZE - 2, 12},
+            new int[]{CELLSIZE},
+            new int[]{CELLSIZE}});
+
+    /**
+     * start block offset is cellSize * 2 - 1, read 5 * cellSize + 10
+     */
+    testPlanReadPortions(CELLSIZE * 2 - 1, 5 * CELLSIZE + 10, 0,
+        new int[]{CELLSIZE * 2, CELLSIZE + 10, CELLSIZE * 2},
+        new int[]{CELLSIZE, CELLSIZE - 1, 0},
+        new int[][]{new int[]{CELLSIZE + 1, 4 * CELLSIZE + 1},
+            new int[]{0, 2 * CELLSIZE + 1, 5 * CELLSIZE + 1},
+            new int[]{1, 3 * CELLSIZE + 1}},
+        new int[][]{new int[]{CELLSIZE, CELLSIZE},
+            new int[]{1, CELLSIZE, 9},
+            new int[]{CELLSIZE, CELLSIZE}});
+
+    /**
+     * start block offset is cellSize * 6 - 1, read 7 * cellSize + 10
+     */
+    testPlanReadPortions(CELLSIZE * 6 - 1, 7 * CELLSIZE + 10, 0,
+        new int[]{CELLSIZE * 3, CELLSIZE * 2 + 9, CELLSIZE * 2 + 1},
+        new int[]{CELLSIZE * 2, CELLSIZE * 2, CELLSIZE * 2 - 1},
+        new int[][]{new int[]{1, 3 * CELLSIZE + 1, 6 * CELLSIZE + 1},
+            new int[]{CELLSIZE + 1, 4 * CELLSIZE + 1, 7 * CELLSIZE + 1},
+            new int[]{0, 2 * CELLSIZE + 1, 5 * CELLSIZE + 1}},
+        new int[][]{new int[]{CELLSIZE, CELLSIZE, CELLSIZE},
+            new int[]{CELLSIZE, CELLSIZE, 9},
+            new int[]{1, CELLSIZE, CELLSIZE}});
+  }
+}
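To see where the expected arrays above come from, take the case "start block offset is 2, read 3 * cellSize" with buffer offset 100: the read covers cells 0 through 3 of the group. Cell 0 gives block 0 the byte range [2, CELLSIZE), cells 1 and 2 fill blocks 1 and 2, and the final 2 bytes fall in cell 3, which wraps back to block 0 at block offset CELLSIZE. Block 0 therefore starts at offset 2 and reads CELLSIZE bytes in total, split across the buffer ranges {100, CELLSIZE - 2} and {100 + 3 * CELLSIZE - 2, 2}. The sketch below (again illustrative, not the DFSStripedInputStream implementation; every name in it is invented) walks a read range cell by cell and accumulates exactly these per-block portions:

    import java.util.ArrayList;
    import java.util.List;

    public class PlanReadPortionsSketch {
      static final int GROUP_SIZE = 3;
      static final int CELLSIZE = 128 * 1024;

      public static void main(String[] args) {
        // The "start block offset is 2, read 3 * cellSize, buffer offset 100" case.
        int startInBlk = 2, length = GROUP_SIZE * CELLSIZE, bufferOffset = 100;

        long[] startOffsetInBlock = new long[GROUP_SIZE]; // first block offset hit
        int[] readLength = new int[GROUP_SIZE];           // total bytes per block
        List<List<int[]>> ranges = new ArrayList<>();     // {bufferOffset, length} pairs
        for (int i = 0; i < GROUP_SIZE; i++) {
          startOffsetInBlock[i] = -1;
          ranges.add(new ArrayList<int[]>());
        }

        int consumed = 0;
        long pos = startInBlk;               // logical offset within the block group
        while (consumed < length) {
          long cellIdx = pos / CELLSIZE;
          int blk = (int) (cellIdx % GROUP_SIZE);
          long offInBlk = (cellIdx / GROUP_SIZE) * CELLSIZE + pos % CELLSIZE;
          // Consume at most to the end of the current cell.
          int n = (int) Math.min(CELLSIZE - pos % CELLSIZE, (long) (length - consumed));
          if (startOffsetInBlock[blk] < 0) {
            startOffsetInBlock[blk] = offInBlk;
          }
          readLength[blk] += n;
          ranges.get(blk).add(new int[]{bufferOffset + consumed, n});
          consumed += n;
          pos += n;
        }

        // Block 0: starts at block offset 2 and reads CELLSIZE bytes via the
        // ranges {100, CELLSIZE - 2} and {100 + 3 * CELLSIZE - 2, 2}, matching
        // the test's expected values.
        System.out.println(startOffsetInBlock[0] + ", " + readLength[0]);
      }
    }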
http://git-wip-us.apache.org/repos/asf/hadoop/blob/515deb9e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFile.java
index 0032bdd..849e12e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFile.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
-import static org.apache.hadoop.hdfs.DFSStripedInputStream.ReadPortion;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -78,117 +77,6 @@ public class TestReadStripedFile {
     }
   }
 
-  private void testPlanReadPortions(int startInBlk, int length,
-      int bufferOffset, int[] readLengths, int[] offsetsInBlock,
-      int[][] bufferOffsets, int[][] bufferLengths) {
-    ReadPortion[] results = DFSStripedInputStream.planReadPortions(GROUP_SIZE,
-        CELLSIZE, startInBlk, length, bufferOffset);
-    assertEquals(GROUP_SIZE, results.length);
-
-    for (int i = 0; i < GROUP_SIZE; i++) {
-      assertEquals(readLengths[i], results[i].getReadLength());
-      assertEquals(offsetsInBlock[i], results[i].getStartOffsetInBlock());
-      final int[] bOffsets = results[i].getOffsets();
-      assertArrayEquals(bufferOffsets[i], bOffsets);
-      final int[] bLengths = results[i].getLengths();
-      assertArrayEquals(bufferLengths[i], bLengths);
-    }
-  }
-
-  /**
-   * Test {@link DFSStripedInputStream#planReadPortions}
-   */
-  @Test
-  public void testPlanReadPortions() {
-    /**
-     * start block offset is 0, read cellSize - 10
-     */
-    testPlanReadPortions(0, CELLSIZE - 10, 0,
-        new int[]{CELLSIZE - 10, 0, 0}, new int[]{0, 0, 0},
-        new int[][]{new int[]{0}, new int[]{}, new int[]{}},
-        new int[][]{new int[]{CELLSIZE - 10}, new int[]{}, new int[]{}});
-
-    /**
-     * start block offset is 0, read 3 * cellSize
-     */
-    testPlanReadPortions(0, GROUP_SIZE * CELLSIZE, 0,
-        new int[]{CELLSIZE, CELLSIZE, CELLSIZE}, new int[]{0, 0, 0},
-        new int[][]{new int[]{0}, new int[]{CELLSIZE}, new int[]{CELLSIZE * 2}},
-        new int[][]{new int[]{CELLSIZE}, new int[]{CELLSIZE}, new int[]{CELLSIZE}});
-
-    /**
-     * start block offset is 0, read cellSize + 10
-     */
-    testPlanReadPortions(0, CELLSIZE + 10, 0,
-        new int[]{CELLSIZE, 10, 0}, new int[]{0, 0, 0},
-        new int[][]{new int[]{0}, new int[]{CELLSIZE}, new int[]{}},
-        new int[][]{new int[]{CELLSIZE}, new int[]{10}, new int[]{}});
-
-    /**
-     * start block offset is 0, read 5 * cellSize + 10, buffer start offset is 100
-     */
-    testPlanReadPortions(0, 5 * CELLSIZE + 10, 100,
-        new int[]{CELLSIZE * 2, CELLSIZE * 2, CELLSIZE + 10}, new int[]{0, 0, 0},
-        new int[][]{new int[]{100, 100 + CELLSIZE * GROUP_SIZE},
-            new int[]{100 + CELLSIZE, 100 + CELLSIZE * 4},
-            new int[]{100 + CELLSIZE * 2, 100 + CELLSIZE * 5}},
-        new int[][]{new int[]{CELLSIZE, CELLSIZE},
-            new int[]{CELLSIZE, CELLSIZE},
-            new int[]{CELLSIZE, 10}});
-
-    /**
-     * start block offset is 2, read 3 * cellSize
-     */
-    testPlanReadPortions(2, GROUP_SIZE * CELLSIZE, 100,
-        new int[]{CELLSIZE, CELLSIZE, CELLSIZE},
-        new int[]{2, 0, 0},
-        new int[][]{new int[]{100, 100 + GROUP_SIZE * CELLSIZE - 2},
-            new int[]{100 + CELLSIZE - 2},
-            new int[]{100 + CELLSIZE * 2 - 2}},
-        new int[][]{new int[]{CELLSIZE - 2, 2},
-            new int[]{CELLSIZE},
-            new int[]{CELLSIZE}});
-
-    /**
-     * start block offset is 2, read 3 * cellSize + 10
-     */
-    testPlanReadPortions(2, GROUP_SIZE * CELLSIZE + 10, 0,
-        new int[]{CELLSIZE + 10, CELLSIZE, CELLSIZE},
-        new int[]{2, 0, 0},
-        new int[][]{new int[]{0, GROUP_SIZE * CELLSIZE - 2},
-            new int[]{CELLSIZE - 2},
-            new int[]{CELLSIZE * 2 - 2}},
-        new int[][]{new int[]{CELLSIZE - 2, 12},
-            new int[]{CELLSIZE},
-            new int[]{CELLSIZE}});
-
-    /**
-     * start block offset is cellSize * 2 - 1, read 5 * cellSize + 10
-     */
-    testPlanReadPortions(CELLSIZE * 2 - 1, 5 * CELLSIZE + 10, 0,
-        new int[]{CELLSIZE * 2, CELLSIZE + 10, CELLSIZE * 2},
-        new int[]{CELLSIZE, CELLSIZE - 1, 0},
-        new int[][]{new int[]{CELLSIZE + 1, 4 * CELLSIZE + 1},
-            new int[]{0, 2 * CELLSIZE + 1, 5 * CELLSIZE + 1},
-            new int[]{1, 3 * CELLSIZE + 1}},
-        new int[][]{new int[]{CELLSIZE, CELLSIZE},
-            new int[]{1, CELLSIZE, 9},
-            new int[]{CELLSIZE, CELLSIZE}});
-
-    /**
-     * start block offset is cellSize * 6 - 1, read 7 * cellSize + 10
-     */
-    testPlanReadPortions(CELLSIZE * 6 - 1, 7 * CELLSIZE + 10, 0,
-        new int[]{CELLSIZE * 3, CELLSIZE * 2 + 9, CELLSIZE * 2 + 1},
-        new int[]{CELLSIZE * 2, CELLSIZE * 2, CELLSIZE * 2 - 1},
-        new int[][]{new int[]{1, 3 * CELLSIZE + 1, 6 * CELLSIZE + 1},
-            new int[]{CELLSIZE + 1, 4 * CELLSIZE + 1, 7 * CELLSIZE + 1},
-            new int[]{0, 2 * CELLSIZE + 1, 5 * CELLSIZE + 1}},
-        new int[][]{new int[]{CELLSIZE, CELLSIZE, CELLSIZE},
-            new int[]{CELLSIZE, CELLSIZE, 9},
-            new int[]{1, CELLSIZE, CELLSIZE}});
-  }
-
   private LocatedStripedBlock createDummyLocatedBlock() {
     final long blockGroupID = -1048576;
     DatanodeInfo[] locs = new DatanodeInfo[TOTAL_SIZE];