hadoop-common-commits mailing list archives

From: z..@apache.org
Subject: [06/23] hadoop git commit: HDFS-7285. Erasure Coding Support inside HDFS.
Date: Wed, 12 Aug 2015 18:18:43 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2564af/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
new file mode 100644
index 0000000..f4efbcf
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestOfflineImageViewerWithStripedBlocks {
+  private static int dataBlocks = HdfsConstants.NUM_DATA_BLOCKS;
+  private static int parityBlocks = HdfsConstants.NUM_PARITY_BLOCKS;
+
+  private static MiniDFSCluster cluster;
+  private static DistributedFileSystem fs;
+  private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+  private static final int stripesPerBlock = 3;
+  private static final int blockSize = cellSize * stripesPerBlock;
+
+  @BeforeClass
+  public static void setup() throws IOException {
+    int numDNs = dataBlocks + parityBlocks + 2;
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
+    cluster.waitActive();
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null, 0);
+    fs = cluster.getFileSystem();
+    Path eczone = new Path("/eczone");
+    fs.mkdirs(eczone);
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test(timeout = 60000)
+  public void testFileEqualToOneStripe() throws Exception {
+    int numBytes = cellSize;
+    testFileSize(numBytes);
+  }
+
+  @Test(timeout = 60000)
+  public void testFileLessThanOneStripe() throws Exception {
+    int numBytes = cellSize - 100;
+    testFileSize(numBytes);
+  }
+
+  @Test(timeout = 60000)
+  public void testFileHavingMultipleBlocks() throws Exception {
+    int numBytes = blockSize * 3;
+    testFileSize(numBytes);
+  }
+
+  @Test(timeout = 60000)
+  public void testFileLargerThanABlockGroup1() throws IOException {
+    testFileSize(blockSize * dataBlocks + cellSize + 123);
+  }
+
+  @Test(timeout = 60000)
+  public void testFileLargerThanABlockGroup2() throws IOException {
+    testFileSize(blockSize * dataBlocks * 3 + cellSize * dataBlocks + cellSize
+        + 123);
+  }
+
+  @Test(timeout = 60000)
+  public void testFileFullBlockGroup() throws IOException {
+    testFileSize(blockSize * dataBlocks);
+  }
+
+  @Test(timeout = 60000)
+  public void testFileMoreThanOneStripe() throws Exception {
+    int numBytes = blockSize + blockSize / 2;
+    testFileSize(numBytes);
+  }
+
+  private void testFileSize(int numBytes) throws IOException,
+      UnresolvedLinkException, SnapshotAccessControlException {
+    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    File orgFsimage = null;
+    Path file = new Path("/eczone/striped");
+    FSDataOutputStream out = fs.create(file, true);
+    byte[] bytes = DFSTestUtil.generateSequentialBytes(0, numBytes);
+    out.write(bytes);
+    out.close();
+
+    // Write results to the fsimage file
+    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
+    fs.saveNamespace();
+
+    // Determine location of fsimage file
+    orgFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
+        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
+    if (orgFsimage == null) {
+      throw new RuntimeException("Didn't generate or can't find fsimage");
+    }
+    FSImageLoader loader = FSImageLoader.load(orgFsimage.getAbsolutePath());
+    String fileStatus = loader.getFileStatus("/eczone/striped");
+    long expectedFileSize = bytes.length;
+
+    // Verify the file size computed from the BlockInfoStriped entries
+    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
+    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
+    assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
+    long actualFileSize = 0;
+    for (BlockInfo blockInfo : fileNode.getBlocks()) {
+      assertTrue("Didn't find block striped information",
+          blockInfo instanceof BlockInfoStriped);
+      actualFileSize += blockInfo.getNumBytes();
+    }
+
+    assertEquals("Wrongly computed file size contains striped blocks",
+        expectedFileSize, actualFileSize);
+
+    // Verify the file length recorded in the file status JSON
+    String expectedLengthEntry = "\"length\":"
+        + String.valueOf(expectedFileSize);
+    assertTrue(
+        "Wrongly computed size for file containing striped blocks; file status: "
+            + fileStatus + ". Expected length entry: " + expectedLengthEntry,
+        fileStatus.contains(expectedLengthEntry));
+  }
+}

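The size checks above rest on one invariant: each BlockInfoStriped records the logical (pre-encoding) length of the data its block group covers, so the per-group lengths sum to the file length. As a minimal sketch of the block-group arithmetic the test sizes are chosen around, assuming the layout configured in setup() (a group holds blockSize * dataBlocks bytes of data); the helper is illustrative and not part of the patch:

    // Illustrative helper, not part of the patch: a striped file of numBytes
    // occupies ceil(numBytes / (blockSize * dataBlocks)) block groups, and the
    // last group covers only the remainder. This is why a size such as
    // blockSize * dataBlocks + cellSize + 123 crosses a group boundary.
    static long expectedBlockGroups(long numBytes, long blockSize, int dataBlocks) {
      long groupCapacity = blockSize * dataBlocks;           // data bytes per full group
      return (numBytes + groupCapacity - 1) / groupCapacity; // ceiling division
    }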
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2564af/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
new file mode 100644
index 0000000..5d85073
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
@@ -0,0 +1,276 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.util;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
+import static org.apache.hadoop.hdfs.util.StripedBlockUtil.*;
+
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Random;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+/**
+ * Need to cover the following combinations:
+ * 1. Block group size:
+ *  1.1 One byte
+ *  1.2 Smaller than cell
+ *  1.3 One full cell
+ *  1.4 x full cells, where x is smaller than the number of data blocks
+ *  1.5 x full cells plus a partial cell
+ *  1.6 One full stripe
+ *  1.7 One full stripe plus a partial cell
+ *  1.8 One full stripe plus x full cells
+ *  1.9 One full stripe plus x full cells plus a partial cell
+ *  1.10 y full stripes, but smaller than full block group size
+ *  1.11 Full block group size
+ *
+ * 2. Byte range start
+ *  2.1 Zero
+ *  2.2 Within first cell
+ *  2.3 End of first cell
+ *  2.4 Start of a middle* cell in the first stripe (* neither first nor last)
+ *  2.5 End of middle cell in the first stripe
+ *  2.6 Within a middle cell in the first stripe
+ *  2.7 Start of the last cell in the first stripe
+ *  2.8 Within the last cell in the first stripe
+ *  2.9 End of the last cell in the first stripe
+ *  2.10 Start of a middle stripe
+ *  2.11 Within a middle stripe
+ *  2.12 End of a middle stripe
+ *  2.13 Start of the last stripe
+ *  2.14 Within the last stripe
+ *  2.15 End of the last stripe (last byte)
+ *
+ * 3. Byte range length: same settings as block group size
+ *
+ * Covering everything would mean 11 x 15 x 11 = 1815 combinations; setup()
+ * currently exercises three start offsets (2.1-2.3). TODO: test parity block logic
+ */
+public class TestStripedBlockUtil {
+  private final short DATA_BLK_NUM = HdfsConstants.NUM_DATA_BLOCKS;
+  private final short PARITY_BLK_NUM = HdfsConstants.NUM_PARITY_BLOCKS;
+  private final short BLK_GROUP_WIDTH = DATA_BLK_NUM + PARITY_BLK_NUM;
+  private final int CELLSIZE = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+  private final int FULL_STRIPE_SIZE = DATA_BLK_NUM * CELLSIZE;
+  /** number of full stripes in a full block group */
+  private final int BLK_GROUP_STRIPE_NUM = 16;
+  private final ECSchema SCHEMA = ErasureCodingSchemaManager.
+      getSystemDefaultSchema();
+  private final Random random = new Random();
+
+  private int[] blockGroupSizes;
+  private int[] byteRangeStartOffsets;
+  private int[] byteRangeSizes;
+
+  @Before
+  public void setup() {
+    blockGroupSizes = new int[]{1, getDelta(CELLSIZE), CELLSIZE,
+        getDelta(DATA_BLK_NUM) * CELLSIZE,
+        getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
+        FULL_STRIPE_SIZE, FULL_STRIPE_SIZE + getDelta(CELLSIZE),
+        FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE,
+        FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
+        getDelta(BLK_GROUP_STRIPE_NUM) * FULL_STRIPE_SIZE,
+        BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE};
+    byteRangeStartOffsets = new int[] {0, getDelta(CELLSIZE), CELLSIZE - 1};
+    byteRangeSizes = new int[]{1, getDelta(CELLSIZE), CELLSIZE,
+        getDelta(DATA_BLK_NUM) * CELLSIZE,
+        getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
+        FULL_STRIPE_SIZE, FULL_STRIPE_SIZE + getDelta(CELLSIZE),
+        FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE,
+        FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
+        getDelta(BLK_GROUP_STRIPE_NUM) * FULL_STRIPE_SIZE,
+        BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE};
+  }
+
+  private int getDelta(int size) {
+    return 1 + random.nextInt(size - 2);
+  }
+  private byte hashIntToByte(int i) {
+    int BYTE_MASK = 0xff;
+    return (byte) (((i + 13) * 29) & BYTE_MASK);
+  }
+
+  private LocatedStripedBlock createDummyLocatedBlock(int bgSize) {
+    final long blockGroupID = -1048576;
+    DatanodeInfo[] locs = new DatanodeInfo[BLK_GROUP_WIDTH];
+    String[] storageIDs = new String[BLK_GROUP_WIDTH];
+    StorageType[] storageTypes = new StorageType[BLK_GROUP_WIDTH];
+    int[] indices = new int[BLK_GROUP_WIDTH];
+    for (int i = 0; i < BLK_GROUP_WIDTH; i++) {
+      indices[i] = (i + 2) % DATA_BLK_NUM;
+      // Location port always equal to logical index of a block,
+      // for easier verification
+      locs[i] = DFSTestUtil.getLocalDatanodeInfo(indices[i]);
+      storageIDs[i] = locs[i].getDatanodeUuid();
+      storageTypes[i] = StorageType.DISK;
+    }
+    return new LocatedStripedBlock(new ExtendedBlock("pool", blockGroupID,
+        bgSize, 1001), locs, storageIDs, storageTypes, indices, 0, false,
+        null);
+  }
+
+  private byte[][] createInternalBlkBuffers(int bgSize) {
+    byte[][] bufs = new byte[DATA_BLK_NUM + PARITY_BLK_NUM][];
+    int[] pos = new int[DATA_BLK_NUM + PARITY_BLK_NUM];
+    for (int i = 0; i < DATA_BLK_NUM + PARITY_BLK_NUM; i++) {
+      int bufSize = (int) getInternalBlockLength(
+          bgSize, CELLSIZE, DATA_BLK_NUM, i);
+      bufs[i] = new byte[bufSize];
+      pos[i] = 0;
+    }
+    int done = 0;
+    while (done < bgSize) {
+      Preconditions.checkState(done % CELLSIZE == 0);
+      StripingCell cell = new StripingCell(SCHEMA, CELLSIZE, done / CELLSIZE, 0);
+      int idxInStripe = cell.idxInStripe;
+      int size = Math.min(CELLSIZE, bgSize - done);
+      for (int i = 0; i < size; i++) {
+        bufs[idxInStripe][pos[idxInStripe] + i] = hashIntToByte(done + i);
+      }
+      done += size;
+      pos[idxInStripe] += size;
+    }
+
+    return bufs;
+  }
+
+  @Test
+  public void testParseDummyStripedBlock() {
+    LocatedStripedBlock lsb = createDummyLocatedBlock(
+        BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE);
+    LocatedBlock[] blocks = parseStripedBlockGroup(
+        lsb, CELLSIZE, DATA_BLK_NUM, PARITY_BLK_NUM);
+    assertEquals(DATA_BLK_NUM + PARITY_BLK_NUM, blocks.length);
+    for (int i = 0; i < DATA_BLK_NUM; i++) {
+      assertFalse(blocks[i].isStriped());
+      assertEquals(i,
+          BlockIdManager.getBlockIndex(blocks[i].getBlock().getLocalBlock()));
+      assertEquals(0, blocks[i].getStartOffset());
+      assertEquals(1, blocks[i].getLocations().length);
+      assertEquals(i, blocks[i].getLocations()[0].getIpcPort());
+      assertEquals(i, blocks[i].getLocations()[0].getXferPort());
+    }
+  }
+
+  private void verifyInternalBlocks(int numBytesInGroup, int[] expected) {
+    for (int i = 0; i < BLK_GROUP_WIDTH; i++) {
+      assertEquals(expected[i],
+          getInternalBlockLength(numBytesInGroup, CELLSIZE, DATA_BLK_NUM, i));
+    }
+  }
+
+  @Test
+  public void testGetInternalBlockLength() {
+    // A small delta that is smaller than a cell
+    final int delta = 10;
+
+    // Block group is smaller than a cell
+    verifyInternalBlocks(CELLSIZE - delta,
+        new int[] {CELLSIZE - delta, 0, 0, 0, 0, 0,
+            CELLSIZE - delta, CELLSIZE - delta, CELLSIZE - delta});
+
+    // Block group is exactly as large as a cell
+    verifyInternalBlocks(CELLSIZE,
+        new int[] {CELLSIZE, 0, 0, 0, 0, 0,
+            CELLSIZE, CELLSIZE, CELLSIZE});
+
+    // Block group is a little larger than a cell
+    verifyInternalBlocks(CELLSIZE + delta,
+        new int[] {CELLSIZE, delta, 0, 0, 0, 0,
+            CELLSIZE, CELLSIZE, CELLSIZE});
+
+    // Block group contains multiple stripes and ends at stripe boundary
+    verifyInternalBlocks(2 * DATA_BLK_NUM * CELLSIZE,
+        new int[] {2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
+            2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
+            2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE});
+
+    // Block group contains multiple stripes and ends at cell boundary
+    // (not ending at stripe boundary)
+    verifyInternalBlocks(2 * DATA_BLK_NUM * CELLSIZE + CELLSIZE,
+        new int[] {3 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
+            2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
+            3 * CELLSIZE, 3 * CELLSIZE, 3 * CELLSIZE});
+
+    // Block group contains multiple stripes and doesn't end at cell boundary
+    verifyInternalBlocks(2 * DATA_BLK_NUM * CELLSIZE - delta,
+        new int[] {2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
+            2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE - delta,
+            2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE});
+  }
+
+  /**
+   * Test dividing a byte range into aligned stripes and verify the aligned
+   * ranges can be translated back to the byte range.
+   */
+  @Test
+  public void testDivideByteRangeIntoStripes() {
+    byte[] assembled = new byte[BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE];
+    for (int bgSize : blockGroupSizes) {
+      LocatedStripedBlock blockGroup = createDummyLocatedBlock(bgSize);
+      byte[][] internalBlkBufs = createInternalBlkBuffers(bgSize);
+      for (int brStart : byteRangeStartOffsets) {
+        for (int brSize : byteRangeSizes) {
+          if (brStart + brSize > bgSize) {
+            continue;
+          }
+          AlignedStripe[] stripes = divideByteRangeIntoStripes(SCHEMA,
+              CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled, 0);
+
+          for (AlignedStripe stripe : stripes) {
+            for (int i = 0; i < DATA_BLK_NUM; i++) {
+              StripingChunk chunk = stripe.chunks[i];
+              if (chunk == null || chunk.state != StripingChunk.REQUESTED) {
+                continue;
+              }
+              int done = 0;
+              for (int j = 0; j < chunk.byteArray.getLengths().length; j++) {
+                System.arraycopy(internalBlkBufs[i],
+                    (int) stripe.getOffsetInBlock() + done, assembled,
+                    chunk.byteArray.getOffsets()[j],
+                    chunk.byteArray.getLengths()[j]);
+                done += chunk.byteArray.getLengths()[j];
+              }
+            }
+          }
+          for (int i = 0; i < brSize; i++) {
+            if (hashIntToByte(brStart + i) != assembled[i]) {
+              System.out.println("Oops");
+            }
+            assertEquals("Byte at " + (brStart + i) + " should be the same",
+                hashIntToByte(brStart + i), assembled[i]);
+          }
+        }
+      }
+    }
+  }
+
+}

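The expected arrays in testGetInternalBlockLength encode a simple rule for how a block group's data bytes map onto its internal blocks. The following is an independent reconstruction of that rule, assuming the RS-6-3 layout above; it mirrors what the test expects of StripedBlockUtil#getInternalBlockLength, but it is a sketch, not the patch's implementation:

    // Reconstruction of the internal-block-length rule implied by the expected
    // values in testGetInternalBlockLength (a sketch, not the patch's code).
    final class InternalBlockLengthSketch {
      // dataSize is the block group's logical data length; idx is the internal
      // block's position in the group (data blocks first, then parity blocks).
      static long length(long dataSize, int cellSize, int dataBlkNum, int idx) {
        final int stripeSize = cellSize * dataBlkNum;     // data bytes per stripe
        final long lastStripeLen = dataSize % stripeSize;
        if (lastStripeLen == 0) {
          // The group ends on a stripe boundary: every internal block,
          // parity included, holds an equal share of cells.
          return dataSize / dataBlkNum;
        }
        final long fullStripes = dataSize / stripeSize;
        // In the final, partial stripe a data block holds at most one cell;
        // a parity block is as long as the first (longest) data block.
        final int i = idx < dataBlkNum ? idx : 0;
        final long lastCell = Math.min(cellSize,
            Math.max(0, lastStripeLen - (long) i * cellSize));
        return fullStripes * cellSize + lastCell;
      }
    }

For instance, at 2 * DATA_BLK_NUM * CELLSIZE + CELLSIZE bytes this yields 3 * CELLSIZE for block 0, 2 * CELLSIZE for blocks 1 through 5, and 3 * CELLSIZE for each parity block, matching the fifth expectation above.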
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2564af/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 391f190..303d063 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -65,7 +65,7 @@ public class TestJsonUtil {
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
         now, now + 10, new FsPermission((short) 0644), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
-        HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
+        HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null, 0);
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status  = " + status);
     System.out.println("fstatus = " + fstatus);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2564af/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
new file mode 100644
index 0000000..70020d5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -0,0 +1,377 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="testConf.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+  <!-- Normal mode is test. To run just the commands and dump the output
+       to the log, set it to nocompare -->
+  <mode>test</mode>
+
+  <!--  Comparator types:
+           ExactComparator
+           SubstringComparator
+           RegexpComparator
+           TokenComparator
+           -->
+  <tests>
+
+  <!-- Test help options -->
+    <test>
+      <description>help: help for erasure coding command</description>
+      <test-commands>
+        <ec-admin-command>-help</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Usage: hdfs erasurecode [generic options]</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>help: createZone command</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -help createZone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^[ \t]*Create a zone to encode files using a specified schema(
)*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-createZone \[-s &lt;schemaName&gt;\] \[-c &lt;cellSize&gt;\]
&lt;path&gt;(.)*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>help: getZone command</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -help getZone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Get information about the EC zone at specified path</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-getZone &lt;path&gt;(.)*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>help: listSchemas command</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -help listSchemas</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Get the list of ECSchemas supported</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-listSchemas (.)*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+  <!-- Test erasure code commands -->
+    <test>
+      <description>createZone : create a zone to encode files</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /eczone</command>
+        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /eczone</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>EC Zone created successfully at NAMENODE/eczone</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>createZone : create a zone twice</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /eczone</command>
+        <ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /eczone</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Directory /eczone is already in an erasure coding zone</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>createZone : default schema</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /eczone</command>
+        <ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /eczone</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>getZone : get information about the EC zone at specified path not
in zone</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /noec</command>
+        <ec-admin-command>-fs NAMENODE -getZone /noec</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /noec</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Path NAMENODE/noec is not in EC zone</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>getZone : get information about the EC zone at specified path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /eczone</command>
+        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /eczone</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>getZone : get EC zone at specified file path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /eczone</command>
+        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+        <command>-fs NAMENODE -touchz /eczone/ecfile</command>
+        <ec-admin-command>-fs NAMENODE -getZone /eczone/ecfile</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /eczone/ecfile</command>
+        <command>-fs NAMENODE -rmdir /eczone</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>listSchemas : get the list of ECSchemas supported</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -listSchemas</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>RS-6-3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+<!-- Test illegal parameters -->
+    <test>
+      <description>createZone : illegal parameters - path is missing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /eczone</command>
+        <ec-admin-command>-fs NAMENODE -createZone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /eczone</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-createZone: &lt;path&gt; is missing(.)*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>createZone : illegal parameters - schema name is missing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /eczone</command>
+        <ec-admin-command>-fs NAMENODE -createZone -s</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /eczone</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-createZone: option -s requires 1 argument(.)*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>createZone : illegal parameters - too many arguments</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /eczone</command>
+        <ec-admin-command>-fs NAMENODE -createZone /eczone1 /eczone2</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /eczone</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>-createZone: Too many arguments</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>createZone : illegal parameters - invalidschema</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /eczone</command>
+        <ec-admin-command>-fs NAMENODE -createZone -s invalidschema /eczone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /eczone</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Schema 'invalidschema' does not match any of the supported
schemas. Please select any one of [RS-6-3]</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>createZone : illegal parameters - no such file</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^createZone: `/eczone': No such file or directory(.)*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>getZone : illegal parameters - path is missing</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -getZone </ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-getZone: &lt;path&gt; is missing(.)*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>getZone : illegal parameters - too many arguments</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -getZone /eczone /eczone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /eczone</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>-getZone: Too many arguments</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>getZone : illegal parameters - no such file</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^getZone: `/eczone': No such file or directory(.)*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>listSchemas : illegal parameters - too many parameters</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -listSchemas /eczone</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>-listSchemas: Too many parameters</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+  </tests>
+</configuration>

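Each <test-commands> block corresponds to a shell session against a running NameNode: the harness substitutes NAMENODE with the cluster's file system URI, runs <command> entries through the ordinary file system shell, and routes <ec-admin-command> entries through the erasurecode admin tool named in the help text above. Run by hand, the zone-creation test would look roughly as follows (the URI and the exact output framing are illustrative; the comparators only match the quoted substrings):

    $ hdfs dfs -fs hdfs://nn.example.com:8020 -mkdir /eczone
    $ hdfs erasurecode -fs hdfs://nn.example.com:8020 -createZone -s RS-6-3 /eczone
    EC Zone created successfully at hdfs://nn.example.com:8020/eczone
    $ hdfs erasurecode -fs hdfs://nn.example.com:8020 -getZone /eczone
    Dir: /eczone, Schema: ECSchema=[Name=RS-6-3, ...]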
