hadoop-common-commits mailing list archives

From sj...@apache.org
Subject [34/50] [abbrv] hadoop git commit: HDFS-11117. Refactor striped file tests to allow flexible testing of erasure coding policies. Contributed by Sammi Chen
Date Tue, 22 Nov 2016 21:33:51 GMT
HDFS-11117. Refactor striped file tests to allow flexible testing of erasure coding policies. Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6ffa116
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6ffa116
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6ffa116

Branch: refs/heads/HADOOP-13070
Commit: f6ffa11635c47030a91d420da942da1fb425eb49
Parents: c0b1a44
Author: Kai Zheng <kai.zheng@intel.com>
Authored: Fri Nov 18 16:08:59 2016 +0600
Committer: Kai Zheng <kai.zheng@intel.com>
Committed: Fri Nov 18 16:08:59 2016 +0600

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |   9 +-
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 116 ++++-----
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 180 ++++++-------
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  12 +-
 .../TestDFSStripedOutputStreamWithFailure.java  | 255 ++++++++++++-------
 ...estDFSStripedOutputStreamWithFailure000.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure010.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure020.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure030.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure040.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure050.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure060.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure070.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure080.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure090.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure100.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure110.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure120.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure130.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure140.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure150.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure160.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure170.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure180.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure190.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure200.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure210.java |   9 +-
 .../hdfs/TestDecommissionWithStriped.java       |  46 ++--
 .../TestErasureCodingPolicyWithSnapshot.java    |   9 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java    |  11 +-
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   |  72 +++---
 .../hdfs/TestReadStripedFileWithDecoding.java   |  29 ++-
 .../TestReadStripedFileWithMissingBlocks.java   |  29 ++-
 .../hadoop/hdfs/TestReconstructStripedFile.java |  15 +-
 .../hdfs/TestSafeModeWithStripedFile.java       |  20 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |  24 +-
 .../hdfs/TestWriteStripedFileWithFailure.java   |  16 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java    |   3 +-
 .../hdfs/server/balancer/TestBalancer.java      |  23 +-
 .../blockmanagement/TestBlockInfoStriped.java   |  78 +++---
 .../TestBlockTokenWithDFSStriped.java           |  16 +-
 ...constructStripedBlocksWithRackAwareness.java |  58 +++--
 .../TestSequentialBlockGroupId.java             |  11 +-
 .../TestSortLocatedStripedBlock.java            |  45 ++--
 .../hdfs/server/datanode/TestBlockRecovery.java |  57 ++---
 .../TestDataNodeErasureCodingMetrics.java       |  29 ++-
 .../hadoop/hdfs/server/mover/TestMover.java     |  22 +-
 .../TestAddOverReplicatedStripedBlocks.java     |  71 +++---
 .../namenode/TestAddStripedBlockInFBR.java      |  22 +-
 .../server/namenode/TestAddStripedBlocks.java   |  72 +++---
 .../server/namenode/TestFSEditLogLoader.java    |  13 +-
 .../namenode/TestQuotaWithStripedBlocks.java    |  20 +-
 .../namenode/TestReconstructStripedBlocks.java  |  53 ++--
 .../server/namenode/TestStripedINodeFile.java   |   4 +-
 ...TestOfflineImageViewerWithStripedBlocks.java |  27 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  | 138 +++++-----
 56 files changed, 972 insertions(+), 810 deletions(-)
----------------------------------------------------------------------
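
The core of the refactor: these tests previously pulled NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, and BLOCK_STRIPED_CELL_SIZE from shared static fields in StripedFileTestUtil, which hard-wired every test to the system default policy. After this change, each test class derives its layout parameters from an ErasureCodingPolicy instance, so a different policy can be exercised by swapping the policy lookup. A minimal sketch of the pattern, using only calls that appear in the diff below:

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

    public class EcPolicyParamsSketch {
      // Any policy could be substituted here; the default policy is what the
      // refactored tests use.
      private final ErasureCodingPolicy ecPolicy =
          ErasureCodingPolicyManager.getSystemDefaultPolicy();
      // Layout parameters computed from the policy rather than hard-coded.
      private final int dataBlocks = ecPolicy.getNumDataUnits();
      private final int parityBlocks = ecPolicy.getNumParityUnits();
      private final int cellSize = ecPolicy.getCellSize();
      private final int stripesPerBlock = 4;                    // test-chosen
      private final int blockSize = cellSize * stripesPerBlock; // internal block
      private final int blockGroupSize = blockSize * dataBlocks;
    }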


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 945d2c8..13e2656 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -111,6 +111,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -171,8 +172,6 @@ import org.mockito.internal.util.reflection.Whitebox;
 import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.annotations.VisibleForTesting;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
 
 /** Utilities for HDFS tests */
 public class DFSTestUtil {
@@ -1971,9 +1970,11 @@ public class DFSTestUtil {
       }
     }
 
+    final ErasureCodingPolicy ecPolicy =
+        fs.getErasureCodingPolicy(new Path(file));
     // 2. RECEIVED_BLOCK IBR
     long blockSize = isStripedBlock ?
-        numStripes * BLOCK_STRIPED_CELL_SIZE : len;
+        numStripes * ecPolicy.getCellSize() : len;
     for (int i = 0; i < groupSize; i++) {
       DataNode dn = dataNodes.get(i);
       final Block block = new Block(lastBlock.getBlockId() + i,
@@ -1987,7 +1988,7 @@ public class DFSTestUtil {
       }
     }
     long bytes = isStripedBlock ?
-        numStripes * BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS : len;
+        numStripes * ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() : len;
     lastBlock.setNumBytes(bytes);
     return lastBlock;
   }
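
The hunk above replaces the static cell-size and data-unit constants with a per-file policy lookup when sizing the last striped block. A hedged sketch of the same arithmetic; the RS(6,3) numbers and 64 KB cell below are assumptions for illustration, not values fixed by this commit:

    public class StripedSizeSketch {
      public static void main(String[] args) {
        int numStripes = 4;
        int cellSize = 64 * 1024; // assumed cell size
        int numDataUnits = 6;     // assumed RS(6,3) data units
        // Bytes in one internal (per-DataNode) block, as used for the
        // RECEIVED_BLOCK IBR above:
        long blockSize = (long) numStripes * cellSize;                 // 256 KB
        // Logical bytes spanned by the whole block group:
        long groupBytes = (long) numStripes * cellSize * numDataUnits; // 1.5 MB
        System.out.println(blockSize + " " + groupBytes);
      }
    }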

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 0b036fd..311ba7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream;
 import org.apache.hadoop.io.IOUtils;
@@ -57,23 +56,6 @@ import static org.junit.Assert.assertEquals;
 
 public class StripedFileTestUtil {
   public static final Log LOG = LogFactory.getLog(StripedFileTestUtil.class);
-  /*
-   * These values correspond to the values used by the system default erasure
-   * coding policy.
-   */
-  public static final ErasureCodingPolicy TEST_EC_POLICY =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
-  public static final short NUM_DATA_BLOCKS =
-      (short) TEST_EC_POLICY.getNumDataUnits();
-  public static final short NUM_PARITY_BLOCKS =
-      (short) TEST_EC_POLICY.getNumParityUnits();
-  public static final int BLOCK_STRIPED_CELL_SIZE =
-      TEST_EC_POLICY.getCellSize();
-
-  static int stripesPerBlock = 4;
-  public static int blockSize = BLOCK_STRIPED_CELL_SIZE * stripesPerBlock;
-  static int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
-  static int BLOCK_GROUP_SIZE = blockSize * NUM_DATA_BLOCKS;
 
   public static byte[] generateBytes(int cnt) {
     byte[] bytes = new byte[cnt];
@@ -96,10 +78,15 @@ public class StripedFileTestUtil {
 
   static void verifyPread(FileSystem fs, Path srcPath,  int fileLength,
       byte[] expected, byte[] buf) throws IOException {
+    final ErasureCodingPolicy ecPolicy =
+        ((DistributedFileSystem)fs).getErasureCodingPolicy(srcPath);
     try (FSDataInputStream in = fs.open(srcPath)) {
-      int[] startOffsets = {0, 1, BLOCK_STRIPED_CELL_SIZE - 102, BLOCK_STRIPED_CELL_SIZE, BLOCK_STRIPED_CELL_SIZE + 102,
-          BLOCK_STRIPED_CELL_SIZE * (NUM_DATA_BLOCKS - 1), BLOCK_STRIPED_CELL_SIZE * (NUM_DATA_BLOCKS - 1) + 102,
-          BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS, fileLength - 102, fileLength - 1};
+      int[] startOffsets = {0, 1, ecPolicy.getCellSize() - 102,
+          ecPolicy.getCellSize(), ecPolicy.getCellSize() + 102,
+          ecPolicy.getCellSize() * (ecPolicy.getNumDataUnits() - 1),
+          ecPolicy.getCellSize() * (ecPolicy.getNumDataUnits() - 1) + 102,
+          ecPolicy.getCellSize() * ecPolicy.getNumDataUnits(),
+          fileLength - 102, fileLength - 1};
       for (int startOffset : startOffsets) {
         startOffset = Math.max(0, Math.min(startOffset, fileLength - 1));
         int remaining = fileLength - startOffset;
@@ -153,8 +140,8 @@ public class StripedFileTestUtil {
     }
   }
 
-  static void verifySeek(FileSystem fs, Path srcPath, int fileLength)
-      throws IOException {
+  static void verifySeek(FileSystem fs, Path srcPath, int fileLength,
+      ErasureCodingPolicy ecPolicy, int blkGroupSize) throws IOException {
     try (FSDataInputStream in = fs.open(srcPath)) {
       // seek to 1/2 of content
       int pos = fileLength / 2;
@@ -168,21 +155,21 @@ public class StripedFileTestUtil {
       pos = 0;
       assertSeekAndRead(in, pos, fileLength);
 
-      if (fileLength > BLOCK_STRIPED_CELL_SIZE) {
+      if (fileLength > ecPolicy.getCellSize()) {
         // seek to cellSize boundary
-        pos = BLOCK_STRIPED_CELL_SIZE - 1;
+        pos = ecPolicy.getCellSize() - 1;
         assertSeekAndRead(in, pos, fileLength);
       }
 
-      if (fileLength > BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS) {
+      if (fileLength > ecPolicy.getCellSize() * ecPolicy.getNumDataUnits()) {
         // seek to striped cell group boundary
-        pos = BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS - 1;
+        pos = ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() - 1;
         assertSeekAndRead(in, pos, fileLength);
       }
 
-      if (fileLength > blockSize * NUM_DATA_BLOCKS) {
+      if (fileLength > blkGroupSize) {
         // seek to striped block group boundary
-        pos = blockSize * NUM_DATA_BLOCKS - 1;
+        pos = blkGroupSize - 1;
         assertSeekAndRead(in, pos, fileLength);
       }
 
@@ -244,13 +231,16 @@ public class StripedFileTestUtil {
   * If the length of blockGroup is less than a full stripe, it returns the
   * number of actual data internal blocks; otherwise the policy's data units.
    */
-  public static short getRealDataBlockNum(int numBytes) {
-    return (short) Math.min(NUM_DATA_BLOCKS,
-        (numBytes - 1) / BLOCK_STRIPED_CELL_SIZE + 1);
+  public static short getRealDataBlockNum(int numBytesInStrip,
+      ErasureCodingPolicy ecPolicy) {
+    return (short) Math.min(ecPolicy.getNumDataUnits(),
+        (numBytesInStrip - 1) / ecPolicy.getCellSize() + 1);
   }
 
-  public static short getRealTotalBlockNum(int numBytes) {
-    return (short) (getRealDataBlockNum(numBytes) + NUM_PARITY_BLOCKS);
+  public static short getRealTotalBlockNum(int numBytesInStrip,
+      ErasureCodingPolicy ecPolicy) {
+    return (short) (getRealDataBlockNum(numBytesInStrip, ecPolicy) +
+        ecPolicy.getNumParityUnits());
   }
 
   public static void waitBlockGroupsReported(DistributedFileSystem fs,
@@ -267,14 +257,15 @@ public class StripedFileTestUtil {
     boolean success;
     final int ATTEMPTS = 40;
     int count = 0;
-
+    final ErasureCodingPolicy ecPolicy =
+        fs.getErasureCodingPolicy(new Path(src));
     do {
       success = true;
       count++;
       LocatedBlocks lbs = fs.getClient().getLocatedBlocks(src, 0);
       for (LocatedBlock lb : lbs.getLocatedBlocks()) {
-        short expected = (short) (getRealTotalBlockNum((int) lb.getBlockSize())
-            - numDeadDNs);
+        short expected = (short) (getRealTotalBlockNum((int) lb.getBlockSize(),
+            ecPolicy) - numDeadDNs);
         int reported = lb.getLocations().length;
         if (reported < expected){
           success = false;
@@ -357,7 +348,8 @@ public class StripedFileTestUtil {
   }
 
   static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
-      List<DatanodeInfo> killedList, List<Long> oldGSList) throws IOException {
+      List<DatanodeInfo> killedList, List<Long> oldGSList, int blkGroupSize)
+      throws IOException {
 
     StripedFileTestUtil.verifyLength(dfs, srcPath, length);
     List<List<LocatedBlock>> blockGroupList = new ArrayList<>();
@@ -365,10 +357,14 @@ public class StripedFileTestUtil {
         Long.MAX_VALUE);
     int expectedNumGroup = 0;
     if (length > 0) {
-      expectedNumGroup = (length - 1) / BLOCK_GROUP_SIZE + 1;
+      expectedNumGroup = (length - 1) / blkGroupSize + 1;
     }
     assertEquals(expectedNumGroup, lbs.getLocatedBlocks().size());
 
+    final ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(srcPath);
+    final int cellSize = ecPolicy.getCellSize();
+    final int dataBlkNum = ecPolicy.getNumDataUnits();
+    final int parityBlkNum = ecPolicy.getNumParityUnits();
     int index = 0;
     for (LocatedBlock firstBlock : lbs.getLocatedBlocks()) {
       Assert.assertTrue(firstBlock instanceof LocatedStripedBlock);
@@ -380,39 +376,39 @@ public class StripedFileTestUtil {
       Assert.assertTrue(s, gs >= oldGS);
 
       LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup(
-          (LocatedStripedBlock) firstBlock, BLOCK_STRIPED_CELL_SIZE,
-          NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS);
+          (LocatedStripedBlock) firstBlock, cellSize,
+          dataBlkNum, parityBlkNum);
       blockGroupList.add(Arrays.asList(blocks));
     }
 
     // test each block group
     for (int group = 0; group < blockGroupList.size(); group++) {
       final boolean isLastGroup = group == blockGroupList.size() - 1;
-      final int groupSize = !isLastGroup? BLOCK_GROUP_SIZE
-          : length - (blockGroupList.size() - 1)*BLOCK_GROUP_SIZE;
-      final int numCellInGroup = (groupSize - 1)/BLOCK_STRIPED_CELL_SIZE + 1;
-      final int lastCellIndex = (numCellInGroup - 1) % NUM_DATA_BLOCKS;
-      final int lastCellSize = groupSize - (numCellInGroup - 1)*BLOCK_STRIPED_CELL_SIZE;
+      final int groupSize = !isLastGroup? blkGroupSize
+          : length - (blockGroupList.size() - 1)*blkGroupSize;
+      final int numCellInGroup = (groupSize - 1) / cellSize + 1;
+      final int lastCellIndex = (numCellInGroup - 1) % dataBlkNum;
+      final int lastCellSize = groupSize - (numCellInGroup - 1) * cellSize;
 
       //get the data of this block
       List<LocatedBlock> blockList = blockGroupList.get(group);
-      byte[][] dataBlockBytes = new byte[NUM_DATA_BLOCKS][];
-      byte[][] parityBlockBytes = new byte[NUM_PARITY_BLOCKS][];
+      byte[][] dataBlockBytes = new byte[dataBlkNum][];
+      byte[][] parityBlockBytes = new byte[parityBlkNum][];
 
       Set<Integer> checkSet = new HashSet<>();
       // for each block, use BlockReader to read data
       for (int i = 0; i < blockList.size(); i++) {
-        final int j = i >= NUM_DATA_BLOCKS? 0: i;
-        final int numCellInBlock = (numCellInGroup - 1)/NUM_DATA_BLOCKS
+        final int j = i >= dataBlkNum? 0: i;
+        final int numCellInBlock = (numCellInGroup - 1) / dataBlkNum
             + (j <= lastCellIndex? 1: 0);
-        final int blockSize = numCellInBlock*BLOCK_STRIPED_CELL_SIZE
-            + (isLastGroup && j == lastCellIndex? lastCellSize - BLOCK_STRIPED_CELL_SIZE: 0);
+        final int blockSize = numCellInBlock * cellSize
+            + (isLastGroup && j == lastCellIndex? lastCellSize - cellSize: 0);
 
         final byte[] blockBytes = new byte[blockSize];
-        if (i < NUM_DATA_BLOCKS) {
+        if (i < dataBlkNum) {
           dataBlockBytes[i] = blockBytes;
         } else {
-          parityBlockBytes[i - NUM_DATA_BLOCKS] = blockBytes;
+          parityBlockBytes[i - dataBlkNum] = blockBytes;
         }
 
         final LocatedBlock lb = blockList.get(i);
@@ -440,7 +436,7 @@ public class StripedFileTestUtil {
       LOG.info("Internal blocks to check: " + checkSet);
 
       // check data
-      final int groupPosInFile = group*BLOCK_GROUP_SIZE;
+      final int groupPosInFile = group * blkGroupSize;
       for (int i = 0; i < dataBlockBytes.length; i++) {
         boolean killed = false;
         if (!checkSet.contains(i)) {
@@ -449,7 +445,7 @@ public class StripedFileTestUtil {
         final byte[] actual = dataBlockBytes[i];
         for (int posInBlk = 0; posInBlk < actual.length; posInBlk++) {
           final long posInFile = StripedBlockUtil.offsetInBlkToOffsetInBG(
-              BLOCK_STRIPED_CELL_SIZE, NUM_DATA_BLOCKS, posInBlk, i) + groupPosInFile;
+              cellSize, dataBlkNum, posInBlk, i) + groupPosInFile;
           Assert.assertTrue(posInFile < length);
           final byte expected = getByte(posInFile);
 
@@ -469,13 +465,14 @@ public class StripedFileTestUtil {
       // check parity
       verifyParityBlocks(dfs.getConf(),
           lbs.getLocatedBlocks().get(group).getBlockSize(),
-          BLOCK_STRIPED_CELL_SIZE, dataBlockBytes, parityBlockBytes, checkSet);
+          cellSize, dataBlockBytes, parityBlockBytes, checkSet,
+          ecPolicy.getCodecName());
     }
   }
 
   static void verifyParityBlocks(Configuration conf, final long size,
       final int cellSize, byte[][] dataBytes, byte[][] parityBytes,
-      Set<Integer> checkSet) {
+      Set<Integer> checkSet, String codecName) {
     // verify the parity blocks
     int parityBlkSize = (int) StripedBlockUtil.getInternalBlockLength(
         size, cellSize, dataBytes.length, dataBytes.length);
@@ -496,8 +493,7 @@ public class StripedFileTestUtil {
     ErasureCoderOptions coderOptions = new ErasureCoderOptions(
         dataBytes.length, parityBytes.length);
     final RawErasureEncoder encoder =
-        CodecUtil.createRawEncoder(conf, TEST_EC_POLICY.getCodecName(),
-            coderOptions);
+        CodecUtil.createRawEncoder(conf, codecName, coderOptions);
     encoder.encode(dataBytes, expectedParityBytes);
     for (int i = 0; i < parityBytes.length; i++) {
       if (checkSet.contains(i + dataBytes.length)){
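
getRealDataBlockNum above is now a capped ceiling division over policy parameters: ceil(numBytesInStrip / cellSize), bounded by the policy's data-unit count. A small self-contained sketch, again with an assumed RS(6,3) policy and 64 KB cell:

    public class RealDataBlockNumSketch {
      static final int NUM_DATA_UNITS = 6;    // assumed
      static final int CELL_SIZE = 64 * 1024; // assumed

      // Mirrors the refactored getRealDataBlockNum.
      static short realDataBlockNum(int numBytesInStrip) {
        return (short) Math.min(NUM_DATA_UNITS,
            (numBytesInStrip - 1) / CELL_SIZE + 1);
      }

      public static void main(String[] args) {
        System.out.println(realDataBlockNum(1));              // 1: one byte, one cell
        System.out.println(realDataBlockNum(CELL_SIZE));      // 1: exactly one full cell
        System.out.println(realDataBlockNum(CELL_SIZE + 1));  // 2: spills into a second cell
        System.out.println(realDataBlockNum(10 * CELL_SIZE)); // 6: capped at the data units
      }
    }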

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index 6af9e7c..3b46c66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -66,19 +66,19 @@ public class TestDFSStripedInputStream {
   private Path filePath = new Path(dirPath, "file");
   private final ErasureCodingPolicy ecPolicy =
       ErasureCodingPolicyManager.getSystemDefaultPolicy();
-  private final short DATA_BLK_NUM = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private final short PARITY_BLK_NUM = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  private final int CELLSIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private final int NUM_STRIPE_PER_BLOCK = 2;
-  private final int INTERNAL_BLOCK_SIZE = NUM_STRIPE_PER_BLOCK * CELLSIZE;
-  private final int BLOCK_GROUP_SIZE =  DATA_BLK_NUM * INTERNAL_BLOCK_SIZE;
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripesPerBlock = 2;
+  private final int blockSize = stripesPerBlock * cellSize;
+  private final int blockGroupSize =  dataBlocks * blockSize;
 
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
   @Before
   public void setup() throws IOException {
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, INTERNAL_BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     if (ErasureCodeNative.isNativeCodeLoaded()) {
       conf.set(
@@ -87,7 +87,7 @@ public class TestDFSStripedInputStream {
     }
     SimulatedFSDataset.setFactory(conf);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
-        DATA_BLK_NUM + PARITY_BLK_NUM).build();
+        dataBlocks + parityBlocks).build();
     cluster.waitActive();
     for (DataNode dn : cluster.getDataNodes()) {
       DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
@@ -112,9 +112,9 @@ public class TestDFSStripedInputStream {
   public void testRefreshBlock() throws Exception {
     final int numBlocks = 4;
     DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
-        NUM_STRIPE_PER_BLOCK, false);
+        stripesPerBlock, false);
     LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
-        filePath.toString(), 0, BLOCK_GROUP_SIZE * numBlocks);
+        filePath.toString(), 0, blockGroupSize * numBlocks);
     final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
         filePath.toString(), false, ecPolicy, null);
 
@@ -122,8 +122,8 @@ public class TestDFSStripedInputStream {
     for (LocatedBlock aLbList : lbList) {
       LocatedStripedBlock lsb = (LocatedStripedBlock) aLbList;
       LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(lsb,
-          CELLSIZE, DATA_BLK_NUM, PARITY_BLK_NUM);
-      for (int j = 0; j < DATA_BLK_NUM; j++) {
+          cellSize, dataBlocks, parityBlocks);
+      for (int j = 0; j < dataBlocks; j++) {
         LocatedBlock refreshed = in.refreshLocatedBlock(blks[j]);
         assertEquals(blks[j].getBlock(), refreshed.getBlock());
         assertEquals(blks[j].getStartOffset(), refreshed.getStartOffset());
@@ -136,18 +136,18 @@ public class TestDFSStripedInputStream {
   public void testPread() throws Exception {
     final int numBlocks = 2;
     DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
-        NUM_STRIPE_PER_BLOCK, false);
+        stripesPerBlock, false);
     LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
-        filePath.toString(), 0, BLOCK_GROUP_SIZE * numBlocks);
-    int fileLen = BLOCK_GROUP_SIZE * numBlocks;
+        filePath.toString(), 0, blockGroupSize * numBlocks);
+    int fileLen = blockGroupSize * numBlocks;
 
     byte[] expected = new byte[fileLen];
     assertEquals(numBlocks, lbs.getLocatedBlocks().size());
     for (int bgIdx = 0; bgIdx < numBlocks; bgIdx++) {
       LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(bgIdx));
-      for (int i = 0; i < DATA_BLK_NUM; i++) {
+      for (int i = 0; i < dataBlocks; i++) {
         Block blk = new Block(bg.getBlock().getBlockId() + i,
-            NUM_STRIPE_PER_BLOCK * CELLSIZE,
+            stripesPerBlock * cellSize,
             bg.getBlock().getGenerationStamp());
         blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
         cluster.injectBlocks(i, Arrays.asList(blk),
@@ -155,12 +155,12 @@ public class TestDFSStripedInputStream {
       }
 
       /** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
-      for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
-        for (int j = 0; j < DATA_BLK_NUM; j++) {
-          for (int k = 0; k < CELLSIZE; k++) {
-            int posInBlk = i * CELLSIZE + k;
-            int posInFile = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
-            expected[bgIdx*BLOCK_GROUP_SIZE + posInFile] =
+      for (int i = 0; i < stripesPerBlock; i++) {
+        for (int j = 0; j < dataBlocks; j++) {
+          for (int k = 0; k < cellSize; k++) {
+            int posInBlk = i * cellSize + k;
+            int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
+            expected[bgIdx * blockGroupSize + posInFile] =
                 SimulatedFSDataset.simulatedByte(
                     new Block(bg.getBlock().getBlockId() + j), posInBlk);
           }
@@ -170,9 +170,9 @@ public class TestDFSStripedInputStream {
     DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
         filePath.toString(), false, ecPolicy, null);
 
-    int[] startOffsets = {0, 1, CELLSIZE - 102, CELLSIZE, CELLSIZE + 102,
-        CELLSIZE*DATA_BLK_NUM, CELLSIZE*DATA_BLK_NUM + 102,
-        BLOCK_GROUP_SIZE - 102, BLOCK_GROUP_SIZE, BLOCK_GROUP_SIZE + 102,
+    int[] startOffsets = {0, 1, cellSize - 102, cellSize, cellSize + 102,
+        cellSize * dataBlocks, cellSize * dataBlocks + 102,
+        blockGroupSize - 102, blockGroupSize, blockGroupSize + 102,
         fileLen - 1};
     for (int startOffset : startOffsets) {
       startOffset = Math.max(0, Math.min(startOffset, fileLen - 1));
@@ -192,17 +192,17 @@ public class TestDFSStripedInputStream {
   @Test
   public void testPreadWithDNFailure() throws Exception {
     final int numBlocks = 4;
-    final int failedDNIdx = DATA_BLK_NUM - 1;
+    final int failedDNIdx = dataBlocks - 1;
     DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
-        NUM_STRIPE_PER_BLOCK, false);
+        stripesPerBlock, false);
     LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
-        filePath.toString(), 0, BLOCK_GROUP_SIZE);
+        filePath.toString(), 0, blockGroupSize);
 
     assert lbs.get(0) instanceof LocatedStripedBlock;
     LocatedStripedBlock bg = (LocatedStripedBlock)(lbs.get(0));
-    for (int i = 0; i < DATA_BLK_NUM + PARITY_BLK_NUM; i++) {
+    for (int i = 0; i < dataBlocks + parityBlocks; i++) {
       Block blk = new Block(bg.getBlock().getBlockId() + i,
-          NUM_STRIPE_PER_BLOCK * CELLSIZE,
+          stripesPerBlock * cellSize,
           bg.getBlock().getGenerationStamp());
       blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
       cluster.injectBlocks(i, Arrays.asList(blk),
@@ -211,15 +211,15 @@ public class TestDFSStripedInputStream {
     DFSStripedInputStream in =
         new DFSStripedInputStream(fs.getClient(), filePath.toString(), false,
             ecPolicy, null);
-    int readSize = BLOCK_GROUP_SIZE;
+    int readSize = blockGroupSize;
     byte[] readBuffer = new byte[readSize];
     byte[] expected = new byte[readSize];
     /** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
-    for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
-      for (int j = 0; j < DATA_BLK_NUM; j++) {
-        for (int k = 0; k < CELLSIZE; k++) {
-          int posInBlk = i * CELLSIZE + k;
-          int posInFile = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
+    for (int i = 0; i < stripesPerBlock; i++) {
+      for (int j = 0; j < dataBlocks; j++) {
+        for (int k = 0; k < cellSize; k++) {
+          int posInBlk = i * cellSize + k;
+          int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
           expected[posInFile] = SimulatedFSDataset.simulatedByte(
               new Block(bg.getBlock().getBlockId() + j), posInBlk);
         }
@@ -227,32 +227,32 @@ public class TestDFSStripedInputStream {
     }
 
     ErasureCoderOptions coderOptions = new ErasureCoderOptions(
-        DATA_BLK_NUM, PARITY_BLK_NUM);
+        dataBlocks, parityBlocks);
     RawErasureDecoder rawDecoder = CodecUtil.createRawDecoder(conf,
         ecPolicy.getCodecName(), coderOptions);
 
     // Update the expected content for decoded data
-    int[] missingBlkIdx = new int[PARITY_BLK_NUM];
+    int[] missingBlkIdx = new int[parityBlocks];
     for (int i = 0; i < missingBlkIdx.length; i++) {
       if (i == 0) {
         missingBlkIdx[i] = failedDNIdx;
       } else {
-        missingBlkIdx[i] = DATA_BLK_NUM + i;
+        missingBlkIdx[i] = dataBlocks + i;
       }
     }
     cluster.stopDataNode(failedDNIdx);
-    for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
-      byte[][] decodeInputs = new byte[DATA_BLK_NUM + PARITY_BLK_NUM][CELLSIZE];
-      byte[][] decodeOutputs = new byte[missingBlkIdx.length][CELLSIZE];
-      for (int j = 0; j < DATA_BLK_NUM; j++) {
-        int posInBuf = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE;
+    for (int i = 0; i < stripesPerBlock; i++) {
+      byte[][] decodeInputs = new byte[dataBlocks + parityBlocks][cellSize];
+      byte[][] decodeOutputs = new byte[missingBlkIdx.length][cellSize];
+      for (int j = 0; j < dataBlocks; j++) {
+        int posInBuf = i * cellSize * dataBlocks + j * cellSize;
         if (j != failedDNIdx) {
-          System.arraycopy(expected, posInBuf, decodeInputs[j], 0, CELLSIZE);
+          System.arraycopy(expected, posInBuf, decodeInputs[j], 0, cellSize);
         }
       }
-      for (int j = DATA_BLK_NUM; j < DATA_BLK_NUM + PARITY_BLK_NUM; j++) {
-        for (int k = 0; k < CELLSIZE; k++) {
-          int posInBlk = i * CELLSIZE + k;
+      for (int j = dataBlocks; j < dataBlocks + parityBlocks; j++) {
+        for (int k = 0; k < cellSize; k++) {
+          int posInBlk = i * cellSize + k;
           decodeInputs[j][k] = SimulatedFSDataset.simulatedByte(
               new Block(bg.getBlock().getBlockId() + j), posInBlk);
         }
@@ -261,8 +261,8 @@ public class TestDFSStripedInputStream {
         decodeInputs[m] = null;
       }
       rawDecoder.decode(decodeInputs, missingBlkIdx, decodeOutputs);
-      int posInBuf = i * CELLSIZE * DATA_BLK_NUM + failedDNIdx * CELLSIZE;
-      System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, CELLSIZE);
+      int posInBuf = i * cellSize * dataBlocks + failedDNIdx * cellSize;
+      System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, cellSize);
     }
 
     int delta = 10;
@@ -278,8 +278,8 @@ public class TestDFSStripedInputStream {
     // |c_0      |c_1    |c_2 |c_3 |c_4      |c_5         |
     // |256K - 10|missing|256K|256K|256K - 10|not in range|
     done += in.read(delta, readBuffer, delta,
-        CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta);
-    assertEquals(CELLSIZE * (DATA_BLK_NUM - 1) - delta, done);
+        cellSize * (dataBlocks - 1) - 2 * delta);
+    assertEquals(cellSize * (dataBlocks - 1) - delta, done);
     assertArrayEquals(Arrays.copyOf(expected, done),
         Arrays.copyOf(readBuffer, done));
     // read the rest
@@ -298,14 +298,14 @@ public class TestDFSStripedInputStream {
   private void testStatefulRead(boolean useByteBuffer,
       boolean cellMisalignPacket) throws Exception {
     final int numBlocks = 2;
-    final int fileSize = numBlocks * BLOCK_GROUP_SIZE;
+    final int fileSize = numBlocks * blockGroupSize;
     if (cellMisalignPacket) {
       conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT + 1);
       tearDown();
       setup();
     }
     DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
-        NUM_STRIPE_PER_BLOCK, false);
+        stripesPerBlock, false);
     LocatedBlocks lbs = fs.getClient().namenode.
         getBlockLocations(filePath.toString(), 0, fileSize);
 
@@ -313,9 +313,9 @@ public class TestDFSStripedInputStream {
     for (LocatedBlock lb : lbs.getLocatedBlocks()) {
       assert lb instanceof LocatedStripedBlock;
       LocatedStripedBlock bg = (LocatedStripedBlock)(lb);
-      for (int i = 0; i < DATA_BLK_NUM; i++) {
+      for (int i = 0; i < dataBlocks; i++) {
         Block blk = new Block(bg.getBlock().getBlockId() + i,
-            NUM_STRIPE_PER_BLOCK * CELLSIZE,
+            stripesPerBlock * cellSize,
             bg.getBlock().getGenerationStamp());
         blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
         cluster.injectBlocks(i, Arrays.asList(blk),
@@ -331,12 +331,12 @@ public class TestDFSStripedInputStream {
 
     for (LocatedBlock bg : lbs.getLocatedBlocks()) {
       /** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
-      for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
-        for (int j = 0; j < DATA_BLK_NUM; j++) {
-          for (int k = 0; k < CELLSIZE; k++) {
-            int posInBlk = i * CELLSIZE + k;
+      for (int i = 0; i < stripesPerBlock; i++) {
+        for (int j = 0; j < dataBlocks; j++) {
+          for (int k = 0; k < cellSize; k++) {
+            int posInBlk = i * cellSize + k;
             int posInFile = (int) bg.getStartOffset() +
-                i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
+                i * cellSize * dataBlocks + j * cellSize + k;
             expected[posInFile] = SimulatedFSDataset.simulatedByte(
                 new Block(bg.getBlock().getBlockId() + j), posInBlk);
           }
@@ -369,17 +369,17 @@ public class TestDFSStripedInputStream {
   @Test
   public void testStatefulReadWithDNFailure() throws Exception {
     final int numBlocks = 4;
-    final int failedDNIdx = DATA_BLK_NUM - 1;
+    final int failedDNIdx = dataBlocks - 1;
     DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
-        NUM_STRIPE_PER_BLOCK, false);
+        stripesPerBlock, false);
     LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
-        filePath.toString(), 0, BLOCK_GROUP_SIZE);
+        filePath.toString(), 0, blockGroupSize);
 
     assert lbs.get(0) instanceof LocatedStripedBlock;
     LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
-    for (int i = 0; i < DATA_BLK_NUM + PARITY_BLK_NUM; i++) {
+    for (int i = 0; i < dataBlocks + parityBlocks; i++) {
       Block blk = new Block(bg.getBlock().getBlockId() + i,
-          NUM_STRIPE_PER_BLOCK * CELLSIZE,
+          stripesPerBlock * cellSize,
           bg.getBlock().getGenerationStamp());
       blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
       cluster.injectBlocks(i, Arrays.asList(blk),
@@ -388,15 +388,15 @@ public class TestDFSStripedInputStream {
     DFSStripedInputStream in =
         new DFSStripedInputStream(fs.getClient(), filePath.toString(), false,
             ecPolicy, null);
-    int readSize = BLOCK_GROUP_SIZE;
+    int readSize = blockGroupSize;
     byte[] readBuffer = new byte[readSize];
     byte[] expected = new byte[readSize];
     /** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
-    for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
-      for (int j = 0; j < DATA_BLK_NUM; j++) {
-        for (int k = 0; k < CELLSIZE; k++) {
-          int posInBlk = i * CELLSIZE + k;
-          int posInFile = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
+    for (int i = 0; i < stripesPerBlock; i++) {
+      for (int j = 0; j < dataBlocks; j++) {
+        for (int k = 0; k < cellSize; k++) {
+          int posInBlk = i * cellSize + k;
+          int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
           expected[posInFile] = SimulatedFSDataset.simulatedByte(
               new Block(bg.getBlock().getBlockId() + j), posInBlk);
         }
@@ -404,32 +404,32 @@ public class TestDFSStripedInputStream {
     }
 
     ErasureCoderOptions coderOptions = new ErasureCoderOptions(
-        DATA_BLK_NUM, PARITY_BLK_NUM);
+        dataBlocks, parityBlocks);
     RawErasureDecoder rawDecoder = CodecUtil.createRawDecoder(conf,
         ecPolicy.getCodecName(), coderOptions);
 
     // Update the expected content for decoded data
-    int[] missingBlkIdx = new int[PARITY_BLK_NUM];
+    int[] missingBlkIdx = new int[parityBlocks];
     for (int i = 0; i < missingBlkIdx.length; i++) {
       if (i == 0) {
         missingBlkIdx[i] = failedDNIdx;
       } else {
-        missingBlkIdx[i] = DATA_BLK_NUM + i;
+        missingBlkIdx[i] = dataBlocks + i;
       }
     }
     cluster.stopDataNode(failedDNIdx);
-    for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
-      byte[][] decodeInputs = new byte[DATA_BLK_NUM + PARITY_BLK_NUM][CELLSIZE];
-      byte[][] decodeOutputs = new byte[missingBlkIdx.length][CELLSIZE];
-      for (int j = 0; j < DATA_BLK_NUM; j++) {
-        int posInBuf = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE;
+    for (int i = 0; i < stripesPerBlock; i++) {
+      byte[][] decodeInputs = new byte[dataBlocks + parityBlocks][cellSize];
+      byte[][] decodeOutputs = new byte[missingBlkIdx.length][cellSize];
+      for (int j = 0; j < dataBlocks; j++) {
+        int posInBuf = i * cellSize * dataBlocks + j * cellSize;
         if (j != failedDNIdx) {
-          System.arraycopy(expected, posInBuf, decodeInputs[j], 0, CELLSIZE);
+          System.arraycopy(expected, posInBuf, decodeInputs[j], 0, cellSize);
         }
       }
-      for (int j = DATA_BLK_NUM; j < DATA_BLK_NUM + PARITY_BLK_NUM; j++) {
-        for (int k = 0; k < CELLSIZE; k++) {
-          int posInBlk = i * CELLSIZE + k;
+      for (int j = dataBlocks; j < dataBlocks + parityBlocks; j++) {
+        for (int k = 0; k < cellSize; k++) {
+          int posInBlk = i * cellSize + k;
           decodeInputs[j][k] = SimulatedFSDataset.simulatedByte(
               new Block(bg.getBlock().getBlockId() + j), posInBlk);
         }
@@ -438,8 +438,8 @@ public class TestDFSStripedInputStream {
         decodeInputs[m] = null;
       }
       rawDecoder.decode(decodeInputs, missingBlkIdx, decodeOutputs);
-      int posInBuf = i * CELLSIZE * DATA_BLK_NUM + failedDNIdx * CELLSIZE;
-      System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, CELLSIZE);
+      int posInBuf = i * cellSize * dataBlocks + failedDNIdx * cellSize;
+      System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, cellSize);
     }
 
     int delta = 10;
@@ -452,13 +452,13 @@ public class TestDFSStripedInputStream {
     // both head and trail cells are partial
     // |c_0      |c_1    |c_2 |c_3 |c_4      |c_5         |
     // |256K - 10|missing|256K|256K|256K - 10|not in range|
-    while (done < (CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta)) {
+    while (done < (cellSize * (dataBlocks - 1) - 2 * delta)) {
       int ret = in.read(readBuffer, delta,
-          CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta);
+          cellSize * (dataBlocks - 1) - 2 * delta);
       assertTrue(ret > 0);
       done += ret;
     }
-    assertEquals(CELLSIZE * (DATA_BLK_NUM - 1) - delta, done);
+    assertEquals(cellSize * (dataBlocks - 1) - delta, done);
     // read the rest
 
     int restSize;
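
The expected-buffer loops in this test rely on the striped layout mapping: byte k of stripe i in internal data block j sits at file offset i * cellSize * dataBlocks + j * cellSize + k. A round-trip sketch with an assumed toy layout (3 data blocks, 4-byte cells) to make the mapping concrete:

    public class StripedOffsetSketch {
      public static void main(String[] args) {
        int dataBlocks = 3;      // assumed toy layout
        int cellSize = 4;        // assumed toy layout
        int i = 1, j = 2, k = 3; // stripe, data block, offset within cell
        int posInBlk = i * cellSize + k;                              // 7
        int posInFile = i * cellSize * dataBlocks + j * cellSize + k; // 23
        // Inverting the mapping recovers the coordinates.
        int stripe = posInFile / (cellSize * dataBlocks);             // 1
        int block = (posInFile % (cellSize * dataBlocks)) / cellSize; // 2
        int offset = posInFile % cellSize;                            // 3
        System.out.println(posInBlk + " " + stripe + " " + block + " " + offset);
      }
    }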

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 4f0a36c..47d5e02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -25,6 +25,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
 import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
@@ -45,13 +47,15 @@ public class TestDFSStripedOutputStream {
     GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.ALL);
   }
 
-  private int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
 
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private Configuration conf;
-  private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
+  private final int cellSize = ecPolicy.getCellSize();
   private final int stripesPerBlock = 4;
   private final int blockSize = cellSize * stripesPerBlock;
 
@@ -169,6 +173,6 @@ public class TestDFSStripedOutputStream {
     StripedFileTestUtil.waitBlockGroupsReported(fs, src);
 
     StripedFileTestUtil.checkData(fs, testPath, writeBytes,
-        new ArrayList<DatanodeInfo>(), null);
+        new ArrayList<DatanodeInfo>(), null, blockSize * dataBlocks);
   }
 }
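
With the new signatures, callers pass the policy and the policy-derived block-group size explicitly instead of reading shared statics. A hedged fragment showing how a test in this package would invoke the updated helpers; the surrounding fields (fs, ecPolicy, blockSize, dataBlocks, writeBytes, testPath) are assumed to be set up as in the diff above:

    // Sketch only: assumes the test-class fields shown in the diff above.
    int blockGroupSize = blockSize * dataBlocks;
    // verifySeek now needs the policy (cell boundaries) and the group size.
    StripedFileTestUtil.verifySeek(fs, testPath, writeBytes, ecPolicy,
        blockGroupSize);
    // checkData takes the block-group size as its last argument.
    StripedFileTestUtil.checkData(fs, testPath, writeBytes,
        new ArrayList<DatanodeInfo>(), null, blockGroupSize);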

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index 23809c3..53059ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
@@ -44,6 +46,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
 import org.junit.Assert;
+import org.junit.Assume;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -59,7 +62,9 @@ import java.util.concurrent.atomic.AtomicInteger;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-
+/**
+ * Test striped file write operation with data node failures.
+ */
 public class TestDFSStripedOutputStreamWithFailure {
   public static final Log LOG = LogFactory.getLog(
       TestDFSStripedOutputStreamWithFailure.class);
@@ -71,53 +76,44 @@ public class TestDFSStripedOutputStreamWithFailure {
         .getLogger().setLevel(Level.ALL);
   }
 
-  private static final int NUM_DATA_BLOCKS = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private static final int NUM_PARITY_BLOCKS = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  private static final int CELL_SIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private static final int STRIPES_PER_BLOCK = 4;
-  private static final int BLOCK_SIZE = CELL_SIZE * STRIPES_PER_BLOCK;
-  private static final int BLOCK_GROUP_SIZE = BLOCK_SIZE * NUM_DATA_BLOCKS;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripesPerBlock = 4;
+  private final int blockSize = cellSize * stripesPerBlock;
+  private final int blockGroupSize = blockSize * dataBlocks;
 
   private static final int FLUSH_POS =
       9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1;
 
-  static {
-    System.out.println("NUM_DATA_BLOCKS  = " + NUM_DATA_BLOCKS);
-    System.out.println("NUM_PARITY_BLOCKS= " + NUM_PARITY_BLOCKS);
-    System.out.println("CELL_SIZE        = " + CELL_SIZE
-        + " (=" + StringUtils.TraditionalBinaryPrefix.long2String(CELL_SIZE, "B", 2) + ")");
-    System.out.println("BLOCK_SIZE       = " + BLOCK_SIZE
-        + " (=" + StringUtils.TraditionalBinaryPrefix.long2String(BLOCK_SIZE, "B", 2) + ")");
-    System.out.println("BLOCK_GROUP_SIZE = " + BLOCK_GROUP_SIZE
-        + " (=" + StringUtils.TraditionalBinaryPrefix.long2String(BLOCK_GROUP_SIZE, "B", 2) + ")");
-  }
-
-  static List<Integer> newLengths() {
-    final List<Integer> lengths = new ArrayList<>();
-    lengths.add(FLUSH_POS + 2);
+  List<Integer> newLengths() {
+    final List<Integer> lens = new ArrayList<>();
+    lens.add(FLUSH_POS + 2);
     for(int b = 0; b <= 2; b++) {
-      for(int c = 0; c < STRIPES_PER_BLOCK*NUM_DATA_BLOCKS; c++) {
+      for(int c = 0; c < stripesPerBlock * dataBlocks; c++) {
         for(int delta = -1; delta <= 1; delta++) {
-          final int length = b*BLOCK_GROUP_SIZE + c*CELL_SIZE + delta;
-          System.out.println(lengths.size() + ": length=" + length
+          final int length = b * blockGroupSize + c * cellSize + delta;
+          System.out.println(lens.size() + ": length=" + length
               + ", (b, c, d) = (" + b + ", " + c + ", " + delta + ")");
-          lengths.add(length);
+          lens.add(length);
         }
       }
     }
-    return lengths;
+    return lens;
   }
 
-  private static final int[][] dnIndexSuite = getDnIndexSuite();
+  private final int[][] dnIndexSuite = getDnIndexSuite();
 
-  private static int[][] getDnIndexSuite() {
+  private int[][] getDnIndexSuite() {
     final int maxNumLevel = 2;
     final int maxPerLevel = 8;
     List<List<Integer>> allLists = new ArrayList<>();
-    int numIndex = NUM_PARITY_BLOCKS;
+    int numIndex = parityBlocks;
     for (int i = 0; i < maxNumLevel && numIndex > 1; i++) {
       List<List<Integer>> lists =
-          combinations(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS, numIndex);
+          combinations(dataBlocks + parityBlocks, numIndex);
       if (lists.size() > maxPerLevel) {
         Collections.shuffle(lists);
         lists = lists.subList(0, maxPerLevel);
@@ -125,15 +121,15 @@ public class TestDFSStripedOutputStreamWithFailure {
       allLists.addAll(lists);
       numIndex--;
     }
-    int[][] dnIndexSuite = new int[allLists.size()][];
-    for (int i = 0; i < dnIndexSuite.length; i++) {
+    int[][] dnIndexArray = new int[allLists.size()][];
+    for (int i = 0; i < dnIndexArray.length; i++) {
       int[] list = new int[allLists.get(i).size()];
       for (int j = 0; j < list.length; j++) {
         list[j] = allLists.get(i).get(j);
       }
-      dnIndexSuite[i] = list;
+      dnIndexArray[i] = list;
     }
-    return dnIndexSuite;
+    return dnIndexArray;
   }
 
   // get all combinations of k integers from {0,...,n-1}
@@ -171,10 +167,10 @@ public class TestDFSStripedOutputStreamWithFailure {
     return positions;
   }
 
-  private static final List<Integer> LENGTHS = newLengths();
+  private final List<Integer> lengths = newLengths();
 
-  static Integer getLength(int i) {
-    return i >= 0 && i < LENGTHS.size()? LENGTHS.get(i): null;
+  Integer getLength(int i) {
+    return i >= 0 && i < lengths.size()? lengths.get(i): null;
   }
 
   private static final Random RANDOM = new Random();
@@ -185,7 +181,18 @@ public class TestDFSStripedOutputStreamWithFailure {
       + TestDFSStripedOutputStreamWithFailure.class.getSimpleName());
 
   private void setup(Configuration conf) throws IOException {
-    final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
+    System.out.println("NUM_DATA_BLOCKS  = " + dataBlocks);
+    System.out.println("NUM_PARITY_BLOCKS= " + parityBlocks);
+    System.out.println("CELL_SIZE        = " + cellSize + " (=" +
+        StringUtils.TraditionalBinaryPrefix.long2String(cellSize, "B", 2)
+        + ")");
+    System.out.println("BLOCK_SIZE       = " + blockSize + " (=" +
+        StringUtils.TraditionalBinaryPrefix.long2String(blockSize, "B", 2)
+        + ")");
+    System.out.println("BLOCK_GROUP_SIZE = " + blockGroupSize + " (=" +
+        StringUtils.TraditionalBinaryPrefix.long2String(blockGroupSize, "B", 2)
+        + ")");
+    final int numDNs = dataBlocks + parityBlocks;
     if (ErasureCodeNative.isNativeCodeLoaded()) {
       conf.set(
           CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
@@ -195,7 +202,7 @@ public class TestDFSStripedOutputStreamWithFailure {
     cluster.waitActive();
     dfs = cluster.getFileSystem();
     dfs.mkdirs(dir);
-    dfs.setErasureCodingPolicy(dir, null);
+    dfs.setErasureCodingPolicy(dir, ecPolicy);
   }
 
   private void tearDown() {
@@ -206,7 +213,7 @@ public class TestDFSStripedOutputStreamWithFailure {
 
   private HdfsConfiguration newHdfsConfiguration() {
     final HdfsConfiguration conf = new HdfsConfiguration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
         false);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -220,12 +227,12 @@ public class TestDFSStripedOutputStreamWithFailure {
   }
 
   /**
-   * Randomly pick a length and run tests with multiple data failures
+   * Randomly pick a length and run tests with multiple data failures.
    * TODO: enable this later
    */
   //@Test(timeout=240000)
   public void testMultipleDatanodeFailureRandomLength() throws Exception {
-    int lenIndex = RANDOM.nextInt(LENGTHS.size());
+    int lenIndex = RANDOM.nextInt(lengths.size());
     LOG.info("run testMultipleDatanodeFailureRandomLength with length index: "
         + lenIndex);
     runTestWithMultipleFailure(getLength(lenIndex));
@@ -233,7 +240,7 @@ public class TestDFSStripedOutputStreamWithFailure {
 
   @Test(timeout=240000)
   public void testBlockTokenExpired() throws Exception {
-    final int length = NUM_DATA_BLOCKS * (BLOCK_SIZE - CELL_SIZE);
+    final int length = dataBlocks * (blockSize - cellSize);
     final HdfsConfiguration conf = newHdfsConfiguration();
 
     conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
@@ -241,7 +248,7 @@ public class TestDFSStripedOutputStreamWithFailure {
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
     // Set short retry timeouts so this test runs faster
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
-    for (int dn = 0; dn < NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS; dn += 2) {
+    for (int dn = 0; dn < dataBlocks + parityBlocks; dn += 2) {
       try {
         setup(conf);
         runTest(length, new int[]{length / 2}, new int[]{dn}, true);
@@ -258,20 +265,21 @@ public class TestDFSStripedOutputStreamWithFailure {
   public void testAddBlockWhenNoSufficientDataBlockNumOfNodes()
       throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     try {
       setup(conf);
       ArrayList<DataNode> dataNodes = cluster.getDataNodes();
      // shut down a few datanodes so there are not enough datanodes for the
      // required number of data blocks
       int numDatanodes = dataNodes.size();
-      while (numDatanodes >= NUM_DATA_BLOCKS) {
+      while (numDatanodes >= dataBlocks) {
         cluster.stopDataNode(0);
         numDatanodes--;
       }
       cluster.restartNameNodes();
       cluster.triggerHeartbeats();
-      DatanodeInfo[] info = dfs.getClient().datanodeReport(DatanodeReportType.LIVE);
+      DatanodeInfo[] info = dfs.getClient().datanodeReport(
+          DatanodeReportType.LIVE);
       assertEquals("Mismatches number of live Dns ", numDatanodes, info.length);
       final Path dirFile = new Path(dir, "ecfile");
       FSDataOutputStream out;
@@ -284,8 +292,8 @@ public class TestDFSStripedOutputStreamWithFailure {
       } catch (IOException ioe) {
         // expected
         GenericTestUtils.assertExceptionContains("Failed to get " +
-            NUM_DATA_BLOCKS + " nodes from namenode: blockGroupSize= " +
-            (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS) + ", blocks.length= " +
+            dataBlocks + " nodes from namenode: blockGroupSize= " +
+            (dataBlocks + parityBlocks) + ", blocks.length= " +
             numDatanodes, ioe);
       }
     } finally {
@@ -294,14 +302,15 @@ public class TestDFSStripedOutputStreamWithFailure {
   }
 
   @Test(timeout = 90000)
-  public void testAddBlockWhenNoSufficientParityNumOfNodes() throws IOException {
+  public void testAddBlockWhenNoSufficientParityNumOfNodes()
+      throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     try {
       setup(conf);
       ArrayList<DataNode> dataNodes = cluster.getDataNodes();
      // shut down a few datanodes to avoid writing parity blocks
-      int killDns = (NUM_PARITY_BLOCKS - 1);
+      int killDns = (parityBlocks - 1);
       int numDatanodes = dataNodes.size() - killDns;
       for (int i = 0; i < killDns; i++) {
         cluster.stopDataNode(i);
@@ -312,11 +321,12 @@ public class TestDFSStripedOutputStreamWithFailure {
           DatanodeReportType.LIVE);
       assertEquals("Mismatches number of live Dns ", numDatanodes, info.length);
       Path srcPath = new Path(dir, "testAddBlockWhenNoSufficientParityNodes");
-      int fileLength = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE - 1000;
+      int fileLength = cellSize - 1000;
       final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
       DFSTestUtil.writeFile(dfs, srcPath, new String(expected));
       LOG.info("writing finished. Seek and read the file to verify.");
-      StripedFileTestUtil.verifySeek(dfs, srcPath, fileLength);
+      StripedFileTestUtil.verifySeek(dfs, srcPath, fileLength, ecPolicy,
+          blockGroupSize);
     } finally {
       tearDown();
     }
@@ -324,7 +334,7 @@ public class TestDFSStripedOutputStreamWithFailure {
 
   void runTest(final int length) {
     final HdfsConfiguration conf = newHdfsConfiguration();
-    for (int dn = 0; dn < NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS; dn++) {
+    for (int dn = 0; dn < dataBlocks + parityBlocks; dn++) {
       try {
         LOG.info("runTest: dn=" + dn + ", length=" + length);
         setup(conf);
@@ -346,7 +356,8 @@ public class TestDFSStripedOutputStreamWithFailure {
       int[] killPos = getKillPositions(length, dnIndex.length);
       try {
         LOG.info("runTestWithMultipleFailure: length==" + length + ", killPos="
-            + Arrays.toString(killPos) + ", dnIndex=" + Arrays.toString(dnIndex));
+            + Arrays.toString(killPos) + ", dnIndex="
+            + Arrays.toString(dnIndex));
         setup(conf);
         runTest(length, killPos, dnIndex, false);
       } catch (Throwable e) {
@@ -361,7 +372,7 @@ public class TestDFSStripedOutputStreamWithFailure {
   }
 
   /**
-   * runTest implementation
+   * runTest implementation.
    * @param length file length
    * @param killPos killing positions in ascending order
    * @param dnIndex DN index to kill when meets killing positions
@@ -371,8 +382,9 @@ public class TestDFSStripedOutputStreamWithFailure {
   private void runTest(final int length, final int[] killPos,
       final int[] dnIndex, final boolean tokenExpire) throws Exception {
     if (killPos[0] <= FLUSH_POS) {
-      LOG.warn("killPos=" + Arrays.toString(killPos) + " <= FLUSH_POS=" + FLUSH_POS
-          + ", length=" + length + ", dnIndex=" + Arrays.toString(dnIndex));
+      LOG.warn("killPos=" + Arrays.toString(killPos) + " <= FLUSH_POS="
+          + FLUSH_POS + ", length=" + length + ", dnIndex="
+          + Arrays.toString(dnIndex));
       return; //skip test
     }
     Preconditions.checkArgument(length > killPos[0], "length=%s <= killPos=%s",
@@ -398,12 +410,13 @@ public class TestDFSStripedOutputStreamWithFailure {
     final DFSStripedOutputStream stripedOut
         = (DFSStripedOutputStream)out.getWrappedStream();
 
-    long firstGS = -1;  // first GS of this block group which never proceeds blockRecovery
+    // first GS of this block group which never proceeds blockRecovery
+    long firstGS = -1;
     long oldGS = -1; // the old GS before bumping
     List<Long> gsList = new ArrayList<>();
     final List<DatanodeInfo> killedDN = new ArrayList<>();
     int numKilled=0;
-    for(; pos.get() < length; ) {
+    for(; pos.get() < length;) {
       final int i = pos.getAndIncrement();
       if (numKilled < killPos.length &&  i == killPos[numKilled]) {
         assertTrue(firstGS != -1);
@@ -421,17 +434,18 @@ public class TestDFSStripedOutputStreamWithFailure {
           waitTokenExpires(out);
         }
 
-        killedDN.add(killDatanode(cluster, stripedOut, dnIndex[numKilled], pos));
+        killedDN.add(
+            killDatanode(cluster, stripedOut, dnIndex[numKilled], pos));
         numKilled++;
       }
 
       write(out, i);
 
-      if (i % BLOCK_GROUP_SIZE == FLUSH_POS) {
+      if (i % blockGroupSize == FLUSH_POS) {
         firstGS = getGenerationStamp(stripedOut);
         oldGS = firstGS;
       }
-      if (i > 0 && (i + 1) % BLOCK_GROUP_SIZE == 0) {
+      if (i > 0 && (i + 1) % blockGroupSize == 0) {
         gsList.add(oldGS);
       }
     }
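
The two modulo checks above bracket each block group: the first fires once per group, when the write crosses FLUSH_POS within it, and captures the group's initial generation stamp; the second fires on the group's last position and records the stamp the group ended with, possibly bumped by a kill, so checkData can verify it afterwards. A standalone sketch of that boundary arithmetic, with purely hypothetical sizes:

    // Hypothetical sizes, standing in for blockGroupSize and FLUSH_POS.
    final long groupSize = 6L * 64 * 1024;
    final long flushPos = 1024;
    for (long i = 0; i < 2 * groupSize; i++) {
      if (i % groupSize == flushPos) {
        // hit once per block group: snapshot its first generation stamp
      }
      if (i > 0 && (i + 1) % groupSize == 0) {
        // last position of a group: record the stamp it ended with
      }
    }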
@@ -442,7 +456,8 @@ public class TestDFSStripedOutputStreamWithFailure {
     StripedFileTestUtil.waitBlockGroupsReported(dfs, fullPath, numKilled);
 
     cluster.triggerBlockReports();
-    StripedFileTestUtil.checkData(dfs, p, length, killedDN, gsList);
+    StripedFileTestUtil.checkData(dfs, p, length, killedDN, gsList,
+        blockGroupSize);
   }
 
   static void write(FSDataOutputStream out, int i) throws IOException {
@@ -508,43 +523,85 @@ public class TestDFSStripedOutputStreamWithFailure {
     }
   }
 
-  public static abstract class TestBase {
-    static final long TIMEOUT = 240000;
-
-    int getBase() {
-      final String name = getClass().getSimpleName();
-      int i = name.length() - 1;
-      for(; i >= 0 && Character.isDigit(name.charAt(i)); i--);
-      return Integer.parseInt(name.substring(i + 1));
+  int getBase() {
+    final String name = getClass().getSimpleName();
+    int i = name.length() - 1;
+    while (i >= 0 && Character.isDigit(name.charAt(i))) {
+      i--;
     }
+    String number = name.substring(i + 1);
+    try {
+      return Integer.parseInt(number);
+    } catch (Exception e) {
+      return -1;
+    }
+  }
 
-    private final TestDFSStripedOutputStreamWithFailure test
-        = new TestDFSStripedOutputStreamWithFailure();
-    private void run(int offset) {
-      final int i = offset + getBase();
-      final Integer length = getLength(i);
-      if (length == null) {
-        System.out.println("Skip test " + i + " since length=null.");
-        return;
-      }
-      if (RANDOM.nextInt(16) != 0) {
-        System.out.println("Test " + i + ", length=" + length
-            + ", is not chosen to run.");
-        return;
-      }
-      System.out.println("Run test " + i + ", length=" + length);
-      test.runTest(length);
+  private void run(int offset) {
+    int base = getBase();
+    Assume.assumeTrue(base >= 0);
+    final int i = offset + base;
+    final Integer length = getLength(i);
+    if (length == null) {
+      System.out.println("Skip test " + i + " since length=null.");
+      return;
     }
+    if (RANDOM.nextInt(16) != 0) {
+      System.out.println("Test " + i + ", length=" + length
+          + ", is not chosen to run.");
+      return;
+    }
+    System.out.println("Run test " + i + ", length=" + length);
+    runTest(length);
+  }
+
+  @Test(timeout = 240000)
+  public void test0() {
+    run(0);
+  }
+
+  @Test(timeout = 240000)
+  public void test1() {
+    run(1);
+  }
+
+  @Test(timeout = 240000)
+  public void test2() {
+    run(2);
+  }
+
+  @Test(timeout = 240000)
+  public void test3() {
+    run(3);
+  }
+
+  @Test(timeout = 240000)
+  public void test4() {
+    run(4);
+  }
+
+  @Test(timeout = 240000)
+  public void test5() {
+    run(5);
+  }
+
+  @Test(timeout = 240000)
+  public void test6() {
+    run(6);
+  }
+
+  @Test(timeout = 240000)
+  public void test7() {
+    run(7);
+  }
+
+  @Test(timeout = 240000)
+  public void test8() {
+    run(8);
+  }
 
-    @Test(timeout=TIMEOUT) public void test0() {run(0);}
-    @Test(timeout=TIMEOUT) public void test1() {run(1);}
-    @Test(timeout=TIMEOUT) public void test2() {run(2);}
-    @Test(timeout=TIMEOUT) public void test3() {run(3);}
-    @Test(timeout=TIMEOUT) public void test4() {run(4);}
-    @Test(timeout=TIMEOUT) public void test5() {run(5);}
-    @Test(timeout=TIMEOUT) public void test6() {run(6);}
-    @Test(timeout=TIMEOUT) public void test7() {run(7);}
-    @Test(timeout=TIMEOUT) public void test8() {run(8);}
-    @Test(timeout=TIMEOUT) public void test9() {run(9);}
+  @Test(timeout = 240000)
+  public void test9() {
+    run(9);
   }
 }
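
With the nested TestBase class inlined into the parent, the numeric-suffix parsing now tolerates a name with no trailing digits: the base class itself yields -1, and Assume.assumeTrue(base >= 0) in run() skips the shared tests there, while the numbered subclasses below keep running them. A self-contained sketch of the same parsing (parseBase is a hypothetical standalone name, not part of the commit):

    // Standalone restatement of the getBase() logic above.
    static int parseBase(String simpleName) {
      int i = simpleName.length() - 1;
      while (i >= 0 && Character.isDigit(simpleName.charAt(i))) {
        i--;  // walk left past the trailing digits
      }
      try {
        return Integer.parseInt(simpleName.substring(i + 1));
      } catch (NumberFormatException e) {
        return -1;  // empty suffix, e.g. the base class itself
      }
    }
    // parseBase("TestDFSStripedOutputStreamWithFailure120") -> 120
    // parseBase("TestDFSStripedOutputStreamWithFailure")    -> -1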

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure000.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure000.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure000.java
index b4fb1b8..44dd584 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure000.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure000.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure000 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure000
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure010.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure010.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure010.java
index 8489c3d..15261dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure010.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure010.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure010 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure010
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java
index 21feee6..7c0586a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure020 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure020
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java
index 01e6e73..0d3948a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure030 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure030
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java
index 8519fed..96e067a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure040 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure040
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java
index d750cbf..c75f78f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure050 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure050
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java
index cb8ce23..ea53da2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure060 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure060
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java
index ee72c92..31a777a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure070 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure070
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java
index 90ff587..a1bb14f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure080 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure080
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java
index ce56cd2..52275d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure090 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure090
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java
index d63c19d..4d08487 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure100 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure100
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java
index a590623..f03efa6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure110 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure110
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java
index 0e641ff..b8b8b28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure120 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure120
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java
index e84ad1f..dada985 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure130 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure130
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java
index b128c85..3118d72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure140 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure140
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java
index 41940af..8008ad7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure150 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure150
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java
index 8e6c39b..0a19041 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure160 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure160
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java
index d54be46..0a669c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure170 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure170
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java
index 93f00b4..364781e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure180 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure180
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java
index 04f5e0d..d6339d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure190 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure190
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java
index 196d743..7e419d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure200 extends TestBase {}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure200
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java
index ef5d65b..bf7cebb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
-
-public class TestDFSStripedOutputStreamWithFailure210 extends TestBase {
-}
\ No newline at end of file
+/**
+ * Test striped file write operation with data node failures.
+ */
+public class TestDFSStripedOutputStreamWithFailure210
+    extends TestDFSStripedOutputStreamWithFailure {}
\ No newline at end of file



