hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jiten...@apache.org
Subject svn commit: r1076426 - in /hadoop/hdfs/branches/HDFS-1052: CHANGES.txt src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
Date Wed, 02 Mar 2011 22:06:26 GMT
Author: jitendra
Date: Wed Mar  2 22:06:25 2011
New Revision: 1076426

URL: http://svn.apache.org/viewvc?rev=1076426&view=rev
Log:
Federation: TestOverReplicatedBlocks and TestWriteToReplica failing. Contributed by jhoman
and jitendra.

Modified:
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=1076426&r1=1076425&r2=1076426&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Wed Mar  2 22:06:25 2011
@@ -138,6 +138,9 @@ Trunk (unreleased changes)
     HDFS-1699. Federation: Fix failure of TestBlockReport.
     (Matt Foley via suresh)
 
+    HDFS-1698. Federation: TestOverReplicatedBlocks and TestWriteToReplica 
+    failing. (jhoman and jitendra)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java?rev=1076426&r1=1076425&r2=1076426&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
(original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
Wed Mar  2 22:06:25 2011
@@ -24,18 +24,12 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-
 import org.junit.Assert;
 import org.junit.Test;
 
 /** Test if FSDataset#append, writeToRbw, and writeToTmp */
 public class TestWriteToReplica {
-  static String bpid = "BP-TEST";
-  final private static ExtendedBlock[] blocks = new ExtendedBlock[] {
-    new ExtendedBlock(bpid, 1, 1, 2001), new ExtendedBlock(bpid, 2, 1, 2002), 
-    new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
-    new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006)
-  };
+
   final private static int FINALIZED = 0;
   final private static int TEMPORARY = 1;
   final private static int RBW = 2;
@@ -47,6 +41,7 @@ public class TestWriteToReplica {
   @Test
   public void testClose() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
+    
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
@@ -54,10 +49,11 @@ public class TestWriteToReplica {
 
       // set up replicasMap
       String bpid = cluster.getNamesystem().getBlockPoolId();
-      setup(bpid, dataSet);
+      
+      ExtendedBlock[] blocks = setup(bpid, dataSet);
 
       // test close
-      testClose(dataSet);
+      testClose(dataSet, blocks);
     } finally {
       cluster.shutdown();
     }
@@ -74,10 +70,10 @@ public class TestWriteToReplica {
 
       // set up replicasMap
       String bpid = cluster.getNamesystem().getBlockPoolId();
-      setup(bpid, dataSet);
+      ExtendedBlock[] blocks = setup(bpid, dataSet);
 
       // test append
-      testAppend(bpid, dataSet);
+      testAppend(bpid, dataSet, blocks);
     } finally {
       cluster.shutdown();
     }
@@ -94,10 +90,10 @@ public class TestWriteToReplica {
 
       // set up replicasMap
       String bpid = cluster.getNamesystem().getBlockPoolId();
-      setup(bpid, dataSet);
+      ExtendedBlock[] blocks = setup(bpid, dataSet);
 
       // test writeToRbw
-      testWriteToRbw(dataSet);
+      testWriteToRbw(dataSet, blocks);
     } finally {
       cluster.shutdown();
     }
@@ -114,17 +110,33 @@ public class TestWriteToReplica {
 
       // set up replicasMap
       String bpid = cluster.getNamesystem().getBlockPoolId();
-      setup(bpid, dataSet);
+      ExtendedBlock[] blocks = setup(bpid, dataSet);
 
       // test writeToTemporary
-      testWriteToTemporary(dataSet);
+      testWriteToTemporary(dataSet, blocks);
     } finally {
       cluster.shutdown();
     }
   }
   
-  private void setup(String bpid, FSDataset dataSet) throws IOException {
+  /**
+   * Generate testing environment and return a collection of blocks
+   * on which to run the tests.
+   * 
+   * @param bpid Block pool ID to generate blocks for
+   * @param dataSet Namespace in which to insert blocks
+   * @return Contrived blocks for further testing.
+   * @throws IOException
+   */
+  private ExtendedBlock[] setup(String bpid, FSDataset dataSet) throws IOException {
     // setup replicas map
+    
+    ExtendedBlock[] blocks = new ExtendedBlock[] {
+        new ExtendedBlock(bpid, 1, 1, 2001), new ExtendedBlock(bpid, 2, 1, 2002), 
+        new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
+        new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006)
+    };
+    
     ReplicasMap replicasMap = dataSet.volumeMap;
     FSVolume vol = dataSet.volumes.getNextVolume(0);
     ReplicaInfo replicaInfo = new FinalizedReplica(
@@ -149,9 +161,11 @@ public class TestWriteToReplica {
             blocks[RWR].getLocalBlock()).getParentFile()));
     replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
         .getLocalBlock(), vol, vol.getDir()), 2007));    
+    
+    return blocks;
   }
   
-  private void testAppend(String bpid, FSDataset dataSet) throws IOException {
+  private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws
IOException {
     long newGS = blocks[FINALIZED].getGenerationStamp()+1;
     FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock())
         .getVolume();
@@ -271,7 +285,7 @@ public class TestWriteToReplica {
     }
   }
 
-  private void testClose(FSDataset dataSet) throws IOException {
+  private void testClose(FSDataset dataSet, ExtendedBlock [] blocks) throws IOException {
     long newGS = blocks[FINALIZED].getGenerationStamp()+1;
     dataSet.recoverClose(blocks[FINALIZED], newGS, 
         blocks[FINALIZED].getNumBytes());  // successful
@@ -321,7 +335,7 @@ public class TestWriteToReplica {
     }
   }
   
-  private void testWriteToRbw(FSDataset dataSet) throws IOException {
+  private void testWriteToRbw(FSDataset dataSet, ExtendedBlock[] blocks) throws IOException
{
     try {
       dataSet.recoverRbw(blocks[FINALIZED],
           blocks[FINALIZED].getGenerationStamp()+1,
@@ -414,7 +428,7 @@ public class TestWriteToReplica {
     dataSet.createRbw(blocks[NON_EXISTENT]);
   }
   
-  private void testWriteToTemporary(FSDataset dataSet) throws IOException {
+  private void testWriteToTemporary(FSDataset dataSet, ExtendedBlock[] blocks) throws IOException
{
     try {
       dataSet.createTemporary(blocks[FINALIZED]);
       Assert.fail("Should not have created a temporary replica that was " +

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java?rev=1076426&r1=1076425&r2=1076426&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
(original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
Wed Mar  2 22:06:25 2011
@@ -60,12 +60,10 @@ public class TestOverReplicatedBlocks ex
       TestDatanodeBlockScanner.corruptReplica(block, 0);
       DataNodeProperties dnProps = cluster.stopDataNode(0);
       // remove block scanner log to trigger block scanning
-      // TODO:FEDERATION needs change when data block scanner is changed
-      // TODO:FEDERATION remove finalzied_dir_name and use methods in MiniDFSCluster
-      final String finalized_dir_name = "/current/finalized/";
-      File scanLog = new File(System.getProperty("test.build.data"),
-          "dfs/data/data1" + finalized_dir_name + 
-          "dncp_block_verification.log.curr");
+      File scanLog = new File(MiniDFSCluster.getFinalizedDir(
+          MiniDFSCluster.getStorageDir(0, 0),
+          cluster.getNamesystem().getBlockPoolId()).getParent().toString()
+          + "/../dncp_block_verification.log.prev");
       //wait for one minute for deletion to succeed;
       for(int i=0; !scanLog.delete(); i++) {
         assertTrue("Could not delete log file in one minute", i < 60);



Mime
View raw message