hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1337003 [2/2] - in /hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/fuse-dfs/src/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/security/token/block/ src/main/jav...
Date: Fri, 11 May 2012 02:05:35 GMT
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
(original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
Fri May 11 02:05:31 2012
@@ -26,6 +26,7 @@ import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -47,17 +48,10 @@ import com.google.common.collect.LinkedL
 import com.google.common.collect.Lists;
 
 public class TestBlockManager {
-  private final List<DatanodeDescriptor> nodes = ImmutableList.of( 
-      new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackB"),
-      new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackB"),
-      new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackB")
-    );
-  private final List<DatanodeDescriptor> rackA = nodes.subList(0, 3);
-  private final List<DatanodeDescriptor> rackB = nodes.subList(3, 6);
-  
+  private List<DatanodeDescriptor> nodes;
+  private List<DatanodeDescriptor> rackA;
+  private List<DatanodeDescriptor> rackB;
+
   /**
    * Some of these tests exercise code which has some randomness involved -
   * i.e. even if there's a bug, they may pass because the random node selection
@@ -82,6 +76,16 @@ public class TestBlockManager {
     fsn = Mockito.mock(FSNamesystem.class);
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     bm = new BlockManager(fsn, fsn, conf);
+    nodes = ImmutableList.of(
+        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB"),
+        DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB"),
+        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB")
+      );
+    rackA = nodes.subList(0, 3);
+    rackB = nodes.subList(3, 6);
   }
   
   private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
@@ -116,7 +120,7 @@ public class TestBlockManager {
   }
   
   private void doBasicTest(int testIndex) {
-    List<DatanodeDescriptor> origNodes = nodes(0, 1);
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1);
     BlockInfo blockInfo = addBlockOnNodes((long)testIndex, origNodes);
 
     DatanodeDescriptor[] pipeline = scheduleSingleReplication(blockInfo);
@@ -147,7 +151,7 @@ public class TestBlockManager {
   
   private void doTestTwoOfThreeNodesDecommissioned(int testIndex) throws Exception {
     // Block originally on A1, A2, B1
-    List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
     BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
     
     // Decommission two of the nodes (A1, A2)
@@ -157,7 +161,7 @@ public class TestBlockManager {
     assertTrue("Source of replication should be one of the nodes the block " +
         "was on. Was: " + pipeline[0],
         origNodes.contains(pipeline[0]));
-    assertEquals("Should have two targets", 3, pipeline.length);
+    assertEquals("Should have three targets", 3, pipeline.length);
     
     boolean foundOneOnRackA = false;
     for (int i = 1; i < pipeline.length; i++) {
@@ -190,7 +194,7 @@ public class TestBlockManager {
 
  private void doTestAllNodesHoldingReplicasDecommissioned(int testIndex) throws Exception {
     // Block originally on A1, A2, B1
-    List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
     BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
     
     // Decommission all of the nodes
@@ -242,7 +246,7 @@ public class TestBlockManager {
   
   private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
     // Block originally on A1, A2, B1
-    List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
     BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
     
     // Decommission all of the nodes in rack A
@@ -252,7 +256,7 @@ public class TestBlockManager {
     assertTrue("Source of replication should be one of the nodes the block " +
         "was on. Was: " + pipeline[0],
         origNodes.contains(pipeline[0]));
-    assertEquals("Should have 2 targets", 3, pipeline.length);
+    assertEquals("Should have three targets", 3, pipeline.length);
     
     boolean foundOneOnRackB = false;
     for (int i = 1; i < pipeline.length; i++) {
@@ -273,7 +277,8 @@ public class TestBlockManager {
 
     // the block is still under-replicated. Add a new node. This should allow
     // the third off-rack replica.
-    DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7", 100), "/rackC");
+    DatanodeDescriptor rackCNode =
+      DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
     addNodes(ImmutableList.of(rackCNode));
     try {
       DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
@@ -313,13 +318,13 @@ public class TestBlockManager {
   
   @Test
   public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception {
-    List<DatanodeDescriptor> nodes = ImmutableList.of( 
-        new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackA")
+    List<DatanodeDescriptor> nodes = ImmutableList.of(
+        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA")
       );
     addNodes(nodes);
    List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);
@@ -359,7 +364,7 @@ public class TestBlockManager {
     return blockInfo;
   }
 
-  private List<DatanodeDescriptor> nodes(int ... indexes) {
+  private List<DatanodeDescriptor> getNodes(int ... indexes) {
     List<DatanodeDescriptor> ret = Lists.newArrayList();
     for (int idx : indexes) {
       ret.add(nodes.get(idx));
@@ -368,7 +373,7 @@ public class TestBlockManager {
   }
   
   private List<DatanodeDescriptor> startDecommission(int ... indexes) {
-    List<DatanodeDescriptor> nodes = nodes(indexes);
+    List<DatanodeDescriptor> nodes = getNodes(indexes);
     for (DatanodeDescriptor node : nodes) {
       node.startDecommission();
     }
@@ -380,7 +385,7 @@ public class TestBlockManager {
     Mockito.doReturn((short)3).when(iNode).getReplication();
     BlockInfo blockInfo = blockOnNodes(blockId, nodes);
 
-    bm.blocksMap.addINode(blockInfo, iNode);
+    bm.blocksMap.addBlockCollection(blockInfo, iNode);
     return blockInfo;
   }
 
@@ -404,8 +409,9 @@ public class TestBlockManager {
 
     LinkedListMultimap<DatanodeDescriptor, BlockTargetPair> repls = getAllPendingReplications();
     assertEquals(1, repls.size());
-    Entry<DatanodeDescriptor, BlockTargetPair> repl = repls.entries()
-        .iterator().next();
+    Entry<DatanodeDescriptor, BlockTargetPair> repl =
+      repls.entries().iterator().next();
+        
     DatanodeDescriptor[] targets = repl.getValue().targets;
 
     DatanodeDescriptor[] pipeline = new DatanodeDescriptor[1 + targets.length];

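For reference: the DFSTestUtil factory methods these hunks call are added in part [1/2] of this commit, which is not shown here. A plausible sketch inferred purely from the call sites above (the default port and exact signatures are assumptions):

    // Hypothetical reconstruction of the DFSTestUtil helpers used throughout
    // this diff; the real definitions live in commit part [1/2].
    public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
        String rackLocation) {
      // Assume the same default port the old inline fixtures used (5020).
      return getDatanodeDescriptor(ipAddr, 5020, rackLocation);
    }

    public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
        int port, String rackLocation) {
      // Wraps the DatanodeDescriptor/DatanodeID construction the old test
      // code performed inline.
      return new DatanodeDescriptor(new DatanodeID(ipAddr, port), rackLocation);
    }
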
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
(original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
Fri May 11 02:05:31 2012
@@ -18,73 +18,75 @@
 
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.junit.Before;
 import org.junit.Test;
 
+import static org.junit.Assert.*;
+
 public class TestHost2NodesMap {
   private Host2NodesMap map = new Host2NodesMap();
-  private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
-    new DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("ip2", "h1", "", 5020, -1, -1), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5020, -1, -1), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5030, -1, -1), "/d1/r2"),
-  };
-  private final DatanodeDescriptor NULL_NODE = null; 
-  private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3", 5040),
-      "/d1/r4");
-
+  private DatanodeDescriptor dataNodes[];
+  
   @Before
   public void setup() {
-    for(DatanodeDescriptor node:dataNodes) {
+    dataNodes = new DatanodeDescriptor[] {
+        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", 5021, "/d1/r2"),
+    };
+    for (DatanodeDescriptor node : dataNodes) {
       map.add(node);
     }
-    map.add(NULL_NODE);
+    map.add(null);
   }
   
   @Test
   public void testContains() throws Exception {
-    for(int i=0; i<dataNodes.length; i++) {
+    DatanodeDescriptor nodeNotInMap =
+      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
+    for (int i = 0; i < dataNodes.length; i++) {
       assertTrue(map.contains(dataNodes[i]));
     }
-    assertFalse(map.contains(NULL_NODE));
-    assertFalse(map.contains(NODE));
+    assertFalse(map.contains(null));
+    assertFalse(map.contains(nodeNotInMap));
   }
 
   @Test
   public void testGetDatanodeByHost() throws Exception {
-    assertTrue(map.getDatanodeByHost("ip1")==dataNodes[0]);
-    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
-    DatanodeDescriptor node = map.getDatanodeByHost("ip3");
-    assertTrue(node==dataNodes[2] || node==dataNodes[3]);
-    assertTrue(null==map.getDatanodeByHost("ip4"));
+    assertEquals(map.getDatanodeByHost("1.1.1.1"), dataNodes[0]);
+    assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
+    DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
+    assertTrue(node == dataNodes[2] || node == dataNodes[3]);
+    assertNull(map.getDatanodeByHost("4.4.4.4"));
   }
 
   @Test
   public void testRemove() throws Exception {
-    assertFalse(map.remove(NODE));
+    DatanodeDescriptor nodeNotInMap =
+      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
+    assertFalse(map.remove(nodeNotInMap));
     
     assertTrue(map.remove(dataNodes[0]));
-    assertTrue(map.getDatanodeByHost("ip1")==null);
-    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
-    DatanodeDescriptor node = map.getDatanodeByHost("ip3");
+    assertTrue(map.getDatanodeByHost("1.1.1.1.")==null);
+    assertTrue(map.getDatanodeByHost("2.2.2.2")==dataNodes[1]);
+    DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
     assertTrue(node==dataNodes[2] || node==dataNodes[3]);
-    assertTrue(null==map.getDatanodeByHost("ip4"));
+    assertNull(map.getDatanodeByHost("4.4.4.4"));
     
     assertTrue(map.remove(dataNodes[2]));
-    assertTrue(map.getDatanodeByHost("ip1")==null);
-    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
-    assertTrue(map.getDatanodeByHost("ip3")==dataNodes[3]);
+    assertNull(map.getDatanodeByHost("1.1.1.1"));
+    assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
+    assertEquals(map.getDatanodeByHost("3.3.3.3"), dataNodes[3]);
     
     assertTrue(map.remove(dataNodes[3]));
-    assertTrue(map.getDatanodeByHost("ip1")==null);
-    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
-    assertTrue(map.getDatanodeByHost("ip3")==null);
+    assertNull(map.getDatanodeByHost("1.1.1.1"));
+    assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
+    assertNull(map.getDatanodeByHost("3.3.3.3"));
     
-    assertFalse(map.remove(NULL_NODE));
+    assertFalse(map.remove(null));
     assertTrue(map.remove(dataNodes[1]));
     assertFalse(map.remove(dataNodes[1]));
   }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
(original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
Fri May 11 02:05:31 2012
@@ -21,6 +21,7 @@ import static org.junit.Assert.*;
 
 import java.util.Queue;
 
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo;
@@ -38,12 +39,10 @@ public class TestPendingDataNodeMessages
   private final Block block1Gs2DifferentInstance =
     new Block(1, 0, 2);
   private final Block block2Gs1 = new Block(2, 0, 1);
-  
-  private final DatanodeDescriptor fakeDN = new DatanodeDescriptor(
-      new DatanodeID("fake", 100));
-  
+
   @Test
   public void testQueues() {
+    DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor();
     msgs.enqueueReportedBlock(fakeDN, block1Gs1, ReplicaState.FINALIZED);
     msgs.enqueueReportedBlock(fakeDN, block1Gs2, ReplicaState.FINALIZED);
 
@@ -56,8 +55,8 @@ public class TestPendingDataNodeMessages
     Queue<ReportedBlockInfo> q =
       msgs.takeBlockQueue(block1Gs2DifferentInstance);
     assertEquals(
-        "ReportedBlockInfo [block=blk_1_1, dn=fake:100, reportedState=FINALIZED]," +
-        "ReportedBlockInfo [block=blk_1_2, dn=fake:100, reportedState=FINALIZED]",
+        "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED],"
+
+        "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",
         Joiner.on(",").join(q));
     assertEquals(0, msgs.count());
     

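The expected string in the updated assertion pins the descriptor to 127.0.0.1:50010, which implies DFSTestUtil.getLocalDatanodeDescriptor() binds the loopback address and the default datanode port. A minimal sketch under that assumption:

    // Assumed shape of getLocalDatanodeDescriptor(), inferred only from the
    // "dn=127.0.0.1:50010" text the assertion above expects.
    public static DatanodeDescriptor getLocalDatanodeDescriptor() {
      return new DatanodeDescriptor(new DatanodeID("127.0.0.1", 50010));
    }
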
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
(original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
Fri May 11 02:05:31 2012
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.bl
 
 import static org.junit.Assert.*;
 
-import java.io.IOException;
+import java.io.File;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -39,54 +39,55 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestReplicationPolicy {
-  private Random random= DFSUtil.getRandom();
+  private Random random = DFSUtil.getRandom();
   private static final int BLOCK_SIZE = 1024;
   private static final int NUM_OF_DATANODES = 6;
-  private static final Configuration CONF = new HdfsConfiguration();
-  private static final NetworkTopology cluster;
-  private static final NameNode namenode;
-  private static final BlockPlacementPolicy replicator;
+  private static NetworkTopology cluster;
+  private static NameNode namenode;
+  private static BlockPlacementPolicy replicator;
   private static final String filename = "/dummyfile.txt";
-  private static final DatanodeDescriptor dataNodes[] = 
-    new DatanodeDescriptor[] {
-      new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d2/r3"),
-      new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3")
-    };
-   
-  private final static DatanodeDescriptor NODE = 
-    new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4");
-  
-  static {
-    try {
-      FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
-      CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-      DFSTestUtil.formatNameNode(CONF);
-      namenode = new NameNode(CONF);
-    } catch (IOException e) {
-      e.printStackTrace();
-      throw (RuntimeException)new RuntimeException().initCause(e);
-    }
+  private static DatanodeDescriptor dataNodes[];
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    dataNodes = new DatanodeDescriptor[] {
+        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
+        DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
+        DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d2/r3"),
+        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3")        
+      };
+
+    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    File baseDir = new File(System.getProperty(
+        "test.build.data", "build/test/data"), "dfs/");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        new File(baseDir, "name").getPath());
+
+    DFSTestUtil.formatNameNode(conf);
+    namenode = new NameNode(conf);
+
     final BlockManager bm = namenode.getNamesystem().getBlockManager();
     replicator = bm.getBlockPlacementPolicy();
     cluster = bm.getDatanodeManager().getNetworkTopology();
     // construct network topology
-    for(int i=0; i<NUM_OF_DATANODES; i++) {
+    for (int i=0; i < NUM_OF_DATANODES; i++) {
       cluster.add(dataNodes[i]);
     }
-    for(int i=0; i<NUM_OF_DATANODES; i++) {
+    for (int i=0; i < NUM_OF_DATANODES; i++) {
       dataNodes[i].updateHeartbeat(
           2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
           2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
-    }
+    }    
   }
-  
+
   /**
    * In this testcase, client is dataNodes[0]. So the 1st replica should be
    * placed on dataNodes[0], the 2nd replica should be placed on 
@@ -328,6 +329,7 @@ public class TestReplicationPolicy {
           HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
     }
   }
+
   /**
   * In this testcase, client is a node outside of the file system.
    * So the 1st replica can be placed on any node. 
@@ -337,22 +339,25 @@ public class TestReplicationPolicy {
    */
   @Test
   public void testChooseTarget5() throws Exception {
+    DatanodeDescriptor writerDesc =
+      DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");
+
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(filename,
-                                      0, NODE, BLOCK_SIZE);
+                                      0, writerDesc, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(filename,
-                                      1, NODE, BLOCK_SIZE);
+                                      1, writerDesc, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     
     targets = replicator.chooseTarget(filename,
-                                      2, NODE, BLOCK_SIZE);
+                                      2, writerDesc, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
     targets = replicator.chooseTarget(filename,
-                                      3, NODE, BLOCK_SIZE);
+                                      3, writerDesc, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));    

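The hunk above replaces a static initializer with a JUnit 4 @BeforeClass method. The practical difference: a setup failure is reported as an ordinary test error instead of an ExceptionInInitializerError at class-load time, and the checked exception no longer has to be wrapped in a RuntimeException. A minimal sketch of the pattern, with illustrative names:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;
    import org.junit.BeforeClass;

    public class ExampleTest {
      private static NameNode namenode;  // shared, expensive fixture

      @BeforeClass
      public static void setupCluster() throws Exception {
        Configuration conf = new HdfsConfiguration();
        // configure name dirs, format, and start the NameNode; any throwable
        // here is surfaced by JUnit as a plain test failure
        namenode = new NameNode(conf);
      }
    }
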
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
(original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
Fri May 11 02:05:31 2012
@@ -136,11 +136,6 @@ public class DataNodeTestUtils {  
       ) throws IOException {
     return FsDatasetTestUtil.getBlockFile(dn.getFSDataset(), bpid, b);
   }
-  
-  public static File getMetaFile(DataNode dn, String bpid, Block b)
-      throws IOException {
-    return FsDatasetTestUtil.getMetaFile(dn.getFSDataset(), bpid, b);
-  }
 
   public static boolean unlinkBlock(DataNode dn, ExtendedBlock bk, int numLinks
       ) throws IOException {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
(original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
Fri May 11 02:05:31 2012
@@ -115,7 +115,7 @@ public class TestBPOfferService {
             0, HdfsConstants.LAYOUT_VERSION))
       .when(mock).versionRequest();
     
-    Mockito.doReturn(new DatanodeRegistration("fake-node", 100))
+    Mockito.doReturn(new DatanodeRegistration("1.2.3.4", 100))
       .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
     
     Mockito.doAnswer(new HeartbeatAnswer(nnIdx))

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
(original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
Fri May 11 02:05:31 2012
@@ -47,6 +47,7 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -197,9 +198,9 @@ public class TestBlockRecovery {
         locs, RECOVERY_ID);
     ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
     BlockRecord record1 = new BlockRecord(
-        new DatanodeID("xx", "yy", "zz", 1, 2, 3), dn1, replica1);
+        DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
     BlockRecord record2 = new BlockRecord(
-        new DatanodeID("aa", "bb", "cc", 1, 2, 3), dn2, replica2);
+        DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
     syncList.add(record1);
     syncList.add(record2);
     
@@ -401,8 +402,7 @@ public class TestBlockRecovery {
 
   private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
     Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
-    DatanodeInfo mockOtherDN = new DatanodeInfo(
-        new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0, 0));
+    DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
     DatanodeInfo[] locs = new DatanodeInfo[] {
         new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
         mockOtherDN };

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
(original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
Fri May 11 02:05:31 2012
@@ -36,12 +36,6 @@ public class FsDatasetTestUtil {
       ) throws IOException {
     return ((FsDatasetImpl)fsd).getBlockFile(bpid, b);
   }
-  
-  public static File getMetaFile(FsDatasetSpi<?> fsd, String bpid, Block b)
-      throws IOException {
-    return FsDatasetUtil.getMetaFile(getBlockFile(fsd, bpid, b), b
-        .getGenerationStamp());
-  }
 
   public static boolean unlinkBlock(FsDatasetSpi<?> fsd,
       ExtendedBlock block, int numLinks) throws IOException {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
(original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
Fri May 11 02:05:31 2012
@@ -356,8 +356,7 @@ public class TestInterDatanodeProtocol {
     server.start();
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    DatanodeID fakeDnId = new DatanodeID(
-        "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
     DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
     InterDatanodeProtocol proxy = null;
 

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
(original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
Fri May 11 02:05:31 2012
@@ -29,6 +29,7 @@ import java.util.Arrays;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.Random;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -1155,4 +1156,75 @@ public class TestEditLog extends TestCas
           "No non-corrupt logs for txid " + startGapTxId, ioe);
     }
   }
+
+  /**
+   * Test that we can read from a byte stream without crashing.
+   *
+   */
+  static void validateNoCrash(byte garbage[]) throws IOException {
+    final String TEST_LOG_NAME = "test_edit_log";
+
+    EditLogFileOutputStream elfos = null;
+    File file = null;
+    EditLogFileInputStream elfis = null;
+    try {
+      file = new File(TEST_LOG_NAME);
+      elfos = new EditLogFileOutputStream(file, 0);
+      elfos.create();
+      elfos.writeRaw(garbage, 0, garbage.length);
+      elfos.setReadyToFlush();
+      elfos.flushAndSync();
+      elfos.close();
+      elfos = null;
+      file = new File(TEST_LOG_NAME);
+      elfis = new EditLogFileInputStream(file);
+
+      // verify that we can read everything without killing the JVM or
+      // throwing an exception other than IOException
+      try {
+        while (true) {
+          FSEditLogOp op = elfis.readOp();
+          if (op == null)
+            break;
+        }
+      } catch (IOException e) {
+      } catch (Throwable t) {
+        StringWriter sw = new StringWriter();
+        t.printStackTrace(new PrintWriter(sw));
+        fail("caught non-IOException throwable with message " +
+            t.getMessage() + "\nstack trace\n" + sw.toString());
+      }
+    } finally {
+      if ((elfos != null) && (elfos.isOpen()))
+        elfos.close();
+      if (elfis != null)
+        elfis.close();
+    }
+  }
+
+  static byte[][] invalidSequences = null;
+
+  /**
+   * "Fuzz" test for the edit log.
+   *
+   * This tests that we can read random garbage from the edit log without
+   * crashing the JVM or throwing an unchecked exception.
+   */
+  @Test
+  public void testFuzzSequences() throws IOException {
+    final int MAX_GARBAGE_LENGTH = 512;
+    final int MAX_INVALID_SEQ = 5000;
+    // The seed to use for our random number generator.  When given the same
+    // seed, java.util.Random will always produce the same sequence of values.
+    // This is important because it means that the test is deterministic and
+    // repeatable on any machine.
+    final int RANDOM_SEED = 123;
+
+    Random r = new Random(RANDOM_SEED);
+    for (int i = 0; i < MAX_INVALID_SEQ; i++) {
+      byte[] garbage = new byte[r.nextInt(MAX_GARBAGE_LENGTH)];
+      r.nextBytes(garbage);
+      validateNoCrash(garbage);
+    }
+  }
 }

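The invalidSequences field added above is unused in this revision; it presumably anticipates feeding specific known-bad byte streams through the same harness. A possible targeted case, with the byte values purely illustrative:

    // Hypothetical reuse of validateNoCrash for a handcrafted sequence; the
    // bytes here are arbitrary, not a known-bad opcode stream.
    @Test
    public void testKnownBadSequence() throws IOException {
      byte[] badSequence = new byte[] { (byte) 0xff, 0x00, 0x7f, 0x12 };
      validateNoCrash(badSequence);
    }
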
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
(original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
Fri May 11 02:05:31 2012
@@ -18,52 +18,60 @@
 
 package org.apache.hadoop.net;
 
-
 import java.util.HashMap;
 import java.util.Map;
 
-import junit.framework.TestCase;
-
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 
-public class TestNetworkTopology extends TestCase {
+import org.junit.Test;
+import org.junit.Before;
+
+import static org.junit.Assert.*;
+
+public class TestNetworkTopology {
   private final static NetworkTopology cluster = new NetworkTopology();
-  private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
-    new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3"),
-    new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r3")
-  };
-  private final static DatanodeDescriptor NODE = 
-    new DatanodeDescriptor(new DatanodeID("h8", 5020), "/d2/r4");
+  private DatanodeDescriptor dataNodes[];
   
-  static {
-    for(int i=0; i<dataNodes.length; i++) {
+  @Before
+  public void setupDatanodes() {
+    dataNodes = new DatanodeDescriptor[] {
+        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
+        DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
+        DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
+        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
+        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3")
+    };
+    for (int i = 0; i < dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
     }
   }
   
+  @Test
   public void testContains() throws Exception {
-    for(int i=0; i<dataNodes.length; i++) {
+    DatanodeDescriptor nodeNotInMap = 
+      DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r4");
+    for (int i=0; i < dataNodes.length; i++) {
       assertTrue(cluster.contains(dataNodes[i]));
     }
-    assertFalse(cluster.contains(NODE));
+    assertFalse(cluster.contains(nodeNotInMap));
   }
   
+  @Test
   public void testNumOfChildren() throws Exception {
     assertEquals(cluster.getNumOfLeaves(), dataNodes.length);
   }
 
+  @Test
   public void testCreateInvalidTopology() throws Exception {
     NetworkTopology invalCluster = new NetworkTopology();
     DatanodeDescriptor invalDataNodes[] = new DatanodeDescriptor[] {
-      new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1")
+        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1")
     };
     invalCluster.add(invalDataNodes[0]);
     invalCluster.add(invalDataNodes[1]);
@@ -77,6 +85,7 @@ public class TestNetworkTopology extends
     }
   }
 
+  @Test
   public void testRacks() throws Exception {
     assertEquals(cluster.getNumOfRacks(), 3);
     assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
@@ -87,6 +96,7 @@ public class TestNetworkTopology extends
     assertTrue(cluster.isOnSameRack(dataNodes[5], dataNodes[6]));
   }
   
+  @Test
   public void testGetDistance() throws Exception {
     assertEquals(cluster.getDistance(dataNodes[0], dataNodes[0]), 0);
     assertEquals(cluster.getDistance(dataNodes[0], dataNodes[1]), 2);
@@ -94,6 +104,7 @@ public class TestNetworkTopology extends
     assertEquals(cluster.getDistance(dataNodes[0], dataNodes[6]), 6);
   }
 
+  @Test
   public void testPseudoSortByDistance() throws Exception {
     DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
     
@@ -136,6 +147,7 @@ public class TestNetworkTopology extends
     assertTrue(testNodes[2] == dataNodes[3]);
   }
   
+  @Test
   public void testRemove() throws Exception {
     for(int i=0; i<dataNodes.length; i++) {
       cluster.remove(dataNodes[i]);
@@ -173,6 +185,7 @@ public class TestNetworkTopology extends
   /**
    * This test checks that chooseRandom works for an excluded node.
    */
+  @Test
   public void testChooseRandomExcludedNode() {
     String scope = "~" + NodeBase.getPath(dataNodes[0]);
     Map<Node, Integer> frequency = pickNodesAtRandom(100, scope);
@@ -186,6 +199,7 @@ public class TestNetworkTopology extends
   /**
    * This test checks that chooseRandom works for an excluded rack.
    */
+  @Test
   public void testChooseRandomExcludedRack() {
     Map<Node, Integer> frequency = pickNodesAtRandom(100, "~" + "/d2");
     // all the nodes on the second rack should be zero

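The getDistance assertions in this file follow NetworkTopology's hop-count metric: the distance between two nodes is the number of tree edges from each node up to their nearest common ancestor. Reading the asserted values against the fixture above:

    // Same node: 0 hops.
    // Same rack (/d1/r1 vs /d1/r1): 1 + 1 = 2.
    // Same datacenter, different rack (/d1/r1 vs /d1/r2): 2 + 2 = 4.
    // Different datacenters (/d1/r1 vs /d2/r3): 3 + 3 = 6.
    assertEquals(cluster.getDistance(dataNodes[0], dataNodes[0]), 0);
    assertEquals(cluster.getDistance(dataNodes[0], dataNodes[1]), 2);
    assertEquals(cluster.getDistance(dataNodes[0], dataNodes[3]), 4);
    assertEquals(cluster.getDistance(dataNodes[0], dataNodes[6]), 6);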

