hadoop-hdfs-commits mailing list archives

From: s..@apache.org
Subject: svn commit: r1492448 - in /hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
Date: Wed, 12 Jun 2013 22:10:46 GMT
Author: shv
Date: Wed Jun 12 22:10:46 2013
New Revision: 1492448

URL: http://svn.apache.org/r1492448
Log:
HDFS-4878. On Remove Block, block is not removed from neededReplications queue. Contributed by Tao Luo.

Modified:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1492448&r1=1492447&r2=1492448&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Jun 12 22:10:46 2013
@@ -21,6 +21,9 @@ Release 0.23.9 - UNRELEASED
     HDFS-4832. Namenode doesn't change the number of missing blocks in
     safemode when DNs rejoin or leave (Ravi Prakash via kihwal)
 
+    HDFS-4878. On Remove Block, block is not removed from neededReplications
+    queue. (Tao Luo via shv)
+
 Release 0.23.8 - 2013-06-05
   
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1492448&r1=1492447&r2=1492448&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Wed Jun 12 22:10:46 2013
@@ -2445,8 +2445,9 @@ public class BlockManager {
     addToInvalidates(block);
     corruptReplicas.removeFromCorruptReplicasMap(block);
     blocksMap.removeBlock(block);
-    // Remove the block from pendingReplications
+    // Remove the block from pendingReplications and neededReplications
     pendingReplications.remove(block);
+    neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
   }
 
   public BlockInfo getStoredBlock(Block block) {
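
The one-line fix above relies on the priority-queue layout of UnderReplicatedBlocks: passing UnderReplicatedBlocks.LEVEL (the number of priority levels) as the priority argument signals that the block's current level is unknown, so every queue is searched. The following is a rough, hedged sketch of that idea only; class and field names are simplified for illustration, and the level count is assumed, so this is not the actual Hadoop source.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hdfs.protocol.Block;

// Hedged sketch: a simplified illustration of the "remove with unknown
// priority level" pattern behind neededReplications.remove(block,
// UnderReplicatedBlocks.LEVEL). Not the actual UnderReplicatedBlocks code.
class LeveledBlockQueues {
  static final int LEVEL = 5;                    // number of priority levels (illustrative value)
  private final List<Set<Block>> queues = new ArrayList<Set<Block>>();

  LeveledBlockQueues() {
    for (int i = 0; i < LEVEL; i++) {
      queues.add(new HashSet<Block>());
    }
  }

  boolean remove(Block block, int priLevel) {
    // If the caller knows the block's level, try that queue first.
    if (priLevel >= 0 && priLevel < LEVEL && queues.get(priLevel).remove(block)) {
      return true;
    }
    // Level unknown (the caller passed LEVEL): fall back to scanning every
    // queue, which is what the removeBlock() path needs after a delete.
    for (Set<Block> q : queues) {
      if (q.remove(block)) {
        return true;
      }
    }
    return false;
  }
}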

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java?rev=1492448&r1=1492447&r2=1492448&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java Wed Jun 12 22:10:46 2013
@@ -97,16 +97,69 @@ public class TestMetaSave {
         + "metasave.out.txt";
     FileInputStream fstream = new FileInputStream(logFile);
     DataInputStream in = new DataInputStream(fstream);
-    BufferedReader reader = new BufferedReader(new InputStreamReader(in));
-    String line = reader.readLine();
-    assertTrue(line.equals("3 files and directories, 2 blocks = 5 total"));
-    line = reader.readLine();
-    assertTrue(line.equals("Live Datanodes: 1"));
-    line = reader.readLine();
-    assertTrue(line.equals("Dead Datanodes: 1"));
-    line = reader.readLine();
-    line = reader.readLine();
-    assertTrue(line.matches("^/filestatus[01]:.*"));
+    BufferedReader reader = null;
+    try {
+      reader = new BufferedReader(new InputStreamReader(in));
+      String line = reader.readLine();
+      assertTrue(line.equals(
+          "3 files and directories, 2 blocks = 5 total"));
+      line = reader.readLine();
+      assertTrue(line.equals("Live Datanodes: 1"));
+      line = reader.readLine();
+      assertTrue(line.equals("Dead Datanodes: 1"));
+      line = reader.readLine();
+      line = reader.readLine();
+      assertTrue(line.matches("^/filestatus[01]:.*"));
+    } finally {
+      if (reader != null)
+        reader.close();
+    }
+  }
+
+  /**
+   * Tests metasave after delete, to make sure there are no orphaned blocks
+   */
+  @Test
+  public void testMetasaveAfterDelete()
+      throws IOException, InterruptedException {
+
+    final FSNamesystem namesystem = cluster.getNamesystem();
+
+    for (int i = 0; i < 2; i++) {
+      Path file = new Path("/filestatus" + i);
+      createFile(fileSys, file);
+    }
+
+    cluster.stopDataNode(1);
+    // wait for namenode to discover that a datanode is dead
+    Thread.sleep(15000);
+    namesystem.setReplication("/filestatus0", (short) 4);
+    namesystem.delete("/filestatus0", true);
+    namesystem.delete("/filestatus1", true);
+
+    namesystem.metaSave("metasaveAfterDelete.out.txt");
+
+    // Verification
+    String logFile = System.getProperty("hadoop.log.dir") + "/"
+        + "metasaveAfterDelete.out.txt";
+    BufferedReader reader = null;
+    try {
+      FileInputStream fstream = new FileInputStream(logFile);
+      DataInputStream in = new DataInputStream(fstream);
+      reader = new BufferedReader(new InputStreamReader(in));
+      reader.readLine();
+      String line = reader.readLine();
+      assertTrue(line.equals("Live Datanodes: 1"));
+      line = reader.readLine();
+      assertTrue(line.equals("Dead Datanodes: 1"));
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Blocks waiting for replication: 0"));
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Blocks being replicated: 0"));
+    } finally {
+      if (reader != null)
+        reader.close();
+    }
   }
 
   @AfterClass
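
As a usage note, not part of the commit: the same report that the new test parses can typically be produced on a running cluster with "hadoop dfsadmin -metasave <filename>", which writes the file under the NameNode's hadoop.log.dir. Below is a hedged, standalone sketch of the same verification the test performs, scanning a metasave dump for the two replication counters; the class name and path handling are illustrative assumptions.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

// Hedged sketch: reads a metasave dump (for example the file written by
// FSNamesystem.metaSave() in the test above) and prints the counters that
// TestMetaSave asserts on.
public class MetasaveReplicationCheck {
  public static void main(String[] args) throws IOException {
    // args[0]: path to the dump, e.g. <hadoop.log.dir>/metasaveAfterDelete.out.txt
    BufferedReader reader = null;
    try {
      reader = new BufferedReader(new FileReader(args[0]));
      String line;
      while ((line = reader.readLine()) != null) {
        if (line.startsWith("Metasave: Blocks waiting for replication:")
            || line.startsWith("Metasave: Blocks being replicated:")) {
          // After HDFS-4878 both counters are expected to read 0 once the
          // files have been deleted.
          System.out.println(line);
        }
      }
    } finally {
      if (reader != null) {
        reader.close();
      }
    }
  }
}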


