hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hair...@apache.org
Subject svn commit: r719393 - in /hadoop/core/trunk: ./ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/org/apache/hadoop/hdfs/server/namenode/
Date Thu, 20 Nov 2008 22:00:30 GMT
Author: hairong
Date: Thu Nov 20 14:00:29 2008
New Revision: 719393

URL: http://svn.apache.org/viewvc?rev=719393&view=rev
Log:
HADOOP-4598. Setrep command skips under-replicated blocks. Contributed by Hairong Kuang.

Added:
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=719393&r1=719392&r2=719393&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Thu Nov 20 14:00:29 2008
@@ -183,6 +183,8 @@
 
     HADOOP-4691. Correct a link in the javadoc of IndexedSortable. (szetszwo)
 
+    HADOOP-4598. '-setrep' command skips under-replicated blocks. (hairong)
+
 Release 0.19.0 - 2008-11-18
 
   INCOMPATIBLE CHANGES

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=719393&r1=719392&r2=719393&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Nov 20 14:00:29 2008
@@ -887,8 +887,6 @@
       return true;
 
     // update needReplication priority queues
-    LOG.info("Increasing replication for file " + src 
-             + ". New replication is " + replication);
     for(int idx = 0; idx < fileBlocks.length; idx++)
       updateNeededReplications(fileBlocks[idx], 0, replication-oldRepl);
       
@@ -898,6 +896,9 @@
                + ". New replication is " + replication);
       for(int idx = 0; idx < fileBlocks.length; idx++)
         processOverReplicatedBlock(fileBlocks[idx], replication, null, null);
+    } else { // replication factor is increased
+      LOG.info("Increasing replication for file " + src 
+          + ". New replication is " + replication);
     }
     return true;
   }
@@ -1436,7 +1437,7 @@
    * @param b block
    * @param n datanode
    */
-  private void addToInvalidates(Block b, DatanodeInfo n) {
+  void addToInvalidates(Block b, DatanodeInfo n) {
     addToInvalidatesNoLog(b, n);
     NameNode.stateChangeLog.info("BLOCK* NameSystem.addToInvalidates: "
         + b.getBlockName() + " is added to invalidSet of " + n.getName());

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java?rev=719393&r1=719392&r2=719393&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java Thu Nov 20 14:00:29 2008
@@ -171,8 +171,7 @@
     if(oldPri != LEVEL && oldPri != curPri) {
       remove(block, oldPri);
     }
-    if(curPri != LEVEL && oldPri != curPri 
-        && priorityQueues.get(curPri).add(block)) {
+    if(curPri != LEVEL && priorityQueues.get(curPri).add(block)) {
       NameNode.stateChangeLog.debug(
                                     "BLOCK* NameSystem.UnderReplicationBlock.update:"
                                     + block

Added: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java?rev=719393&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java Thu Nov 20 14:00:29 2008
@@ -0,0 +1,44 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+
+import junit.framework.TestCase;
+
+public class TestUnderReplicatedBlocks extends TestCase {
+  public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
+    Configuration conf = new Configuration();
+    final short REPLICATION_FACTOR = 2;
+    final String FILE_NAME = "/testFile";
+    final Path FILE_PATH = new Path(FILE_NAME);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION_FACTOR+1, true, null);
+    try {
+      // create a file with one block with a replication factor of 2
+      final FileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
+      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
+      
+      // remove one replica from the blocksMap so block becomes under-replicated
+      // but the block does not get put into the under-replicated blocks queue
+      FSNamesystem namesystem = cluster.getNameNode().namesystem;
+      Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
+      DatanodeDescriptor dn = namesystem.blocksMap.nodeIterator(b).next();
+      namesystem.addToInvalidates(b, dn);
+      namesystem.blocksMap.removeNode(b, dn);
+      
+      // increment this file's replication factor
+      FsShell shell = new FsShell(conf);
+      assertEquals(0, shell.run(new String[]{
+          "-setrep", "-w", Integer.toString(1+REPLICATION_FACTOR), FILE_NAME}));
+    } finally {
+      cluster.shutdown();
+    }
+    
+  }
+
+}



Mime
View raw message