hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From aajis...@apache.org
Subject hadoop git commit: Revert "HDFS-6682. Add a metric to expose the timestamp of the oldest under-replicated block. (aajisaka)"
Date Wed, 29 Jul 2015 07:54:14 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9d8191604 -> dff49aee0


Revert "HDFS-6682. Add a metric to expose the timestamp of the oldest under-replicated block.
(aajisaka)"

This reverts commit 17319c4a659dd150ccfe80da81fbaee69ca8ef1e.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dff49aee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dff49aee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dff49aee

Branch: refs/heads/branch-2
Commit: dff49aee08187744fdb911a4c1005a15dab95954
Parents: 9d81916
Author: Akira Ajisaka <aajisaka@apache.org>
Authored: Wed Jul 29 16:52:54 2015 +0900
Committer: Akira Ajisaka <aajisaka@apache.org>
Committed: Wed Jul 29 16:52:54 2015 +0900

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md  |  1 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 --
 .../server/blockmanagement/BlockManager.java    |  4 --
 .../blockmanagement/UnderReplicatedBlocks.java  | 33 ++------------
 .../hdfs/server/namenode/FSNamesystem.java      |  9 +---
 .../TestUnderReplicatedBlocks.java              | 48 --------------------
 6 files changed, 5 insertions(+), 93 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dff49aee/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 7461374..7005986 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -200,7 +200,6 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
 | Name | Description |
 |:---- |:---- |
 | `MissingBlocks` | Current number of missing blocks |
-| `TimeOfTheOldestBlockToBeReplicated` | The timestamp of the oldest block to be replicated. If there are no under-replicated or corrupt blocks, return 0. |
 | `ExpiredHeartbeats` | Total number of expired heartbeats |
 | `TransactionsSinceLastCheckpoint` | Total number of transactions since last checkpoint |
 | `TransactionsSinceLastLogRoll` | Total number of transactions since last edit log roll |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dff49aee/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 28c455e..537c06d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -404,9 +404,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8730. Clean up the import statements in ClientProtocol.
     (Takanobu Asanuma via wheat9)
 
-    HDFS-6682. Add a metric to expose the timestamp of the oldest
-    under-replicated block. (aajisaka)
-
     HDFS-8735. Inotify: All events classes should implement toString() API.
     (Surendra Singh Lilhore via aajisaka)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dff49aee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 56b4f8c..3f461e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -170,10 +170,6 @@ public class BlockManager implements BlockStatsMXBean {
   public int getPendingDataNodeMessageCount() {
     return pendingDNMessages.count();
   }
-  /** Used by metrics. */
-  public long getTimeOfTheOldestBlockToBeReplicated() {
-    return neededReplications.getTimeOfTheOldestBlockToBeReplicated();
-  }
 
   /**replicationRecheckInterval is how often namenode checks for new replication work*/
   private final long replicationRecheckInterval;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dff49aee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
index 0d621c0..0fce69c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
@@ -18,15 +18,10 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Iterator;
-import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.Map;
-
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.util.Time;
 
 /**
  * Keep prioritized queues of under replicated blocks.
@@ -87,9 +82,6 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
 
   /** The number of corrupt blocks with replication factor 1 */
   private int corruptReplOneBlocks = 0;
-  /** Keep timestamp when a block is put into the queue. */
-  private final Map<BlockInfo, Long> timestampsMap =
-      Collections.synchronizedMap(new LinkedHashMap<BlockInfo, Long>());
 
   /** Create an object. */
   UnderReplicatedBlocks() {
@@ -99,14 +91,13 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
   }
 
   /**
-   * Empty the queues and timestamps.
+   * Empty the queues.
    */
   synchronized void clear() {
     for (int i = 0; i < LEVEL; i++) {
       priorityQueues.get(i).clear();
     }
     corruptReplOneBlocks = 0;
-    timestampsMap.clear();
   }
 
   /** Return the total number of under replication blocks */
@@ -129,20 +120,6 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
     return size;
   }
 
-  /**
-   * Return the smallest timestamp of the under-replicated/corrupt blocks.
-   * If there are no under-replicated or corrupt blocks, return 0.
-   */
-  long getTimeOfTheOldestBlockToBeReplicated() {
-    synchronized (timestampsMap) {
-      if (timestampsMap.isEmpty()) {
-        return 0;
-      }
-      // Since we are using LinkedHashMap, the first value is the smallest.
-      return timestampsMap.entrySet().iterator().next().getValue();
-    }
-  }
-
   /** Return the number of corrupt blocks */
   synchronized int getCorruptBlockSize() {
     return priorityQueues.get(QUEUE_WITH_CORRUPT_BLOCKS).size();
@@ -221,7 +198,7 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
               + " has only {} replicas and need {} replicas so is added to" +
               " neededReplications at priority level {}", block, curReplicas,
           expectedReplicas, priLevel);
-      timestampsMap.put(block, Time.now());
+
       return true;
     }
     return false;
@@ -266,9 +243,8 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
     if(priLevel >= 0 && priLevel < LEVEL
         && priorityQueues.get(priLevel).remove(block)) {
       NameNode.blockStateChangeLog.debug(
-          "BLOCK* NameSystem.UnderReplicationBlock.remove: Removing block {}" +
-              " from priority queue {}", block, priLevel);
-      timestampsMap.remove(block);
+        "BLOCK* NameSystem.UnderReplicationBlock.remove: Removing block {}" +
+            " from priority queue {}", block, priLevel);
       return true;
     } else {
       // Try to remove the block from all queues if the block was
@@ -278,7 +254,6 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
           NameNode.blockStateChangeLog.debug(
               "BLOCK* NameSystem.UnderReplicationBlock.remove: Removing block" +
                   " {} from priority queue {}", block, priLevel);
-          timestampsMap.remove(block);
           return true;
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dff49aee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c479fd1..f2adeb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3767,14 +3767,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     // not locking
     return blockManager.getMissingReplOneBlocksCount();
   }
-
-  @Metric({"TimeOfTheOldestBlockToBeReplicated",
-      "The timestamp of the oldest block to be replicated. If there are no" +
-      "under-replicated or corrupt blocks, return 0."})
-  public long getTimeOfTheOldestBlockToBeReplicated() {
-    return blockManager.getTimeOfTheOldestBlockToBeReplicated();
-  }
-
+  
   @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
   public int getExpiredHeartbeats() {
     return datanodeStatistics.getExpiredHeartbeats();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dff49aee/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
index af2499b..334ece1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
@@ -28,9 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
 import java.util.Iterator;
@@ -142,50 +140,4 @@ public class TestUnderReplicatedBlocks {
 
   }
 
-  @Test
-  public void testGetTimeOfTheOldestBlockToBeReplicated() {
-    UnderReplicatedBlocks blocks = new UnderReplicatedBlocks();
-    BlockInfo block1 = new BlockInfoContiguous(new Block(1), (short) 1);
-    BlockInfo block2 = new BlockInfoContiguous(new Block(2), (short) 1);
-
-    // if there are no under-replicated or corrupt blocks, return 0
-    assertEquals(blocks.getTimeOfTheOldestBlockToBeReplicated(), 0L);
-
-    // add block1, add block2, remove block1, remove block2
-    long time1 = Time.now();
-    blocks.add(block1, 1, 0, 3);
-    long time2 = Time.now();
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time1);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time2);
-
-    blocks.add(block2, 2, 0, 3);
-    long time3 = Time.now();
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time1);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time2);
-
-    blocks.remove(block1, UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time2);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time3);
-
-    blocks.remove(block2, UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);
-    assertEquals(blocks.getTimeOfTheOldestBlockToBeReplicated(), 0L);
-
-    // add block2, add block1, remove block1, remove block2
-    time1 = Time.now();
-    blocks.add(block2, 2, 0, 3);
-    time2 = Time.now();
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time1);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time2);
-
-    blocks.add(block1, 1, 0, 3);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time1);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time2);
-
-    blocks.remove(block1, UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time1);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time2);
-
-    blocks.remove(block2, UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);
-    assertEquals(blocks.getTimeOfTheOldestBlockToBeReplicated(), 0L);
-  }
 }


Mime
View raw message