hadoop-common-commits mailing list archives

From: l..@apache.org
Subject: [2/2] hadoop git commit: HDFS-10999. Introduce separate stats for Replicated and Erasure Coded Blocks apart from the current Aggregated stats. (Manoj Govindassamy via lei)
Date: Wed, 14 Jun 2017 17:45:35 GMT
HDFS-10999. Introduce separate stats for Replicated and Erasure Coded Blocks apart from the current Aggregated stats. (Manoj Govindassamy via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/999c8fcb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/999c8fcb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/999c8fcb

Branch: refs/heads/trunk
Commit: 999c8fcbefc876d9c26c23c5b87a64a81e4f113e
Parents: 6ed54f3
Author: Lei Xu <lei@apache.org>
Authored: Wed Jun 14 10:44:19 2017 -0700
Committer: Lei Xu <lei@apache.org>
Committed: Wed Jun 14 10:44:19 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   7 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |   6 +-
 .../hadoop/hdfs/protocol/BlocksStats.java       |  90 +++++
 .../hadoop/hdfs/protocol/ClientProtocol.java    |  37 ++-
 .../hdfs/protocol/ECBlockGroupsStats.java       |  83 +++++
 .../ClientNamenodeProtocolTranslatorPB.java     |  32 ++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  53 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |  27 ++
 ...tNamenodeProtocolServerSideTranslatorPB.java |  26 ++
 .../server/blockmanagement/BlockManager.java    |  57 +++-
 .../blockmanagement/BlockManagerSafeMode.java   |  29 +-
 .../blockmanagement/CorruptReplicasMap.java     | 103 +++---
 .../blockmanagement/InvalidateBlocks.java       | 198 ++++++++---
 .../blockmanagement/LowRedundancyBlocks.java    | 125 +++++--
 .../hdfs/server/namenode/FSNamesystem.java      | 200 ++++++++++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  16 +
 .../metrics/ECBlockGroupsStatsMBean.java        |  59 ++++
 .../namenode/metrics/FSNamesystemMBean.java     |  26 +-
 .../metrics/ReplicatedBlocksStatsMBean.java     |  63 ++++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   2 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |  47 +++
 .../apache/hadoop/hdfs/TestFileCorruption.java  |   2 +-
 .../hadoop/hdfs/TestMaintenanceState.java       |   2 +-
 .../hadoop/hdfs/TestMissingBlocksAlert.java     |   4 +-
 .../TestComputeInvalidateWork.java              | 150 +++++++--
 .../blockmanagement/TestCorruptReplicaInfo.java | 199 +++++++-----
 .../TestLowRedundancyBlockQueues.java           |  73 +++--
 .../TestUnderReplicatedBlocks.java              |  42 ++-
 .../datanode/TestReadOnlySharedStorage.java     |   4 +-
 .../fsdataset/impl/TestLazyPersistFiles.java    |   2 +-
 .../server/namenode/TestAddStripedBlocks.java   |   2 +
 .../namenode/TestDecommissioningStatus.java     |   8 +-
 .../server/namenode/TestNameNodeMXBean.java     |  52 ++-
 .../namenode/TestReconstructStripedBlocks.java  |  83 +++++
 .../namenode/metrics/TestNameNodeMetrics.java   | 325 ++++++++++++++++---
 35 files changed, 1866 insertions(+), 368 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 4fa7c5f..ec142f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1919,12 +1919,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   }
 
   /**
-   * Returns count of blocks with one of more replica missing.
+   * Returns aggregated count of blocks with less redundancy.
    * @throws IOException
    */
-  public long getUnderReplicatedBlocksCount() throws IOException {
-    return getStateByIndex(ClientProtocol.
-        GET_STATS_UNDER_REPLICATED_IDX);
+  public long getLowRedundancyBlocksCount() throws IOException {
+    return getStateByIndex(ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index b65f9c2..344f574 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1266,12 +1266,12 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   /**
-   * Returns count of blocks with one of more replica missing.
+   * Returns aggregated count of blocks with less redundancy.
    *
    * @throws IOException
    */
-  public long getUnderReplicatedBlocksCount() throws IOException {
-    return dfs.getUnderReplicatedBlocksCount();
+  public long getLowRedundancyBlocksCount() throws IOException {
+    return dfs.getLowRedundancyBlocksCount();
   }
 
   /**
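
The rename above is an API change for downstream callers. A minimal
caller-side sketch (not from this patch; the cluster configuration is
assumed):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LowRedundancyReport {
      public static void main(String[] args) throws IOException {
        // Assumes fs.defaultFS in the loaded config points at an HDFS cluster.
        Configuration conf = new Configuration();
        try (DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf)) {
          // Replaces the removed getUnderReplicatedBlocksCount().
          System.out.println("Aggregated low redundancy blocks: "
              + dfs.getLowRedundancyBlocksCount());
        }
      }
    }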

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlocksStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlocksStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlocksStats.java
new file mode 100644
index 0000000..7eb30ca
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlocksStats.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Get statistics pertaining to blocks of type {@link BlockType#CONTIGUOUS}
+ * in the filesystem.
+ * <p>
+ * @see ClientProtocol#getBlocksStats()
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class BlocksStats {
+  private final long lowRedundancyBlocksStat;
+  private final long corruptBlocksStat;
+  private final long missingBlocksStat;
+  private final long missingReplicationOneBlocksStat;
+  private final long bytesInFutureBlocksStat;
+  private final long pendingDeletionBlocksStat;
+
+  public BlocksStats(long lowRedundancyBlocksStat,
+      long corruptBlocksStat, long missingBlocksStat,
+      long missingReplicationOneBlocksStat, long bytesInFutureBlocksStat,
+      long pendingDeletionBlocksStat) {
+    this.lowRedundancyBlocksStat = lowRedundancyBlocksStat;
+    this.corruptBlocksStat = corruptBlocksStat;
+    this.missingBlocksStat = missingBlocksStat;
+    this.missingReplicationOneBlocksStat = missingReplicationOneBlocksStat;
+    this.bytesInFutureBlocksStat = bytesInFutureBlocksStat;
+    this.pendingDeletionBlocksStat = pendingDeletionBlocksStat;
+  }
+
+  public long getLowRedundancyBlocksStat() {
+    return lowRedundancyBlocksStat;
+  }
+
+  public long getCorruptBlocksStat() {
+    return corruptBlocksStat;
+  }
+
+  public long getMissingReplicaBlocksStat() {
+    return missingBlocksStat;
+  }
+
+  public long getMissingReplicationOneBlocksStat() {
+    return missingReplicationOneBlocksStat;
+  }
+
+  public long getBytesInFutureBlocksStat() {
+    return bytesInFutureBlocksStat;
+  }
+
+  public long getPendingDeletionBlocksStat() {
+    return pendingDeletionBlocksStat;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder statsBuilder = new StringBuilder();
+    statsBuilder.append("ReplicatedBlocksStats=[")
+        .append("LowRedundancyBlocks=").append(getLowRedundancyBlocksStat())
+        .append(", CorruptBlocks=").append(getCorruptBlocksStat())
+        .append(", MissingReplicaBlocks=").append(getMissingReplicaBlocksStat())
+        .append(", MissingReplicationOneBlocks=").append(
+            getMissingReplicationOneBlocksStat())
+        .append(", BytesInFutureBlocks=").append(getBytesInFutureBlocksStat())
+        .append(", PendingDeletionBlocks=").append(
+            getPendingDeletionBlocksStat())
+        .append("]");
+    return statsBuilder.toString();
+  }
+}
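
A statement-level sketch (not from this patch) of the value type above;
the counter values are arbitrary:

    import org.apache.hadoop.hdfs.protocol.BlocksStats;

    // Constructor argument order: lowRedundancy, corrupt, missing,
    // missingReplicationOne, bytesInFutureBlocks, pendingDeletion.
    BlocksStats stats = new BlocksStats(10L, 2L, 1L, 0L, 0L, 5L);
    long notYetDeleted = stats.getPendingDeletionBlocksStat();  // 5
    // toString() renders all six counters, e.g.
    // ReplicatedBlocksStats=[LowRedundancyBlocks=10, CorruptBlocks=2, ...]
    System.out.println(stats);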

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index bf8eb4e..82e5c32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -731,10 +731,19 @@ public interface ClientProtocol {
   @Idempotent
   boolean recoverLease(String src, String clientName) throws IOException;
 
+  /**
+   * Constants to index the array of aggregated stats returned by
+   * {@link #getStats()}.
+   */
   int GET_STATS_CAPACITY_IDX = 0;
   int GET_STATS_USED_IDX = 1;
   int GET_STATS_REMAINING_IDX = 2;
+  /**
+   * Use {@link #GET_STATS_LOW_REDUNDANCY_IDX} instead.
+   */
+  @Deprecated
   int GET_STATS_UNDER_REPLICATED_IDX = 3;
+  int GET_STATS_LOW_REDUNDANCY_IDX = 3;
   int GET_STATS_CORRUPT_BLOCKS_IDX = 4;
   int GET_STATS_MISSING_BLOCKS_IDX = 5;
   int GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX = 6;
@@ -743,27 +752,41 @@ public interface ClientProtocol {
   int STATS_ARRAY_LENGTH = 9;
 
   /**
-   * Get a set of statistics about the filesystem.
-   * Right now, only eight values are returned.
+   * Get an array of aggregated statistics combining blocks of both type
+   * {@link BlockType#CONTIGUOUS} and {@link BlockType#STRIPED} in the
+   * filesystem. Use public constants like {@link #GET_STATS_CAPACITY_IDX} in
+   * place of actual numbers to index into the array.
    * <ul>
    * <li> [0] contains the total storage capacity of the system, in bytes.</li>
    * <li> [1] contains the total used space of the system, in bytes.</li>
    * <li> [2] contains the available storage of the system, in bytes.</li>
-   * <li> [3] contains number of under replicated blocks in the system.</li>
-   * <li> [4] contains number of blocks with a corrupt replica. </li>
+   * <li> [3] contains number of low redundancy blocks in the system.</li>
+   * <li> [4] contains number of corrupt blocks. </li>
    * <li> [5] contains number of blocks without any good replicas left. </li>
    * <li> [6] contains number of blocks which have replication factor
    *          1 and have lost the only replica. </li>
-   * <li> [7] contains number of bytes  that are at risk for deletion. </li>
+   * <li> [7] contains number of bytes that are at risk for deletion. </li>
    * <li> [8] contains number of pending deletion blocks. </li>
    * </ul>
-   * Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
-   * actual numbers to index into the array.
    */
   @Idempotent
   long[] getStats() throws IOException;
 
   /**
+   * Get statistics pertaining to blocks of type {@link BlockType#CONTIGUOUS}
+   * in the filesystem.
+   */
+  @Idempotent
+  BlocksStats getBlocksStats() throws IOException;
+
+  /**
+   * Get statistics pertaining to blocks of type {@link BlockType#STRIPED}
+   * in the filesystem.
+   */
+  @Idempotent
+  ECBlockGroupsStats getECBlockGroupsStats() throws IOException;
+
+  /**
    * Get a report on the system's current datanodes.
    * One DatanodeInfo object is returned for each DataNode.
    * Return live datanodes if type is LIVE; dead datanodes if type is DEAD;
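
A sketch of the indexing contract documented above (assuming `namenode`
is any ClientProtocol implementation, e.g. an RPC proxy, and IOException
handling is elided):

    long[] stats = namenode.getStats();
    long capacity = stats[ClientProtocol.GET_STATS_CAPACITY_IDX];
    // GET_STATS_UNDER_REPLICATED_IDX is deprecated but still maps to the
    // same slot (3) as its replacement, so old callers keep working:
    long lowRedundancy = stats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX];
    // Per-type breakdowns of (a subset of) these aggregates:
    BlocksStats replicated = namenode.getBlocksStats();
    ECBlockGroupsStats ecGroups = namenode.getECBlockGroupsStats();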

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupsStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupsStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupsStats.java
new file mode 100644
index 0000000..80cf262
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupsStats.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Get statistics pertaining to blocks of type {@link BlockType#STRIPED}
+ * in the filesystem.
+ * <p>
+ * @see ClientProtocol#getECBlockGroupsStats()
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class ECBlockGroupsStats {
+  private final long lowRedundancyBlockGroupsStat;
+  private final long corruptBlockGroupsStat;
+  private final long missingBlockGroupsStat;
+  private final long bytesInFutureBlockGroupsStat;
+  private final long pendingDeletionBlockGroupsStat;
+
+  public ECBlockGroupsStats(long lowRedundancyBlockGroupsStat, long
+      corruptBlockGroupsStat, long missingBlockGroupsStat, long
+      bytesInFutureBlockGroupsStat, long pendingDeletionBlockGroupsStat) {
+    this.lowRedundancyBlockGroupsStat = lowRedundancyBlockGroupsStat;
+    this.corruptBlockGroupsStat = corruptBlockGroupsStat;
+    this.missingBlockGroupsStat = missingBlockGroupsStat;
+    this.bytesInFutureBlockGroupsStat = bytesInFutureBlockGroupsStat;
+    this.pendingDeletionBlockGroupsStat = pendingDeletionBlockGroupsStat;
+  }
+
+  public long getBytesInFutureBlockGroupsStat() {
+    return bytesInFutureBlockGroupsStat;
+  }
+
+  public long getCorruptBlockGroupsStat() {
+    return corruptBlockGroupsStat;
+  }
+
+  public long getLowRedundancyBlockGroupsStat() {
+    return lowRedundancyBlockGroupsStat;
+  }
+
+  public long getMissingBlockGroupsStat() {
+    return missingBlockGroupsStat;
+  }
+
+  public long getPendingDeletionBlockGroupsStat() {
+    return pendingDeletionBlockGroupsStat;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder statsBuilder = new StringBuilder();
+    statsBuilder.append("ECBlockGroupsStats=[")
+        .append("LowRedundancyBlockGroups=").append(
+            getLowRedundancyBlockGroupsStat())
+        .append(", CorruptBlockGroups=").append(getCorruptBlockGroupsStat())
+        .append(", MissingBlockGroups=").append(getMissingBlockGroupsStat())
+        .append(", BytesInFutureBlockGroups=").append(
+            getBytesInFutureBlockGroupsStat())
+        .append(", PendingDeletionBlockGroups=").append(
+            getPendingDeletionBlockGroupsStat())
+        .append("]");
+    return statsBuilder.toString();
+  }
+}
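
Together with BlocksStats, this class decomposes the aggregated counters.
A hedged sketch of the intended relationship (again assuming a
ClientProtocol proxy `namenode`; this invariant is inferred from the
commit summary, not stated in the patch):

    BlocksStats blocksStats = namenode.getBlocksStats();
    ECBlockGroupsStats ecStats = namenode.getECBlockGroupsStats();
    // The aggregated corrupt count from getStats() should equal the sum
    // of the two per-type counts (same idea for the other shared counters).
    long corruptTotal = blocksStats.getCorruptBlocksStat()
        + ecStats.getCorruptBlockGroupsStat();
    assert corruptTotal
        == namenode.getStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX];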

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 19127d6..f29de15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -70,6 +71,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -114,6 +116,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFil
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupsStatsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsBlocksStatsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;
@@ -228,6 +232,14 @@ public class ClientNamenodeProtocolTranslatorPB implements
   private final static GetFsStatusRequestProto VOID_GET_FSSTATUS_REQUEST =
       GetFsStatusRequestProto.newBuilder().build();
 
+  private final static GetFsBlocksStatsRequestProto
+      VOID_GET_FS_REPLICABLOCKS_STATS_REQUEST =
+      GetFsBlocksStatsRequestProto.newBuilder().build();
+
+  private final static GetFsECBlockGroupsStatsRequestProto
+      VOID_GET_FS_ECBLOCKGROUPS_STATS_REQUEST =
+      GetFsECBlockGroupsStatsRequestProto.newBuilder().build();
+
   private final static RollEditsRequestProto VOID_ROLLEDITS_REQUEST =
       RollEditsRequestProto.getDefaultInstance();
 
@@ -669,6 +681,26 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
+  public BlocksStats getBlocksStats() throws IOException {
+    try {
+      return PBHelperClient.convert(rpcProxy.getFsBlocksStats(null,
+          VOID_GET_FS_REPLICABLOCKS_STATS_REQUEST));
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public ECBlockGroupsStats getECBlockGroupsStats() throws IOException {
+    try {
+      return PBHelperClient.convert(rpcProxy.getFsECBlockGroupsStats(null,
+          VOID_GET_FS_ECBLOCKGROUPS_STATS_REQUEST));
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
   public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
       throws IOException {
     GetDatanodeReportRequestProto req = GetDatanodeReportRequestProto

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 1c2c83f..1716fba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -76,6 +76,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -89,6 +90,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -115,6 +117,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupsStatsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsBlocksStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
@@ -1717,6 +1721,21 @@ public class PBHelperClient {
     return result;
   }
 
+  public static BlocksStats convert(
+      GetFsBlocksStatsResponseProto res) {
+    return new BlocksStats(res.getLowRedundancy(),
+        res.getCorruptBlocks(), res.getMissingBlocks(),
+        res.getMissingReplOneBlocks(), res.getBlocksInFuture(),
+        res.getPendingDeletionBlocks());
+  }
+
+  public static ECBlockGroupsStats convert(
+      GetFsECBlockGroupsStatsResponseProto res) {
+    return new ECBlockGroupsStats(res.getLowRedundancy(),
+        res.getCorruptBlocks(), res.getMissingBlocks(),
+        res.getBlocksInFuture(), res.getPendingDeletionBlocks());
+  }
+
   public static DatanodeReportTypeProto convert(DatanodeReportType t) {
     switch (t) {
     case ALL: return DatanodeReportTypeProto.ALL;
@@ -2124,6 +2143,40 @@ public class PBHelperClient {
     return result.build();
   }
 
+  public static GetFsBlocksStatsResponseProto convert(
+      BlocksStats blocksStats) {
+    GetFsBlocksStatsResponseProto.Builder result =
+        GetFsBlocksStatsResponseProto.newBuilder();
+    result.setLowRedundancy(
+        blocksStats.getLowRedundancyBlocksStat());
+    result.setCorruptBlocks(
+        blocksStats.getCorruptBlocksStat());
+    result.setMissingBlocks(
+        blocksStats.getMissingReplicaBlocksStat());
+    result.setMissingReplOneBlocks(
+        blocksStats.getMissingReplicationOneBlocksStat());
+    result.setBlocksInFuture(
+        blocksStats.getBytesInFutureBlocksStat());
+    result.setPendingDeletionBlocks(
+        blocksStats.getPendingDeletionBlocksStat());
+    return result.build();
+  }
+
+  public static GetFsECBlockGroupsStatsResponseProto convert(
+      ECBlockGroupsStats ecBlockGroupsStats) {
+    GetFsECBlockGroupsStatsResponseProto.Builder result =
+        GetFsECBlockGroupsStatsResponseProto.newBuilder();
+    result.setLowRedundancy(
+        ecBlockGroupsStats.getLowRedundancyBlockGroupsStat());
+    result.setCorruptBlocks(ecBlockGroupsStats.getCorruptBlockGroupsStat());
+    result.setMissingBlocks(ecBlockGroupsStats.getMissingBlockGroupsStat());
+    result.setBlocksInFuture(
+        ecBlockGroupsStats.getBytesInFutureBlockGroupsStat());
+    result.setPendingDeletionBlocks(
+        ecBlockGroupsStats.getPendingDeletionBlockGroupsStat());
+    return result.build();
+  }
+
   public static DatanodeReportType convert(DatanodeReportTypeProto t) {
     switch (t) {
     case ALL: return DatanodeReportType.ALL;
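
The two pairs of converters added above are inverses of each other; a
minimal round-trip check (illustrative only, with arbitrary values):

    BlocksStats in = new BlocksStats(1L, 2L, 3L, 4L, 5L, 6L);
    GetFsBlocksStatsResponseProto proto = PBHelperClient.convert(in);
    BlocksStats out = PBHelperClient.convert(proto);
    // Every counter should survive the PB encode/decode unchanged.
    assert in.getLowRedundancyBlocksStat() == out.getLowRedundancyBlocksStat();
    assert in.getMissingReplicationOneBlocksStat()
        == out.getMissingReplicationOneBlocksStat();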

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index fb42271..3b1504c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -327,6 +327,29 @@ message GetFsStatsResponseProto {
   optional uint64 pending_deletion_blocks = 9;
 }
 
+message GetFsBlocksStatsRequestProto { // no input parameters
+}
+
+message GetFsBlocksStatsResponseProto {
+  required uint64 low_redundancy = 1;
+  required uint64 corrupt_blocks = 2;
+  required uint64 missing_blocks = 3;
+  required uint64 missing_repl_one_blocks = 4;
+  required uint64 blocks_in_future = 5;
+  required uint64 pending_deletion_blocks = 6;
+}
+
+message GetFsECBlockGroupsStatsRequestProto { // no input parameters
+}
+
+message GetFsECBlockGroupsStatsResponseProto {
+  required uint64 low_redundancy = 1;
+  required uint64 corrupt_blocks = 2;
+  required uint64 missing_blocks = 3;
+  required uint64 blocks_in_future = 4;
+  required uint64 pending_deletion_blocks = 5;
+}
+
 enum DatanodeReportTypeProto {  // type of the datanode report
   ALL = 1;
   LIVE = 2;
@@ -792,6 +815,10 @@ service ClientNamenodeProtocol {
   rpc recoverLease(RecoverLeaseRequestProto)
       returns(RecoverLeaseResponseProto);
   rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto);
+  rpc getFsBlocksStats(GetFsBlocksStatsRequestProto)
+      returns (GetFsBlocksStatsResponseProto);
+  rpc getFsECBlockGroupsStats(GetFsECBlockGroupsStatsRequestProto)
+      returns (GetFsECBlockGroupsStatsResponseProto);
   rpc getDatanodeReport(GetDatanodeReportRequestProto)
       returns(GetDatanodeReportResponseProto);
   rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index f10ce44..ba59ed8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -124,7 +124,11 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFil
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsBlocksStatsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupsStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsBlocksStatsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupsStatsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
@@ -746,6 +750,28 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
+  public GetFsBlocksStatsResponseProto getFsBlocksStats(
+      RpcController controller, GetFsBlocksStatsRequestProto request)
+      throws ServiceException {
+    try {
+      return PBHelperClient.convert(server.getBlocksStats());
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GetFsECBlockGroupsStatsResponseProto getFsECBlockGroupsStats(
+      RpcController controller, GetFsECBlockGroupsStatsRequestProto request)
+      throws ServiceException {
+    try {
+      return PBHelperClient.convert(server.getECBlockGroupsStats());
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public GetDatanodeReportResponseProto getDatanodeReport(
       RpcController controller, GetDatanodeReportRequestProto req)
       throws ServiceException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 42ee850..2ef80a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -195,7 +195,7 @@ public class BlockManager implements BlockStatsMXBean {
     return pendingReconstructionBlocksCount;
   }
   /** Used by metrics */
-  public long getUnderReplicatedBlocksCount() {
+  public long getLowRedundancyBlocksCount() {
     return lowRedundancyBlocksCount;
   }
   /** Used by metrics */
@@ -231,6 +231,51 @@ public class BlockManager implements BlockStatsMXBean {
     return pendingReconstruction.getNumTimedOuts();
   }
 
+  /** Used by metrics. */
+  public long getLowRedundancyBlocksStat() {
+    return neededReconstruction.getLowRedundancyBlocksStat();
+  }
+
+  /** Used by metrics. */
+  public long getCorruptBlocksStat() {
+    return corruptReplicas.getCorruptBlocksStat();
+  }
+
+  /** Used by metrics. */
+  public long getMissingBlocksStat() {
+    return neededReconstruction.getCorruptBlocksStat();
+  }
+
+  /** Used by metrics. */
+  public long getMissingReplicationOneBlocksStat() {
+    return neededReconstruction.getCorruptReplicationOneBlocksStat();
+  }
+
+  /** Used by metrics. */
+  public long getPendingDeletionBlocksStat() {
+    return invalidateBlocks.getBlocksStat();
+  }
+
+  /** Used by metrics. */
+  public long getLowRedundancyECBlockGroupsStat() {
+    return neededReconstruction.getLowRedundancyECBlockGroupsStat();
+  }
+
+  /** Used by metrics. */
+  public long getCorruptECBlockGroupsStat() {
+    return corruptReplicas.getCorruptECBlockGroupsStat();
+  }
+
+  /** Used by metrics. */
+  public long getMissingECBlockGroupsStat() {
+    return neededReconstruction.getCorruptECBlockGroupsStat();
+  }
+
+  /** Used by metrics. */
+  public long getPendingDeletionECBlockGroupsStat() {
+    return invalidateBlocks.getECBlockGroupsStat();
+  }
+
   /**
    * redundancyRecheckInterval is how often namenode checks for new
    * reconstruction work.
@@ -2244,6 +2289,14 @@ public class BlockManager implements BlockStatsMXBean {
     return bmSafeMode.getBytesInFuture();
   }
 
+  public long getBytesInFutureReplicatedBlocksStat() {
+    return bmSafeMode.getBytesInFutureBlocks();
+  }
+
+  public long getBytesInFutureStripedBlocksStat() {
+    return bmSafeMode.getBytesInFutureECBlockGroups();
+  }
+
   /**
    * Removes the blocks from blocksmap and updates the safemode blocks total.
    * @param blocks An instance of {@link BlocksMapUpdateInfo} which contains a
@@ -4245,7 +4298,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   public long getMissingReplOneBlocksCount() {
     // not locking
-    return this.neededReconstruction.getCorruptReplOneBlockSize();
+    return this.neededReconstruction.getCorruptReplicationOneBlockSize();
   }
 
   public BlockInfo addBlockCollection(BlockInfo block,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
index ed304f2..daa3d8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
@@ -41,6 +41,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.LongAdder;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
@@ -116,7 +117,9 @@ class BlockManagerSafeMode {
   private Counter awaitingReportedBlocksCounter;
 
   /** Keeps track of how many bytes are in Future Generation blocks. */
-  private final AtomicLong numberOfBytesInFutureBlocks = new AtomicLong();
+  private final LongAdder bytesInFutureBlocks = new LongAdder();
+  private final LongAdder bytesInFutureECBlockGroups = new LongAdder();
+
   /** Reports if Name node was started with Rollback option. */
   private final boolean inRollBack;
 
@@ -358,12 +361,13 @@ class BlockManagerSafeMode {
   boolean leaveSafeMode(boolean force) {
     assert namesystem.hasWriteLock() : "Leaving safe mode needs write lock!";
 
-    final long bytesInFuture = numberOfBytesInFutureBlocks.get();
+    final long bytesInFuture = getBytesInFuture();
     if (bytesInFuture > 0) {
       if (force) {
         LOG.warn("Leaving safe mode due to forceExit. This will cause a data "
             + "loss of {} byte(s).", bytesInFuture);
-        numberOfBytesInFutureBlocks.set(0);
+        bytesInFutureBlocks.reset();
+        bytesInFutureECBlockGroups.reset();
       } else {
         LOG.error("Refusing to leave safe mode without a force flag. " +
             "Exiting safe mode will cause a deletion of {} byte(s). Please " +
@@ -481,9 +485,12 @@ class BlockManagerSafeMode {
     }
 
     if (!blockManager.getShouldPostponeBlocksFromFuture() &&
-        !inRollBack &&
-        blockManager.isGenStampInFuture(brr)) {
-      numberOfBytesInFutureBlocks.addAndGet(brr.getBytesOnDisk());
+        !inRollBack && blockManager.isGenStampInFuture(brr)) {
+      if (BlockIdManager.isStripedBlockID(brr.getBlockId())) {
+        bytesInFutureECBlockGroups.add(brr.getBytesOnDisk());
+      } else {
+        bytesInFutureBlocks.add(brr.getBytesOnDisk());
+      }
     }
   }
 
@@ -494,7 +501,15 @@ class BlockManagerSafeMode {
    * @return Bytes in future
    */
   long getBytesInFuture() {
-    return numberOfBytesInFutureBlocks.get();
+    return getBytesInFutureBlocks() + getBytesInFutureECBlockGroups();
+  }
+
+  long getBytesInFutureBlocks() {
+    return bytesInFutureBlocks.longValue();
+  }
+
+  long getBytesInFutureECBlockGroups() {
+    return bytesInFutureECBlockGroups.longValue();
   }
 
   void close() {
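
The switch from AtomicLong to LongAdder trades one contended cell for
striped per-thread cells: increments stay cheap under concurrent block
reports, while longValue() returns a momentary sum. A standalone
illustration of the three operations used above:

    import java.util.concurrent.atomic.LongAdder;

    LongAdder bytesInFuture = new LongAdder();
    bytesInFuture.add(4096);                    // accumulate per block report
    long snapshot = bytesInFuture.longValue();  // metrics read (may be racy)
    bytesInFuture.reset();                      // forced safe-mode exit clears it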

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index 0442588..d158b64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -17,17 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
-import java.util.TreeMap;
+import java.util.concurrent.atomic.LongAdder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.Server;
 
@@ -58,6 +57,9 @@ public class CorruptReplicasMap{
   private final Map<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
     new HashMap<Block, Map<DatanodeDescriptor, Reason>>();
 
+  private final LongAdder totalCorruptBlocks = new LongAdder();
+  private final LongAdder totalCorruptECBlockGroups = new LongAdder();
+
   /**
    * Mark the block belonging to datanode as corrupt.
    *
@@ -72,6 +74,7 @@ public class CorruptReplicasMap{
     if (nodes == null) {
       nodes = new HashMap<DatanodeDescriptor, Reason>();
       corruptReplicasMap.put(blk, nodes);
+      incrementBlockStat(blk);
     }
     
     String reasonText;
@@ -97,13 +100,15 @@ public class CorruptReplicasMap{
   }
 
   /**
-   * Remove Block from CorruptBlocksMap
-   *
+   * Remove Block from CorruptBlocksMap.
    * @param blk Block to be removed
    */
   void removeFromCorruptReplicasMap(Block blk) {
     if (corruptReplicasMap != null) {
-      corruptReplicasMap.remove(blk);
+      Map<DatanodeDescriptor, Reason> value = corruptReplicasMap.remove(blk);
+      if (value != null) {
+        decrementBlockStat(blk);
+      }
     }
   }
 
@@ -121,8 +126,9 @@ public class CorruptReplicasMap{
   boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode,
       Reason reason) {
     Map <DatanodeDescriptor, Reason> datanodes = corruptReplicasMap.get(blk);
-    if (datanodes==null)
+    if (datanodes == null) {
       return false;
+    }
 
     // if reasons can be compared but don't match, return false.
     Reason storedReason = datanodes.get(datanode);
@@ -135,12 +141,28 @@ public class CorruptReplicasMap{
       if (datanodes.isEmpty()) {
         // remove the block if there is no more corrupted replicas
         corruptReplicasMap.remove(blk);
+        decrementBlockStat(blk);
       }
       return true;
     }
     return false;
   }
-    
+
+  private void incrementBlockStat(Block block) {
+    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+      totalCorruptECBlockGroups.increment();
+    } else {
+      totalCorruptBlocks.increment();
+    }
+  }
+
+  private void decrementBlockStat(Block block) {
+    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+      totalCorruptECBlockGroups.decrement();
+    } else {
+      totalCorruptBlocks.decrement();
+    }
+  }
 
   /**
    * Get Nodes which have corrupt replicas of Block
@@ -188,49 +210,30 @@ public class CorruptReplicasMap{
    * @param startingBlockId Block id from which to start. If null, start at
    *  beginning.
    * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
-   *
    */
   @VisibleForTesting
-  long[] getCorruptReplicaBlockIdsForTesting(int numExpectedBlocks,
-                                   Long startingBlockId) {
+  long[] getCorruptBlockIdsForTesting(BlockType blockType,
+      int numExpectedBlocks, Long startingBlockId) {
     if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
       return null;
     }
-    
-    Iterator<Block> blockIt = 
-        new TreeMap<>(corruptReplicasMap).keySet().iterator();
-    
-    // if the starting block id was specified, iterate over keys until
-    // we find the matching block. If we find a matching block, break
-    // to leave the iterator on the next block after the specified block. 
-    if (startingBlockId != null) {
-      boolean isBlockFound = false;
-      while (blockIt.hasNext()) {
-        Block b = blockIt.next();
-        if (b.getBlockId() == startingBlockId) {
-          isBlockFound = true;
-          break; 
-        }
-      }
-      
-      if (!isBlockFound) {
-        return null;
-      }
-    }
-
-    ArrayList<Long> corruptReplicaBlockIds = new ArrayList<Long>();
-
-    // append up to numExpectedBlocks blockIds to our list
-    for(int i=0; i<numExpectedBlocks && blockIt.hasNext(); i++) {
-      corruptReplicaBlockIds.add(blockIt.next().getBlockId());
-    }
-    
-    long[] ret = new long[corruptReplicaBlockIds.size()];
-    for(int i=0; i<ret.length; i++) {
-      ret[i] = corruptReplicaBlockIds.get(i);
-    }
-    
-    return ret;
+    long cursorBlockId =
+        startingBlockId != null ? startingBlockId : Long.MIN_VALUE;
+    return corruptReplicasMap.keySet()
+        .stream()
+        .filter(r -> {
+          if (blockType == BlockType.STRIPED) {
+            return BlockIdManager.isStripedBlockID(r.getBlockId()) &&
+                r.getBlockId() >= cursorBlockId;
+          } else {
+            return !BlockIdManager.isStripedBlockID(r.getBlockId()) &&
+                r.getBlockId() >= cursorBlockId;
+          }
+        })
+        .sorted()
+        .limit(numExpectedBlocks)
+        .mapToLong(Block::getBlockId)
+        .toArray();
   }
 
   /**
@@ -263,4 +266,12 @@ public class CorruptReplicasMap{
       return null;
     }
   }
+
+  long getCorruptBlocksStat() {
+    return totalCorruptBlocks.longValue();
+  }
+
+  long getCorruptECBlockGroupsStat() {
+    return totalCorruptECBlockGroups.longValue();
+  }
 }
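
The rewritten test helper replaces the manual TreeMap iteration with a
stream pipeline. The same filter-sort-limit shape, distilled over plain
block IDs (the predicate here is a stand-in for
BlockIdManager.isStripedBlockID; not part of the patch):

    import java.util.Set;
    import java.util.function.LongPredicate;

    static long[] idsFromCursor(Set<Long> blockIds, LongPredicate isStriped,
        boolean wantStriped, long cursor, int limit) {
      return blockIds.stream()
          .mapToLong(Long::longValue)
          // keep only the requested block type, at or past the cursor
          .filter(id -> isStriped.test(id) == wantStriped && id >= cursor)
          .sorted()
          .limit(limit)
          .toArray();
    }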

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index 4e8bb58..7b6b8a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -23,8 +23,11 @@ import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.GregorianCalendar;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.LongAdder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -47,12 +50,12 @@ import org.slf4j.Logger;
  */
 @InterfaceAudience.Private
 class InvalidateBlocks {
-  /** Mapping: DatanodeInfo -> Collection of Blocks */
-  private final Map<DatanodeInfo, LightWeightHashSet<Block>> node2blocks =
-      new HashMap<DatanodeInfo, LightWeightHashSet<Block>>();
-  /** The total number of blocks in the map. */
-  private long numBlocks = 0L;
-
+  private final Map<DatanodeInfo, LightWeightHashSet<Block>>
+      nodeToBlocks = new HashMap<>();
+  private final Map<DatanodeInfo, LightWeightHashSet<Block>>
+      nodeToECBlockGroups = new HashMap<>();
+  private final LongAdder numBlocks = new LongAdder();
+  private final LongAdder numECBlockGroups = new LongAdder();
   private final int blockInvalidateLimit;
 
   /**
@@ -80,11 +83,73 @@ class InvalidateBlocks {
         sdf.format(calendar.getTime()));
   }
 
-  /** @return the number of blocks to be invalidated . */
-  synchronized long numBlocks() {
-    return numBlocks;
+  /**
+   * @return The total number of blocks to be invalidated.
+   */
+  long numBlocks() {
+    return getECBlockGroupsStat() + getBlocksStat();
+  }
+
+  /**
+   * @return The total number of blocks of type
+   * {@link org.apache.hadoop.hdfs.protocol.BlockType#CONTIGUOUS}
+   * to be invalidated.
+   */
+  long getBlocksStat() {
+    return numBlocks.longValue();
+  }
+
+  /**
+   * @return The total number of blocks of type
+   * {@link org.apache.hadoop.hdfs.protocol.BlockType#STRIPED}
+   * to be invalidated.
+   */
+  long getECBlockGroupsStat() {
+    return numECBlockGroups.longValue();
+  }
+
+  private LightWeightHashSet<Block> getBlocksSet(final DatanodeInfo dn) {
+    if (nodeToBlocks.containsKey(dn)) {
+      return nodeToBlocks.get(dn);
+    }
+    return null;
+  }
+
+  private LightWeightHashSet<Block> getECBlockGroupsSet(final DatanodeInfo dn) {
+    if (nodeToECBlockGroups.containsKey(dn)) {
+      return nodeToECBlockGroups.get(dn);
+    }
+    return null;
+  }
+
+  private LightWeightHashSet<Block> getBlocksSet(final DatanodeInfo dn,
+      final Block block) {
+    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+      return getECBlockGroupsSet(dn);
+    } else {
+      return getBlocksSet(dn);
+    }
   }
 
+  private void putBlocksSet(final DatanodeInfo dn, final Block block,
+      final LightWeightHashSet set) {
+    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+      assert getECBlockGroupsSet(dn) == null;
+      nodeToECBlockGroups.put(dn, set);
+    } else {
+      assert getBlocksSet(dn) == null;
+      nodeToBlocks.put(dn, set);
+    }
+  }
+
+  private long getBlockSetsSize(final DatanodeInfo dn) {
+    LightWeightHashSet<Block> replicaBlocks = getBlocksSet(dn);
+    LightWeightHashSet<Block> stripedBlocks = getECBlockGroupsSet(dn);
+    return ((replicaBlocks == null ? 0 : replicaBlocks.size()) +
+        (stripedBlocks == null ? 0 : stripedBlocks.size()));
+  }
+
+
   /**
    * @return true if the given storage has the given block listed for
    * invalidation. Blocks are compared including their generation stamps:
@@ -92,7 +157,7 @@ class InvalidateBlocks {
    * returns false.
    */
   synchronized boolean contains(final DatanodeInfo dn, final Block block) {
-    final LightWeightHashSet<Block> s = node2blocks.get(dn);
+    final LightWeightHashSet<Block> s = getBlocksSet(dn, block);
     if (s == null) {
       return false; // no invalidate blocks for this storage ID
     }
@@ -102,18 +167,22 @@ class InvalidateBlocks {
   }
 
   /**
-   * Add a block to the block collection
-   * which will be invalidated on the specified datanode.
+   * Add a block to the block collection which will be
+   * invalidated on the specified datanode.
    */
   synchronized void add(final Block block, final DatanodeInfo datanode,
       final boolean log) {
-    LightWeightHashSet<Block> set = node2blocks.get(datanode);
+    LightWeightHashSet<Block> set = getBlocksSet(datanode, block);
     if (set == null) {
-      set = new LightWeightHashSet<Block>();
-      node2blocks.put(datanode, set);
+      set = new LightWeightHashSet<>();
+      putBlocksSet(datanode, block, set);
     }
     if (set.add(block)) {
-      numBlocks++;
+      if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+        numECBlockGroups.increment();
+      } else {
+        numBlocks.increment();
+      }
       if (log) {
         NameNode.blockStateChangeLog.debug("BLOCK* {}: add {} to {}",
             getClass().getSimpleName(), block, datanode);
@@ -123,44 +192,61 @@ class InvalidateBlocks {
 
   /** Remove a storage from the invalidatesSet */
   synchronized void remove(final DatanodeInfo dn) {
-    final LightWeightHashSet<Block> blocks = node2blocks.remove(dn);
-    if (blocks != null) {
-      numBlocks -= blocks.size();
+    LightWeightHashSet<Block> replicaBlockSets = nodeToBlocks.remove(dn);
+    if (replicaBlockSets != null) {
+      numBlocks.add(replicaBlockSets.size() * -1);
+    }
+    LightWeightHashSet<Block> blockGroupSets = nodeToECBlockGroups.remove(dn);
+    if (blockGroupSets != null) {
+      numECBlockGroups.add(blockGroupSets.size() * -1);
     }
   }
 
   /** Remove the block from the specified storage. */
   synchronized void remove(final DatanodeInfo dn, final Block block) {
-    final LightWeightHashSet<Block> v = node2blocks.get(dn);
+    final LightWeightHashSet<Block> v = getBlocksSet(dn, block);
     if (v != null && v.remove(block)) {
-      numBlocks--;
-      if (v.isEmpty()) {
-        node2blocks.remove(dn);
+      if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+        numECBlockGroups.decrement();
+      } else {
+        numBlocks.decrement();
+      }
+      if (v.isEmpty() && getBlockSetsSize(dn) == 0) {
+        remove(dn);
       }
     }
   }
 
+  private void dumpBlockSet(final Map<DatanodeInfo,
+      LightWeightHashSet<Block>> nodeToBlocksMap, final PrintWriter out) {
+    for(Entry<DatanodeInfo, LightWeightHashSet<Block>> entry :
+        nodeToBlocksMap.entrySet()) {
+      final LightWeightHashSet<Block> blocks = entry.getValue();
+      if (blocks != null && blocks.size() > 0) {
+        out.println(entry.getKey());
+        out.println(StringUtils.join(',', blocks));
+      }
+    }
+  }
   /** Print the contents to out. */
   synchronized void dump(final PrintWriter out) {
-    final int size = node2blocks.values().size();
-    out.println("Metasave: Blocks " + numBlocks 
+    final int size = nodeToBlocks.values().size() +
+        nodeToECBlockGroups.values().size();
+    out.println("Metasave: Blocks " + numBlocks()
         + " waiting deletion from " + size + " datanodes.");
     if (size == 0) {
       return;
     }
-
-    for(Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
-      final LightWeightHashSet<Block> blocks = entry.getValue();
-      if (blocks.size() > 0) {
-        out.println(entry.getKey());
-        out.println(StringUtils.join(',', blocks));
-      }
-    }
+    dumpBlockSet(nodeToBlocks, out);
+    dumpBlockSet(nodeToECBlockGroups, out);
   }
 
   /** @return a list of the storage IDs. */
   synchronized List<DatanodeInfo> getDatanodes() {
-    return new ArrayList<DatanodeInfo>(node2blocks.keySet());
+    HashSet<DatanodeInfo> set = new HashSet<>();
+    set.addAll(nodeToBlocks.keySet());
+    set.addAll(nodeToECBlockGroups.keySet());
+    return new ArrayList<>(set);
   }
 
   /**
@@ -171,6 +257,22 @@ class InvalidateBlocks {
     return pendingPeriodInMs - (Time.monotonicNow() - startupTime);
   }
 
+  /**
+   * Get blocks to invalidate by limit as blocks that can be sent in one
+   * message is limited.
+   * @return the remaining limit
+   */
+  private int getBlocksToInvalidateByLimit(LightWeightHashSet<Block> blockSet,
+      List<Block> toInvalidate, LongAdder statsAdder, int limit) {
+    assert blockSet != null;
+    int remainingLimit = limit;
+    List<Block> polledBlocks = blockSet.pollN(limit);
+    remainingLimit -= polledBlocks.size();
+    toInvalidate.addAll(polledBlocks);
+    statsAdder.add(polledBlocks.size() * -1);
+    return remainingLimit;
+  }
+
   synchronized List<Block> invalidateWork(final DatanodeDescriptor dn) {
     final long delay = getInvalidationDelay();
     if (delay > 0) {
@@ -179,27 +281,29 @@ class InvalidateBlocks {
               + "The deletion will start after {} ms.", delay);
       return null;
     }
-    final LightWeightHashSet<Block> set = node2blocks.get(dn);
-    if (set == null) {
-      return null;
-    }
 
-    // # blocks that can be sent in one message is limited
-    final int limit = blockInvalidateLimit;
-    final List<Block> toInvalidate = set.pollN(limit);
+    int remainingLimit = blockInvalidateLimit;
+    final List<Block> toInvalidate = new ArrayList<>();
 
-    // If we send everything in this message, remove this node entry
-    if (set.isEmpty()) {
+    if (nodeToBlocks.get(dn) != null) {
+      remainingLimit = getBlocksToInvalidateByLimit(nodeToBlocks.get(dn),
+          toInvalidate, numBlocks, remainingLimit);
+    }
+    if ((remainingLimit > 0) && (nodeToECBlockGroups.get(dn) != null)) {
+      getBlocksToInvalidateByLimit(nodeToECBlockGroups.get(dn),
+          toInvalidate, numECBlockGroups, remainingLimit);
+    }
+    if (toInvalidate.size() > 0 && getBlockSetsSize(dn) == 0) {
       remove(dn);
     }
-
     dn.addBlocksToBeInvalidated(toInvalidate);
-    numBlocks -= toInvalidate.size();
     return toInvalidate;
   }
   
   synchronized void clear() {
-    node2blocks.clear();
-    numBlocks = 0;
+    nodeToBlocks.clear();
+    nodeToECBlockGroups.clear();
+    numBlocks.reset();
+    numECBlockGroups.reset();
   }
 }
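
invalidateWork() now drains two queues under one budget: replicated
blocks first, then EC block groups with whatever limit remains. The
budgeting pattern, distilled (queue element type simplified to Long;
illustrative only):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Queue;

    static List<Long> pollUpTo(Queue<Long> blocks, Queue<Long> ecGroups,
        int limit) {
      List<Long> toInvalidate = new ArrayList<>();
      while (toInvalidate.size() < limit && !blocks.isEmpty()) {
        toInvalidate.add(blocks.poll());   // replicated blocks get priority
      }
      while (toInvalidate.size() < limit && !ecGroups.isEmpty()) {
        toInvalidate.add(ecGroups.poll()); // then EC groups, remaining budget
      }
      return toInvalidate;
    }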

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
index 1a38480..af2cb7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.concurrent.atomic.LongAdder;
 
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
@@ -85,7 +86,12 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
       = new ArrayList<>(LEVEL);
 
   /** The number of corrupt blocks with replication factor 1 */
-  private int corruptReplOneBlocks = 0;
+
+  private final LongAdder lowRedundancyBlocks = new LongAdder();
+  private final LongAdder corruptBlocks = new LongAdder();
+  private final LongAdder corruptReplicationOneBlocks = new LongAdder();
+  private final LongAdder lowRedundancyECBlockGroups = new LongAdder();
+  private final LongAdder corruptECBlockGroups = new LongAdder();
 
   /** Create an object. */
   LowRedundancyBlocks() {
@@ -101,7 +107,11 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
     for (int i = 0; i < LEVEL; i++) {
       priorityQueues.get(i).clear();
     }
-    corruptReplOneBlocks = 0;
+    lowRedundancyBlocks.reset();
+    corruptBlocks.reset();
+    corruptReplicationOneBlocks.reset();
+    lowRedundancyECBlockGroups.reset();
+    corruptECBlockGroups.reset();
   }
 
   /** Return the total number of insufficient redundancy blocks. */
@@ -133,8 +143,35 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
   }
 
   /** Return the number of corrupt blocks with replication factor 1 */
-  synchronized int getCorruptReplOneBlockSize() {
-    return corruptReplOneBlocks;
+  long getCorruptReplicationOneBlockSize() {
+    return getCorruptReplicationOneBlocksStat();
+  }
+
+  /**
+   * Return the count of low redundancy replicated blocks, excluding
+   * corrupt blocks.
+   */
+  long getLowRedundancyBlocksStat() {
+    return lowRedundancyBlocks.longValue() - getCorruptBlocksStat();
+  }
+
+  long getCorruptBlocksStat() {
+    return corruptBlocks.longValue();
+  }
+
+  long getCorruptReplicationOneBlocksStat() {
+    return corruptReplicationOneBlocks.longValue();
+  }
+
+  /**
+   * Return the count of low redundancy striped block groups, excluding
+   * corrupt block groups.
+   */
+  long getLowRedundancyECBlockGroupsStat() {
+    return lowRedundancyECBlockGroups.longValue() -
+        getCorruptECBlockGroupsStat();
+  }
+
+  long getCorruptECBlockGroupsStat() {
+    return corruptECBlockGroups.longValue();
   }
 
   /** Check if a block is in the neededReconstruction queue. */
@@ -236,11 +273,7 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
       int outOfServiceReplicas, int expectedReplicas) {
     final int priLevel = getPriority(block, curReplicas, readOnlyReplicas,
         outOfServiceReplicas, expectedReplicas);
-    if(priorityQueues.get(priLevel).add(block)) {
-      if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS &&
-          expectedReplicas == 1) {
-        corruptReplOneBlocks++;
-      }
+    if(add(block, priLevel, expectedReplicas)) {
       NameNode.blockStateChangeLog.debug(
           "BLOCK* NameSystem.LowRedundancyBlock.add: {}"
               + " has only {} replicas and need {} replicas so is added to"
@@ -252,18 +285,43 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
     return false;
   }
 
+  private boolean add(BlockInfo blockInfo, int priLevel, int expectedReplicas) {
+    if (priorityQueues.get(priLevel).add(blockInfo)) {
+      incrementBlockStat(blockInfo, priLevel, expectedReplicas);
+      return true;
+    }
+    return false;
+  }
+
+  private void incrementBlockStat(BlockInfo blockInfo, int priLevel,
+      int expectedReplicas) {
+    if (blockInfo.isStriped()) {
+      lowRedundancyECBlockGroups.increment();
+      if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS) {
+        corruptECBlockGroups.increment();
+      }
+    } else {
+      lowRedundancyBlocks.increment();
+      if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS) {
+        corruptBlocks.increment();
+        if (expectedReplicas == 1) {
+          corruptReplicationOneBlocks.increment();
+        }
+      }
+    }
+  }
+
   /** Remove a block from a low redundancy queue. */
   synchronized boolean remove(BlockInfo block,
       int oldReplicas, int oldReadOnlyReplicas,
       int outOfServiceReplicas, int oldExpectedReplicas) {
     final int priLevel = getPriority(block, oldReplicas, oldReadOnlyReplicas,
         outOfServiceReplicas, oldExpectedReplicas);
-    boolean removedBlock = remove(block, priLevel);
+    boolean removedBlock = remove(block, priLevel, oldExpectedReplicas);
     if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS &&
         oldExpectedReplicas == 1 &&
         removedBlock) {
-      corruptReplOneBlocks--;
-      assert corruptReplOneBlocks >= 0 :
+      assert corruptReplicationOneBlocks.longValue() >= 0 :
           "Number of corrupt blocks with replication factor 1 " +
               "should be non-negative";
     }
@@ -287,12 +345,17 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
    *         queues
    */
   boolean remove(BlockInfo block, int priLevel) {
+    return remove(block, priLevel, block.getReplication());
+  }
+
+  boolean remove(BlockInfo block, int priLevel, int oldExpectedReplicas) {
     if(priLevel >= 0 && priLevel < LEVEL
         && priorityQueues.get(priLevel).remove(block)) {
       NameNode.blockStateChangeLog.debug(
           "BLOCK* NameSystem.LowRedundancyBlock.remove: Removing block {}"
               + " from priority queue {}",
           block, priLevel);
+      decrementBlockStat(block, priLevel, oldExpectedReplicas);
       return true;
     } else {
       // Try to remove the block from all queues if the block was
@@ -302,6 +365,7 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
           NameNode.blockStateChangeLog.debug(
               "BLOCK* NameSystem.LowRedundancyBlock.remove: Removing block" +
                   " {} from priority queue {}", block, i);
+          decrementBlockStat(block, i, oldExpectedReplicas);
           return true;
         }
       }
@@ -309,6 +373,27 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
     return false;
   }
 
+  private void decrementBlockStat(BlockInfo blockInfo, int priLevel,
+      int oldExpectedReplicas) {
+    if (blockInfo.isStriped()) {
+      lowRedundancyECBlockGroups.decrement();
+      if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS) {
+        corruptECBlockGroups.decrement();
+      }
+    } else {
+      lowRedundancyBlocks.decrement();
+      if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS) {
+        corruptBlocks.decrement();
+        if (oldExpectedReplicas == 1) {
+          corruptReplicationOneBlocks.decrement();
+          assert corruptReplicationOneBlocks.longValue() >= 0 :
+              "Number of corrupt blocks with replication factor 1 " +
+                  "should be non-negative";
+        }
+      }
+    }
+  }
+
   /**
    * Recalculate and potentially update the priority level of a block.
    *
@@ -348,8 +433,8 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
     }
     // oldPri is mostly correct, but not always. If not found with oldPri,
     // other levels will be searched until the block is found & removed.
-    remove(block, oldPri);
-    if(priorityQueues.get(curPri).add(block)) {
+    remove(block, oldPri, oldExpectedReplicas);
+    if(add(block, curPri, curExpectedReplicas)) {
       NameNode.blockStateChangeLog.debug(
           "BLOCK* NameSystem.LowRedundancyBlock.update: {} has only {} "
               + "replicas and needs {} replicas so is added to "
@@ -357,18 +442,6 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
           block, curReplicas, curExpectedReplicas, curPri);
 
     }
-    if (oldPri != curPri || expectedReplicasDelta != 0) {
-      // corruptReplOneBlocks could possibly change
-      if (curPri == QUEUE_WITH_CORRUPT_BLOCKS &&
-          curExpectedReplicas == 1) {
-        // add a new corrupt block with replication factor 1
-        corruptReplOneBlocks++;
-      } else if (oldPri == QUEUE_WITH_CORRUPT_BLOCKS &&
-          curExpectedReplicas - expectedReplicasDelta == 1) {
-        // remove an existing corrupt block with replication factor 1
-        corruptReplOneBlocks--;
-      }
-    }
   }
 
   /**

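A note on the counter change above: the synchronized int corruptReplOneBlocks gives way to java.util.concurrent.atomic.LongAdder fields, which absorb concurrent updates without locking and are read via longValue(). A small stand-alone demonstration of the three operations the patch relies on (increment, decrement, reset):

    import java.util.concurrent.atomic.LongAdder;

    public class LongAdderDemo {
      public static void main(String[] args) throws InterruptedException {
        LongAdder corruptBlocks = new LongAdder();

        // Concurrent increments from several threads, as block management
        // code paths may perform.
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
          workers[i] = new Thread(() -> {
            for (int j = 0; j < 1000; j++) {
              corruptBlocks.increment();
            }
          });
          workers[i].start();
        }
        for (Thread t : workers) {
          t.join();
        }
        System.out.println(corruptBlocks.longValue()); // 4000

        corruptBlocks.decrement();
        System.out.println(corruptBlocks.longValue()); // 3999

        corruptBlocks.reset();                          // as in clear()
        System.out.println(corruptBlocks.longValue()); // 0
      }
    }

LongAdder trades exact read-time consistency for cheap writes: longValue() is a snapshot rather than a linearizable read, which is acceptable for counters that are only published as metrics.
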
http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2c662fe..2a611b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -89,6 +89,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
 
+import org.apache.hadoop.hdfs.protocol.BlocksStats;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.util.Time.now;
 import static org.apache.hadoop.util.Time.monotonicNow;
@@ -240,8 +242,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
+import org.apache.hadoop.hdfs.server.namenode.metrics.ECBlockGroupsStatsMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.metrics.ReplicatedBlocksStatsMBean;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
@@ -335,7 +339,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 @InterfaceAudience.Private
 @Metrics(context="dfs")
 public class FSNamesystem implements Namesystem, FSNamesystemMBean,
-  NameNodeMXBean {
+    NameNodeMXBean, ReplicatedBlocksStatsMBean, ECBlockGroupsStatsMBean {
   public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
   private final MetricsRegistry registry = new MetricsRegistry("FSNamesystem");
   @Metric final MutableRatesWithAggregation detailedLockHoldTimeMetrics =
@@ -4005,9 +4009,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   /** @see ClientProtocol#getStats() */
   long[] getStats() {
     final long[] stats = datanodeStatistics.getStats();
-    stats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = getUnderReplicatedBlocks();
-    stats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = getCorruptReplicaBlocks();
-    stats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = getMissingBlocksCount();
+    stats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX] =
+        getLowRedundancyBlocks();
+    stats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] =
+        getCorruptReplicaBlocks();
+    stats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] =
+        getMissingBlocksCount();
     stats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX] =
         getMissingReplOneBlocksCount();
     stats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX] =
@@ -4017,6 +4024,31 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return stats;
   }
 
+  /**
+   * Get statistics pertaining to blocks of type {@link BlockType#CONTIGUOUS}
+   * in the filesystem.
+   *
+   * @see ClientProtocol#getBlocksStats()
+   */
+  BlocksStats getBlocksStats() {
+    return new BlocksStats(getLowRedundancyBlocksStat(),
+        getCorruptBlocksStat(), getMissingBlocksStat(),
+        getMissingReplicationOneBlocksStat(), getBlocksBytesInFutureStat(),
+        getPendingDeletionBlocksStat());
+  }
+
+  /**
+   * Get statistics pertaining to blocks of type {@link BlockType#STRIPED}
+   * in the filesystem.
+   *
+   * @see ClientProtocol#getECBlockGroupsStats()
+   */
+  ECBlockGroupsStats getECBlockGroupsStats() {
+    return new ECBlockGroupsStats(getLowRedundancyECBlockGroupsStat(),
+        getCorruptECBlockGroupsStat(), getMissingECBlockGroupsStat(),
+        getECBlocksBytesInFutureStat(), getPendingDeletionECBlockGroupsStat());
+  }
+
   @Override // FSNamesystemMBean
   @Metric({"CapacityTotal",
       "Total raw capacity of data nodes in bytes"})
@@ -4501,16 +4533,43 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return this.dir.totalInodes();
   }
 
+  /**
+   * Get aggregated count of all blocks pending to be reconstructed.
+   * @deprecated Use {@link #getPendingReconstructionBlocks()} instead.
+   */
   @Override // FSNamesystemMBean
   @Metric
+  @Deprecated
   public long getPendingReplicationBlocks() {
     return blockManager.getPendingReconstructionBlocksCount();
   }
 
+  /**
+   * Get aggregated count of all blocks pending to be reconstructed.
+   */
+  @Override // FSNamesystemMBean
+  @Metric
+  public long getPendingReconstructionBlocks() {
+    return blockManager.getPendingReconstructionBlocksCount();
+  }
+
+  /**
+   * Get aggregated count of all blocks with low redundancy.
+   * @deprecated Use {@link #getLowRedundancyBlocks()} instead.
+   */
   @Override // FSNamesystemMBean
   @Metric
+  @Deprecated
   public long getUnderReplicatedBlocks() {
-    return blockManager.getUnderReplicatedBlocksCount();
+    return blockManager.getLowRedundancyBlocksCount();
+  }
+
+  /**
+   * Get aggregated count of all blocks with low redundancy.
+   */
+  @Override // FSNamesystemMBean
+  @Metric
+  public long getLowRedundancyBlocks() {
+    return blockManager.getLowRedundancyBlocksCount();
   }
 
   /** Returns number of blocks with corrupt replicas */
@@ -4531,6 +4590,81 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return blockManager.getPendingDeletionBlocksCount();
   }
 
+  @Override // ReplicatedBlocksStatsMBean
+  @Metric({"LowRedundancyReplicatedBlocks",
+      "Number of low redundancy replicated blocks"})
+  public long getLowRedundancyBlocksStat() {
+    return blockManager.getLowRedundancyBlocksStat();
+  }
+
+  @Override // ReplicatedBlocksStatsMBean
+  @Metric({"CorruptReplicatedBlocks", "Number of corrupted replicated blocks"})
+  public long getCorruptBlocksStat() {
+    return blockManager.getCorruptBlocksStat();
+  }
+
+  @Override // ReplicatedBlocksStatsMBean
+  @Metric({"MissingReplicatedBlocks", "Number of missing replicated blocks"})
+  public long getMissingBlocksStat() {
+    return blockManager.getMissingBlocksStat();
+  }
+
+  @Override // ReplicatedBlocksStatsMBean
+  @Metric({"MissingReplicatedOneBlocks", "Number of missing replicated blocks" +
+      " with replication factor 1"})
+  public long getMissingReplicationOneBlocksStat() {
+    return blockManager.getMissingReplicationOneBlocksStat();
+  }
+
+  @Override // ReplicatedBlocksStatsMBean
+  @Metric({"BytesReplicatedFutureBlocks", "Total bytes in replicated blocks " +
+      "with future generation stamp"})
+  public long getBlocksBytesInFutureStat() {
+    return blockManager.getBytesInFutureReplicatedBlocksStat();
+  }
+
+  @Override // ReplicatedBlocksStatsMBean
+  @Metric({"PendingDeletionReplicatedBlocks", "Number of replicated blocks " +
+      "that are pending deletion"})
+  public long getPendingDeletionBlocksStat() {
+    return blockManager.getPendingDeletionBlocksStat();
+  }
+
+  @Override // ECBlockGroupsStatsMBean
+  @Metric({"LowRedundancyECBlockGroups", "Number of erasure coded block " +
+      "groups with low redundancy"})
+  public long getLowRedundancyECBlockGroupsStat() {
+    return blockManager.getLowRedundancyECBlockGroupsStat();
+  }
+
+  @Override // ECBlockGroupsStatsMBean
+  @Metric({"CorruptECBlockGroups", "Number of erasure coded block groups that" +
+      " are corrupt"})
+  public long getCorruptECBlockGroupsStat() {
+    return blockManager.getCorruptECBlockGroupsStat();
+  }
+
+  @Override // ECBlockGroupsStatsMBean
+  @Metric({"MissingECBlockGroups", "Number of erasure coded block groups that" +
+      " are missing"})
+  public long getMissingECBlockGroupsStat() {
+    return blockManager.getMissingECBlockGroupsStat();
+  }
+
+  @Override // ECBlockGroupsStatsMBean
+  @Metric({"BytesFutureECBlockGroups", "Total bytes in erasure coded block " +
+      "groups with future generation stamp"})
+  public long getECBlocksBytesInFutureStat() {
+    return blockManager.getBytesInFutureStripedBlocksStat();
+  }
+
+  @Override // ECBlockGroupsStatsMBean
+  @Metric({"PendingDeletionECBlockGroups", "Number of erasure coded block " +
+      "groups that are pending deletion"})
+  public long getPendingDeletionECBlockGroupsStat() {
+    return blockManager.getPendingDeletionECBlockGroupsStat();
+  }
+
   @Override
   public long getBlockDeletionStartTime() {
     return startTime + blockManager.getStartupDelayBlockDeletionInMs();
@@ -4588,39 +4722,62 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return isInSafeMode() ? "safeMode" : "Operational";
   }
   
-  private ObjectName mbeanName;
-  private ObjectName mxbeanName;
+  private ObjectName namesystemMBeanName, replicatedBlocksMBeanName,
+      ecBlockGroupsMBeanName, namenodeMXBeanName;
 
   /**
-   * Register the FSNamesystem MBean using the name
+   * Register the following MBeans with their respective names.
+   * FSNamesystemMBean:
    *        "hadoop:service=NameNode,name=FSNamesystemState"
+   * ReplicatedBlocksStatsMBean:
+   *        "hadoop:service=NameNode,name=ReplicatedBlocksState"
+   * ECBlockGroupsStatsMBean:
+   *        "hadoop:service=NameNode,name=ECBlockGroupsState"
    */
   private void registerMBean() {
     // We can only implement one MXBean interface, so we keep the old one.
     try {
-      StandardMBean bean = new StandardMBean(this, FSNamesystemMBean.class);
-      mbeanName = MBeans.register("NameNode", "FSNamesystemState", bean);
+      StandardMBean namesystemBean = new StandardMBean(
+          this, FSNamesystemMBean.class);
+      StandardMBean replicaBean = new StandardMBean(
+          this, ReplicatedBlocksStatsMBean.class);
+      StandardMBean ecBean = new StandardMBean(
+          this, ECBlockGroupsStatsMBean.class);
+      namesystemMBeanName = MBeans.register(
+          "NameNode", "FSNamesystemState", namesystemBean);
+      replicatedBlocksMBeanName = MBeans.register(
+          "NameNode", "ReplicatedBlocksState", replicaBean);
+      ecBlockGroupsMBeanName = MBeans.register(
+          "NameNode", "ECBlockGroupsState", ecBean);
     } catch (NotCompliantMBeanException e) {
       throw new RuntimeException("Bad MBean setup", e);
     }
-
-    LOG.info("Registered FSNamesystemState MBean");
+    LOG.info("Registered FSNamesystemState, ReplicatedBlocksState and " +
+        "ECBlockGroupsState MBeans.");
   }
 
   /**
-   * shutdown FSNamesystem
+   * Shutdown FSNamesystem.
    */
   void shutdown() {
     if (snapshotManager != null) {
       snapshotManager.shutdown();
     }
-    if (mbeanName != null) {
-      MBeans.unregister(mbeanName);
-      mbeanName = null;
+    if (namesystemMBeanName != null) {
+      MBeans.unregister(namesystemMBeanName);
+      namesystemMBeanName = null;
     }
-    if (mxbeanName != null) {
-      MBeans.unregister(mxbeanName);
-      mxbeanName = null;
+    if (replicatedBlocksMBeanName != null) {
+      MBeans.unregister(replicatedBlocksMBeanName);
+      replicatedBlocksMBeanName = null;
+    }
+    if (ecBlockGroupsMBeanName != null) {
+      MBeans.unregister(ecBlockGroupsMBeanName);
+      ecBlockGroupsMBeanName = null;
+    }
+    if (namenodeMXBeanName != null) {
+      MBeans.unregister(namenodeMXBeanName);
+      namenodeMXBeanName = null;
     }
     if (dir != null) {
       dir.shutdown();
@@ -5382,11 +5539,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
                     "fsck", src, null, null);
     }
   }
+
   /**
-   * Register NameNodeMXBean
+   * Register NameNodeMXBean.
    */
   private void registerMXBean() {
-    mxbeanName = MBeans.register("NameNode", "NameNodeInfo", this);
+    namenodeMXBeanName = MBeans.register("NameNode", "NameNodeInfo", this);
   }
 
   /**

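The registerMBean() rewrite above registers one FSNamesystem instance under three JMX object names, one per management interface, by wrapping it in javax.management.StandardMBean. A stripped-down sketch of the same pattern with hypothetical interfaces follows; Hadoop's MBeans utility adds naming and error-handling conventions that are omitted here.

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;
    import javax.management.StandardMBean;

    public class MultiInterfaceMBeanSketch {
      // Two hypothetical management views of one object.
      public interface AggregateStatsMBean { long getTotalBlocks(); }
      public interface DetailStatsMBean { long getCorruptBlocks(); }

      static class Stats implements AggregateStatsMBean, DetailStatsMBean {
        public long getTotalBlocks() { return 42; }
        public long getCorruptBlocks() { return 1; }
      }

      public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        Stats stats = new Stats();
        // Each registration exposes only the attributes of its own interface.
        server.registerMBean(new StandardMBean(stats, AggregateStatsMBean.class),
            new ObjectName("Demo:service=NameNode,name=AggregateStats"));
        server.registerMBean(new StandardMBean(stats, DetailStatsMBean.class),
            new ObjectName("Demo:service=NameNode,name=DetailStats"));
      }
    }

Each registration exposes only the getters declared on its interface, which is how FSNamesystem keeps the aggregated, replicated-only, and EC-only views separate while remaining a single object.
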
http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 4a4fe9d..fff29df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -97,6 +97,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -112,6 +113,7 @@ import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
+import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -1151,6 +1153,20 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // ClientProtocol
+  public BlocksStats getBlocksStats() throws IOException {
+    checkNNStartup();
+    namesystem.checkOperation(OperationCategory.READ);
+    return namesystem.getBlocksStats();
+  }
+
+  @Override // ClientProtocol
+  public ECBlockGroupsStats getECBlockGroupsStats() throws IOException {
+    checkNNStartup();
+    namesystem.checkOperation(OperationCategory.READ);
+    return namesystem.getECBlockGroupsStats();
+  }
+
+  @Override // ClientProtocol
   public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
   throws IOException {
     checkNNStartup();

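With the two endpoints above in place, a client holding a ClientProtocol proxy can fetch the split statistics in one round trip each. The sketch below assumes such a proxy has already been obtained (for example through the usual NameNode proxy factories) and deliberately relies only on toString(), so as not to assume getter names on BlocksStats or ECBlockGroupsStats.

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.BlocksStats;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;

    public class BlockStatsClientSketch {
      /** Query replicated and EC stats via the new ClientProtocol methods. */
      static void printBlockStats(ClientProtocol namenode) throws IOException {
        BlocksStats replicated = namenode.getBlocksStats();
        ECBlockGroupsStats ecGroups = namenode.getECBlockGroupsStats();
        System.out.println("Replicated blocks stats: " + replicated);
        System.out.println("EC block groups stats:   " + ecGroups);
      }
    }
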
http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java
new file mode 100644
index 0000000..f9fd416
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.metrics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * This interface defines the methods to get status pertaining to blocks of type
+ * {@link org.apache.hadoop.hdfs.protocol.BlockType#STRIPED} in FSNamesystem
+ * of a NameNode. It is also used for publishing via JMX.
+ * <p>
+ * Aggregated status of all blocks is reported in
+ * {@link FSNamesystemMBean}.
+ * NameNode runtime activity statistics are reported in
+ * {@link org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics}.
+ */
+@InterfaceAudience.Private
+public interface ECBlockGroupsStatsMBean {
+  /**
+   * Return count of erasure coded block groups with low redundancy.
+   */
+  long getLowRedundancyECBlockGroupsStat();
+
+  /**
+   * Return count of erasure coded block groups that are corrupt.
+   */
+  long getCorruptECBlockGroupsStat();
+
+  /**
+   * Return count of erasure coded block groups that are missing.
+   */
+  long getMissingECBlockGroupsStat();
+
+  /**
+   * Return total bytes of erasure coded block groups with a future
+   * generation stamp.
+   */
+  long getECBlocksBytesInFutureStat();
+
+  /**
+   * Return count of erasure coded block groups that are pending deletion.
+   */
+  long getPendingDeletionECBlockGroupsStat();
+}

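Once FSNamesystem registers this interface as ECBlockGroupsState (see the registerMBean() javadoc earlier in this patch), the counts are readable over plain JMX. StandardMBean derives attribute names from the getters, so getLowRedundancyECBlockGroupsStat() surfaces as LowRedundancyECBlockGroupsStat. A sketch assuming an in-process (or otherwise connected) MBean server and Hadoop's usual "Hadoop" JMX domain:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class ECBlockGroupsJmxReader {
      public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // Name per FSNamesystem#registerMBean(); domain casing follows
        // Hadoop's MBeans utility.
        ObjectName name =
            new ObjectName("Hadoop:service=NameNode,name=ECBlockGroupsState");
        // StandardMBean attribute names mirror the interface getters.
        long lowRedundancy =
            (long) server.getAttribute(name, "LowRedundancyECBlockGroupsStat");
        long corrupt =
            (long) server.getAttribute(name, "CorruptECBlockGroupsStat");
        System.out.println("low redundancy EC groups = " + lowRedundancy
            + ", corrupt EC groups = " + corrupt);
      }
    }
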
http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
index f1e7515..ebdbc12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
@@ -78,17 +78,31 @@ public interface FSNamesystemMBean {
   public long getFilesTotal();
  
   /**
-   * Blocks pending to be replicated
-   * @return -  num of blocks to be replicated
+   * Get aggregated count of all blocks pending to be reconstructed.
+   * @deprecated Use {@link #getPendingReconstructionBlocks()} instead.
    */
+  @Deprecated
   public long getPendingReplicationBlocks();
- 
+
+  /**
+   * Get aggregated count of all blocks pending to be reconstructed.
+   * @return Number of blocks pending reconstruction.
+   */
+  public long getPendingReconstructionBlocks();
+
   /**
-   * Blocks under replicated 
-   * @return -  num of blocks under replicated
+   * Get aggregated count of all blocks with low redundancy.
+   * @deprecated Use {@link #getLowRedundancyBlocks()} instead.
    */
+  @Deprecated
   public long getUnderReplicatedBlocks();
- 
+
+  /**
+   * Get aggregated count of all blocks with low redundancy.
+   * @return Number of blocks with low redundancy.
+   */
+  public long getLowRedundancyBlocks();
+
   /**
    * Blocks scheduled for replication
    * @return -  num of blocks scheduled for replication

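Because the deprecated getters remain alongside their replacements, external monitors can migrate incrementally: try the new attribute first and fall back to the old name on NameNodes that predate this change. A hedged sketch against the FSNamesystemState MBean, with the object name following Hadoop's usual JMX domain and attribute names following the getters above:

    import java.lang.management.ManagementFactory;
    import javax.management.AttributeNotFoundException;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class LowRedundancyAttributeReader {
      /** Prefer the new attribute; fall back to the deprecated one. */
      static long readLowRedundancyBlocks(MBeanServer server) throws Exception {
        ObjectName fsState =
            new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
        try {
          return (long) server.getAttribute(fsState, "LowRedundancyBlocks");
        } catch (AttributeNotFoundException e) {
          // Older NameNodes only expose the old name.
          return (long) server.getAttribute(fsState, "UnderReplicatedBlocks");
        }
      }

      public static void main(String[] args) throws Exception {
        System.out.println(
            readLowRedundancyBlocks(ManagementFactory.getPlatformMBeanServer()));
      }
    }
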
http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java
new file mode 100644
index 0000000..4643b80
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.metrics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * This interface defines the methods to get status pertaining to blocks of type
+ * {@link org.apache.hadoop.hdfs.protocol.BlockType#CONTIGUOUS} in FSNamesystem
+ * of a NameNode. It is also used for publishing via JMX.
+ * <p>
+ * Aggregated status of all blocks is reported in
+ * {@link FSNamesystemMBean}.
+ * NameNode runtime activity statistics are reported in
+ * {@link org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics}.
+ */
+@InterfaceAudience.Private
+public interface ReplicatedBlocksStatsMBean {
+  /**
+   * Return low redundancy blocks count.
+   */
+  long getLowRedundancyBlocksStat();
+
+  /**
+   * Return corrupt blocks count.
+   */
+  long getCorruptBlocksStat();
+
+  /**
+   * Return missing blocks count.
+   */
+  long getMissingBlocksStat();
+
+  /**
+   * Return count of missing blocks with replication factor one.
+   */
+  long getMissingReplicationOneBlocksStat();
+
+  /**
+   * Return total bytes of blocks with a future generation stamp.
+   */
+  long getBlocksBytesInFutureStat();
+
+  /**
+   * Return count of blocks that are pending deletion.
+   */
+  long getPendingDeletionBlocksStat();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/999c8fcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index e59ea37..d82dfc4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -523,7 +523,7 @@ public class DFSAdmin extends FsShell {
      * counts.
      */
     System.out.println("Under replicated blocks: " + 
-                       dfs.getUnderReplicatedBlocksCount());
+                       dfs.getLowRedundancyBlocksCount());
     System.out.println("Blocks with corrupt replicas: " + 
                        dfs.getCorruptBlocksCount());
     System.out.println("Missing blocks: " + 

