hadoop-common-commits mailing list archives

From: w...@apache.org
Subject: hadoop git commit: HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging.
Date: Mon, 17 Aug 2015 17:17:10 GMT
Repository: hadoop
Updated Branches:
  refs/heads/trunk a7862d5fe -> 2e7b7e2cd


HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e7b7e2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e7b7e2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e7b7e2c

Branch: refs/heads/trunk
Commit: 2e7b7e2cda67eba4c03e0a2c7892d868d235b0cf
Parents: a7862d5
Author: Andrew Wang <wang@apache.org>
Authored: Mon Aug 17 10:16:26 2015 -0700
Committer: Andrew Wang <wang@apache.org>
Committed: Mon Aug 17 10:17:06 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../blockmanagement/DatanodeDescriptor.java     | 40 ++++++++++----------
 2 files changed, 23 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
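
For context, this change replaces commons-logging string concatenation with
SLF4J parameterized logging. Below is a minimal, self-contained sketch of the
pattern; the class name and values are illustrative only and are not the
actual Hadoop code, which appears in the diff that follows.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jConversionSketch {
      // Before (commons-logging):
      //   public static final Log LOG = LogFactory.getLog(Slf4jConversionSketch.class);
      // After (SLF4J):
      public static final Logger LOG =
          LoggerFactory.getLogger(Slf4jConversionSketch.class);

      public static void main(String[] args) {
        String storageId = "DS-1234";    // hypothetical storage ID
        long lastReportId = 0x1aL;
        long curReportId = 0x1bL;

        // String concatenation becomes {} placeholders; SLF4J formats the
        // message only if INFO is enabled, so no manual guard is needed.
        LOG.info("{} had lastBlockReportId 0x{} but curBlockReportId = 0x{}",
            storageId, Long.toHexString(lastReportId),
            Long.toHexString(curReportId));
      }
    }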


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e7b7e2c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1f9aab4..bfd95f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -791,6 +791,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8883. NameNode Metrics : Add FSNameSystem lock Queue Length.
     (Anu Engineer via xyao)
 
+    HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging. (wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e7b7e2c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 7e12a99..87ce753 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -33,8 +33,6 @@ import java.util.Set;
 import com.google.common.annotations.VisibleForTesting;
 
 import com.google.common.collect.ImmutableList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
@@ -50,6 +48,8 @@ import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.util.IntrusiveCollection;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class extends the DatanodeInfo class with ephemeral information (eg
@@ -59,7 +59,8 @@ import org.apache.hadoop.util.Time;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeDescriptor extends DatanodeInfo {
-  public static final Log LOG = LogFactory.getLog(DatanodeDescriptor.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(DatanodeDescriptor.class);
   public static final DatanodeDescriptor[] EMPTY_ARRAY = {};
 
   // Stores status of decommissioning.
@@ -319,9 +320,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
         Map.Entry<String, DatanodeStorageInfo> entry = iter.next();
         DatanodeStorageInfo storageInfo = entry.getValue();
         if (storageInfo.getLastBlockReportId() != curBlockReportId) {
-          LOG.info(storageInfo.getStorageID() + " had lastBlockReportId 0x" +
-              Long.toHexString(storageInfo.getLastBlockReportId()) +
-              ", but curBlockReportId = 0x" +
+          LOG.info("{} had lastBlockReportId 0x{} but curBlockReportId = 0x{}",
+              storageInfo.getStorageID(),
+              Long.toHexString(storageInfo.getLastBlockReportId()),
               Long.toHexString(curBlockReportId));
           iter.remove();
           if (zombies == null) {
@@ -446,8 +447,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
     }
 
     if (checkFailedStorages) {
-      LOG.info("Number of failed storage changes from "
-          + this.volumeFailures + " to " + volFailures);
+      if (this.volumeFailures != volFailures) {
+        LOG.info("Number of failed storages changes from {} to {}",
+            this.volumeFailures, volFailures);
+      }
       synchronized (storageMap) {
         failedStorageInfos =
             new HashSet<>(storageMap.values());
@@ -498,10 +501,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
    */
   private void pruneStorageMap(final StorageReport[] reports) {
     synchronized (storageMap) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Number of storages reported in heartbeat=" + reports.length
-            + "; Number of storages in storageMap=" + storageMap.size());
-      }
+      LOG.debug("Number of storages reported in heartbeat={};"
+              + " Number of storages in storageMap={}", reports.length,
+          storageMap.size());
 
       HashMap<String, DatanodeStorageInfo> excessStorages;
 
@@ -518,11 +520,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
       for (final DatanodeStorageInfo storageInfo : excessStorages.values()) {
         if (storageInfo.numBlocks() == 0) {
           storageMap.remove(storageInfo.getStorageID());
-          LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
-        } else if (LOG.isDebugEnabled()) {
+          LOG.info("Removed storage {} from DataNode {}", storageInfo, this);
+        } else {
           // This can occur until all block reports are received.
-          LOG.debug("Deferring removal of stale storage " + storageInfo
-              + " with " + storageInfo.numBlocks() + " blocks");
+          LOG.debug("Deferring removal of stale storage {} with {} blocks",
+              storageInfo, storageInfo.numBlocks());
         }
       }
     }
@@ -532,7 +534,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
       Set<DatanodeStorageInfo> failedStorageInfos) {
     for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
       if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
-        LOG.info(storageInfo + " failed.");
+        LOG.info("{} failed.", storageInfo);
         storageInfo.setState(DatanodeStorage.State.FAILED);
       }
     }
@@ -857,8 +859,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
     synchronized (storageMap) {
       DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
       if (storage == null) {
-        LOG.info("Adding new storage ID " + s.getStorageID() +
-                 " for DN " + getXferAddr());
+        LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(),
+            getXferAddr());
         storage = new DatanodeStorageInfo(this, s);
         storageMap.put(s.getStorageID(), storage);
       } else if (storage.getState() != s.getState() ||
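
Note on the removed isDebugEnabled() guard in pruneStorageMap above: with
parameterized SLF4J calls, placeholder arguments are formatted only when the
level is enabled, so simple debug messages no longer need an explicit guard.
A guard still pays off when computing an argument is itself expensive. A
minimal sketch, with a hypothetical class and expensiveSummary() helper:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class GuardExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(GuardExample.class);

      void report(int reported, int known) {
        // No guard needed: the arguments are cheap and formatting is deferred.
        LOG.debug("Number of storages reported in heartbeat={};"
            + " Number of storages in storageMap={}", reported, known);

        // A guard is still worthwhile when building the argument is costly.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Detailed state: {}", expensiveSummary());
        }
      }

      private String expensiveSummary() {
        return "...";  // stands in for an expensive computation
      }
    }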

