hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r990466 [1/2] - in /hadoop/hdfs/trunk: ./ src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/datanode...
Date: Sat, 28 Aug 2010 23:06:01 GMT
Author: szetszwo
Date: Sat Aug 28 23:06:00 2010
New Revision: 990466

URL: http://svn.apache.org/viewvc?rev=990466&view=rev
Log:
HDFS-1320. Add LOG.isDebugEnabled() guard for each LOG.debug(..).  Contributed by Erik Steffl
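
The change is mechanical: each string-building LOG.debug(..) call is wrapped in an isDebugEnabled() check so the argument concatenation only runs when debug logging is actually enabled. A minimal sketch of the pattern, using a hypothetical class and logger purely for illustration (Hadoop of this vintage logs through Apache commons-logging):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class GuardedDebugExample {
      private static final Log LOG = LogFactory.getLog(GuardedDebugExample.class);

      void onCreate(String path, long id) {
        // Without the guard, "created: " + path + " id: " + id is concatenated
        // even when debug output is disabled; the check skips that cost entirely.
        if (LOG.isDebugEnabled()) {
          LOG.debug("created: " + path + " id: " + id);
        }
      }
    }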

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java
    hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyForwardServlet.java
    hadoop/hdfs/trunk/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fi/ProbabilityModel.java
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java
    hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
    hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
    hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Sat Aug 28 23:06:00 2010
@@ -74,9 +74,6 @@ Trunk (unreleased changes)
     HDFS-1110. Reuses objects for commonly used file names in namenode to
     reduce the heap usage. (suresh)
 
-    HDFS-1114. Implement LightWeightGSet for BlocksMap in order to reduce
-    NameNode memory footprint.  (szetszwo)
-
     HDFS-752. Add interfaces classification to to HDFS source code. (suresh)
 
     HDFS-947. An Hftp read request is redirected to a datanode that has 
@@ -126,6 +123,12 @@ Trunk (unreleased changes)
     HDFS-1081. Performance regression in 
     DistributedFileSystem::getFileBlockLocations in secure systems (jghoman)
 
+    HDFS-1114. Implement LightWeightGSet for BlocksMap in order to reduce
+    NameNode memory footprint.  (szetszwo)
+
+    HDFS-1320. Add LOG.isDebugEnabled() guard for each LOG.debug(..).
+    (Erik Steffl via szetszwo)
+
   BUG FIXES
 
     HDFS-1039. Adding test for  JspHelper.getUGI(jnp via boryas)

Modified: hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java (original)
+++ hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java Sat Aug 28 23:06:00 2010
@@ -210,7 +210,9 @@ public class ProxyFilter implements Filt
       
       if (unitTest) {
         try {
-          LOG.debug("==> Entering https unit test");
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("==> Entering https unit test");
+          }
           String SslPath = rqst.getParameter("SslPath");
           InputStream inStream = new FileInputStream(SslPath);
           CertificateFactory cf = CertificateFactory.getInstance("X.509");

Modified: hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyForwardServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyForwardServlet.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyForwardServlet.java (original)
+++ hadoop/hdfs/trunk/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyForwardServlet.java Sat Aug 28 23:06:00 2010
@@ -74,7 +74,10 @@ public class ProxyForwardServlet extends
       response.sendError(HttpServletResponse.SC_NOT_FOUND);
       return;
     }
-    LOG.debug("Request to " + hostname + " is forwarded to version " + version);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Request to " + hostname +
+          " is forwarded to version " + version);
+    }
     forwardRequest(request, response, dstContext, request.getServletPath());
 
   }

Modified: hadoop/hdfs/trunk/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java (original)
+++ hadoop/hdfs/trunk/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java Sat Aug 28 23:06:00 2010
@@ -194,11 +194,15 @@ public class HadoopThriftServer extends 
     public ThriftHandle create(Pathname path) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("create: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("create: " + path);
+        }
         FSDataOutputStream out = fs.create(new Path(path.pathname));
         long id = insert(out);
         ThriftHandle obj = new ThriftHandle(id);
-        HadoopThriftHandler.LOG.debug("created: " + path + " id: " + id);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("created: " + path + " id: " + id);
+        }
         return obj;
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
@@ -216,12 +220,14 @@ public class HadoopThriftServer extends 
                                    long blockSize) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("create: " + path +
-                                     " permission: " + mode +
-                                     " overwrite: " + overwrite +
-                                     " bufferSize: " + bufferSize +
-                                     " replication: " + replication +
-                                     " blockSize: " + blockSize);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("create: " + path +
+                                       " permission: " + mode +
+                                       " overwrite: " + overwrite +
+                                       " bufferSize: " + bufferSize +
+                                       " replication: " + replication +
+                                       " blockSize: " + blockSize);
+        }
         FSDataOutputStream out = fs.create(new Path(path.pathname), 
                                            new FsPermission(mode),
                                            overwrite,
@@ -231,7 +237,9 @@ public class HadoopThriftServer extends 
                                            null); // progress
         long id = insert(out);
         ThriftHandle obj = new ThriftHandle(id);
-        HadoopThriftHandler.LOG.debug("created: " + path + " id: " + id);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("created: " + path + " id: " + id);
+        }
         return obj;
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
@@ -244,11 +252,15 @@ public class HadoopThriftServer extends 
     public ThriftHandle open(Pathname path) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("open: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("open: " + path);
+        }
         FSDataInputStream out = fs.open(new Path(path.pathname));
         long id = insert(out);
         ThriftHandle obj = new ThriftHandle(id);
-        HadoopThriftHandler.LOG.debug("opened: " + path + " id: " + id);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("opened: " + path + " id: " + id);
+        }
         return obj;
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
@@ -261,11 +273,15 @@ public class HadoopThriftServer extends 
     public ThriftHandle append(Pathname path) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("append: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("append: " + path);
+        }
         FSDataOutputStream out = fs.append(new Path(path.pathname));
         long id = insert(out);
         ThriftHandle obj = new ThriftHandle(id);
-        HadoopThriftHandler.LOG.debug("appended: " + path + " id: " + id);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("appended: " + path + " id: " + id);
+        }
         return obj;
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
@@ -278,11 +294,15 @@ public class HadoopThriftServer extends 
     public boolean write(ThriftHandle tout, String data) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("write: " + tout.id);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("write: " + tout.id);
+        }
         FSDataOutputStream out = (FSDataOutputStream)lookup(tout.id);
         byte[] tmp = data.getBytes("UTF-8");
         out.write(tmp, 0, tmp.length);
-        HadoopThriftHandler.LOG.debug("wrote: " + tout.id);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("wrote: " + tout.id);
+        }
         return true;
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
@@ -296,16 +316,20 @@ public class HadoopThriftServer extends 
                        int length) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("read: " + tout.id +
-                                     " offset: " + offset +
-                                     " length: " + length);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("read: " + tout.id +
+                                       " offset: " + offset +
+                                       " length: " + length);
+        }
         FSDataInputStream in = (FSDataInputStream)lookup(tout.id);
         if (in.getPos() != offset) {
           in.seek(offset);
         }
         byte[] tmp = new byte[length];
         int numbytes = in.read(offset, tmp, 0, length);
-        HadoopThriftHandler.LOG.debug("read done: " + tout.id);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("read done: " + tout.id);
+        }
         return new String(tmp, 0, numbytes, "UTF-8");
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
@@ -319,10 +343,14 @@ public class HadoopThriftServer extends 
                           throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("rm: " + path +
-                                     " recursive: " + recursive);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("rm: " + path +
+                                       " recursive: " + recursive);
+        }
         boolean ret = fs.delete(new Path(path.pathname), recursive);
-        HadoopThriftHandler.LOG.debug("rm: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("rm: " + path);
+        }
         return ret;
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
@@ -336,11 +364,15 @@ public class HadoopThriftServer extends 
                           throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("rename: " + path +
-                                     " destination: " + dest);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("rename: " + path +
+                                       " destination: " + dest);
+        }
         boolean ret = fs.rename(new Path(path.pathname), 
                                 new Path(dest.pathname));
-        HadoopThriftHandler.LOG.debug("rename: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("rename: " + path);
+        }
         return ret;
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
@@ -353,7 +385,9 @@ public class HadoopThriftServer extends 
      public boolean close(ThriftHandle tout) throws ThriftIOException {
        try {
          now = now();
-         HadoopThriftHandler.LOG.debug("close: " + tout.id);
+         if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+           HadoopThriftHandler.LOG.debug("close: " + tout.id);
+         }
          Object obj = remove(tout.id);
          if (obj instanceof FSDataOutputStream) {
            FSDataOutputStream out = (FSDataOutputStream)obj;
@@ -364,7 +398,9 @@ public class HadoopThriftServer extends 
          } else {
            throw new ThriftIOException("Unknown thrift handle.");
          }
-         HadoopThriftHandler.LOG.debug("closed: " + tout.id);
+         if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+           HadoopThriftHandler.LOG.debug("closed: " + tout.id);
+         }
          return true;
        } catch (IOException e) {
          throw new ThriftIOException(e.getMessage());
@@ -377,9 +413,13 @@ public class HadoopThriftServer extends 
     public boolean mkdirs(Pathname path) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("mkdirs: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("mkdirs: " + path);
+        }
         boolean ret = fs.mkdirs(new Path(path.pathname));
-        HadoopThriftHandler.LOG.debug("mkdirs: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("mkdirs: " + path);
+        }
         return ret;
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
@@ -392,9 +432,13 @@ public class HadoopThriftServer extends 
     public boolean exists(Pathname path) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("exists: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("exists: " + path);
+        }
         boolean ret = fs.exists(new Path(path.pathname));
-        HadoopThriftHandler.LOG.debug("exists done: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("exists done: " + path);
+        }
         return ret;
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
@@ -408,10 +452,14 @@ public class HadoopThriftServer extends 
                             Pathname path) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("stat: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("stat: " + path);
+        }
         org.apache.hadoop.fs.FileStatus stat = fs.getFileStatus(
                                            new Path(path.pathname));
-        HadoopThriftHandler.LOG.debug("stat done: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("stat done: " + path);
+        }
         return new org.apache.hadoop.thriftfs.api.FileStatus(
           stat.getPath().toString(),
           stat.getLen(),
@@ -435,11 +483,15 @@ public class HadoopThriftServer extends 
                             Pathname path) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("listStatus: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("listStatus: " + path);
+        }
 
         org.apache.hadoop.fs.FileStatus[] stat = fs.listStatus(
                                            new Path(path.pathname));
-        HadoopThriftHandler.LOG.debug("listStatus done: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("listStatus done: " + path);
+        }
         org.apache.hadoop.thriftfs.api.FileStatus tmp;
         List<org.apache.hadoop.thriftfs.api.FileStatus> value = 
           new LinkedList<org.apache.hadoop.thriftfs.api.FileStatus>();
@@ -469,10 +521,14 @@ public class HadoopThriftServer extends 
     public void chmod(Pathname path, short mode) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("chmod: " + path + 
-                                     " mode " + mode);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("chmod: " + path + 
+                                       " mode " + mode);
+        }
         fs.setPermission(new Path(path.pathname), new FsPermission(mode));
-        HadoopThriftHandler.LOG.debug("chmod done: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("chmod done: " + path);
+        }
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
       }
@@ -485,11 +541,15 @@ public class HadoopThriftServer extends 
                                                        throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("chown: " + path +
-                                     " owner: " + owner +
-                                     " group: " + group);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("chown: " + path +
+                                       " owner: " + owner +
+                                       " group: " + group);
+        }
         fs.setOwner(new Path(path.pathname), owner, group);
-        HadoopThriftHandler.LOG.debug("chown done: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("chown done: " + path);
+        }
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
       }
@@ -501,10 +561,14 @@ public class HadoopThriftServer extends 
     public void setReplication(Pathname path, short repl) throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("setrepl: " + path +
-                                     " replication factor: " + repl);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("setrepl: " + path +
+                                       " replication factor: " + repl);
+        }
         fs.setReplication(new Path(path.pathname), repl);
-        HadoopThriftHandler.LOG.debug("setrepl done: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("setrepl done: " + path);
+        }
       } catch (IOException e) {
         throw new ThriftIOException(e.getMessage());
       }
@@ -519,14 +583,16 @@ public class HadoopThriftServer extends 
                                          throws ThriftIOException {
       try {
         now = now();
-        HadoopThriftHandler.LOG.debug("getFileBlockLocations: " + path);
-
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("getFileBlockLocations: " + path);
+        }
         org.apache.hadoop.fs.FileStatus status = fs.getFileStatus(
                                                  new Path(path.pathname));
-
         org.apache.hadoop.fs.BlockLocation[] stat = 
             fs.getFileBlockLocations(status, start, length);
-        HadoopThriftHandler.LOG.debug("getFileBlockLocations done: " + path);
+        if(HadoopThriftHandler.LOG.isDebugEnabled()) {
+          HadoopThriftHandler.LOG.debug("getFileBlockLocations done: " + path);
+        }
 
         org.apache.hadoop.thriftfs.api.BlockLocation tmp;
         List<org.apache.hadoop.thriftfs.api.BlockLocation> value = 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java Sat Aug 28 23:06:00 2010
@@ -454,8 +454,10 @@ public class BlockReader extends FSInput
       out.flush();
     } catch (IOException e) {
       // its ok not to be able to send this.
-      LOG.debug("Could not write to datanode " + sock.getInetAddress() +
-                ": " + e.getMessage());
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Could not write to datanode " + sock.getInetAddress() +
+                  ": " + e.getMessage());
+      }
     }
   }
 }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Sat Aug 28 23:06:00 2010
@@ -625,7 +625,9 @@ public class DFSClient implements FSCons
       permission = FsPermission.getDefault();
     }
     FsPermission masked = permission.applyUMask(FsPermission.getUMask(conf));
-    LOG.debug(src + ": masked=" + masked);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug(src + ": masked=" + masked);
+    }
     OutputStream result = new DFSOutputStream(this, src, masked,
         flag, createParent, replication, blockSize, progress, buffersize,
         conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 
@@ -1212,7 +1214,9 @@ public class DFSClient implements FSCons
       permission = FsPermission.getDefault();
     }
     FsPermission masked = permission.applyUMask(FsPermission.getUMask(conf));
-    LOG.debug(src + ": masked=" + masked);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug(src + ": masked=" + masked);
+    }
     try {
       return namenode.mkdirs(src, masked, createParent);
     } catch(RemoteException re) {
@@ -1239,7 +1243,9 @@ public class DFSClient implements FSCons
         FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf));
     } 
 
-    LOG.debug(src + ": masked=" + absPermission);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug(src + ": masked=" + absPermission);
+    }
     try {
       return namenode.mkdirs(src, absPermission, true);
     } catch(RemoteException re) {
@@ -1347,7 +1353,9 @@ public class DFSClient implements FSCons
       }
      
       if (daemonCopy != null) {
-        LOG.debug("Wait for lease checker to terminate");
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Wait for lease checker to terminate");
+        }
         daemonCopy.join();
       }
     }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java Sat Aug 28 23:06:00 2010
@@ -744,9 +744,11 @@ public class DFSInputStream extends FSIn
             done = true;
           }
         } catch (IOException e) {//make following read to retry
-          DFSClient.LOG.debug("Exception while seek to " + targetPos + " from "
-                    + currentBlock +" of " + src + " from " + currentNode + 
-                    ": " + StringUtils.stringifyException(e));
+          if(DFSClient.LOG.isDebugEnabled()) {
+            DFSClient.LOG.debug("Exception while seek to " + targetPos +
+                " from " + currentBlock +" of " + src + " from " +
+                currentNode + ": " + StringUtils.stringifyException(e));
+          }
         }
       }
     }
@@ -853,4 +855,4 @@ public class DFSInputStream extends FSIn
     }
   }
 
-}
\ No newline at end of file
+}

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Sat Aug 28 23:06:00 2010
@@ -38,7 +38,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -360,7 +359,9 @@ class DFSOutputStream extends FSOutputSu
     }
     
     private void endBlock() {
-      DFSClient.LOG.debug("Closing old block " + block);
+      if(DFSClient.LOG.isDebugEnabled()) {
+        DFSClient.LOG.debug("Closing old block " + block);
+      }
       this.setName("DataStreamer for file " + src);
       closeResponder();
       closeStream();
@@ -427,11 +428,15 @@ class DFSOutputStream extends FSOutputSu
 
           // get new block from namenode.
           if (stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
-            DFSClient.LOG.debug("Allocating new block");
+            if(DFSClient.LOG.isDebugEnabled()) {
+              DFSClient.LOG.debug("Allocating new block");
+            }
             nodes = nextBlockOutputStream(src);
             initDataStreaming();
           } else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) {
-            DFSClient.LOG.debug("Append to block " + block);
+            if(DFSClient.LOG.isDebugEnabled()) {
+              DFSClient.LOG.debug("Append to block " + block);
+            }
             setupPipelineForAppendOrRecovery();
             initDataStreaming();
           }
@@ -856,14 +861,18 @@ class DFSOutputStream extends FSOutputSu
 
       boolean result = false;
       try {
-        DFSClient.LOG.debug("Connecting to " + nodes[0].getName());
+        if(DFSClient.LOG.isDebugEnabled()) {
+          DFSClient.LOG.debug("Connecting to " + nodes[0].getName());
+        }
         InetSocketAddress target = NetUtils.createSocketAddr(nodes[0].getName());
         s = dfsClient.socketFactory.createSocket();
         int timeoutValue = dfsClient.getDatanodeReadTimeout(nodes.length);
         NetUtils.connect(s, target, timeoutValue);
         s.setSoTimeout(timeoutValue);
         s.setSendBufferSize(DFSClient.DEFAULT_DATA_SOCKET_SIZE);
-        DFSClient.LOG.debug("Send buf size " + s.getSendBufferSize());
+        if(DFSClient.LOG.isDebugEnabled()) {
+          DFSClient.LOG.debug("Send buf size " + s.getSendBufferSize());
+        }
         long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
 
         //
@@ -1031,8 +1040,9 @@ class DFSOutputStream extends FSOutputSu
     this.blockSize = blockSize;
     this.blockReplication = replication;
     this.progress = progress;
-    if (progress != null) {
-      DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "+src);
+    if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
+      DFSClient.LOG.debug(
+          "Set non-null progress callback on DFSOutputStream " + src);
     }
     
     if ( bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
@@ -1246,9 +1256,11 @@ class DFSOutputStream extends FSOutputSu
       // flush checksum buffer, but keep checksum buffer intact
       flushBuffer(true);
 
-      DFSClient.LOG.debug("DFSClient flush() : saveOffset " + saveOffset +  
-                " bytesCurBlock " + bytesCurBlock +
-                " lastFlushOffset " + lastFlushOffset);
+      if(DFSClient.LOG.isDebugEnabled()) {
+        DFSClient.LOG.debug("DFSClient flush() : saveOffset " + saveOffset +  
+            " bytesCurBlock " + bytesCurBlock +
+            " lastFlushOffset " + lastFlushOffset);
+      }
       
       // Flush only if we haven't already flushed till this offset.
       if (lastFlushOffset != bytesCurBlock) {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java Sat Aug 28 23:06:00 2010
@@ -147,8 +147,10 @@ public class HftpFileSystem extends File
     // otherwise it is hostname:RPC_PORT
     String key = HftpFileSystem.HFTP_SERVICE_NAME_KEY+
     SecurityUtil.buildDTServiceName(name, DEFAULT_PORT);
-    LOG.debug("Trying to find DT for " + name + " using key=" + key + 
-        "; conf=" + conf.get(key, ""));
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Trying to find DT for " + name + " using key=" + key + 
+          "; conf=" + conf.get(key, ""));
+    }
     String nnServiceName = conf.get(key);
     int nnPort = NameNode.DEFAULT_PORT;
     if (nnServiceName != null) { // get the real port
@@ -169,7 +171,9 @@ public class HftpFileSystem extends File
       for (Token<? extends TokenIdentifier> t : ugi.getTokens()) {
         if (DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(t.getKind()) &&
             t.getService().toString().equals(canonicalName)) {
-          LOG.debug("Found existing DT for " + name);
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("Found existing DT for " + name);
+          }
           delegationToken = (Token<DelegationTokenIdentifier>) t;
           break;
         }
@@ -195,13 +199,17 @@ public class HftpFileSystem extends File
           } catch (Exception e) {
             LOG.info("Couldn't get a delegation token from " + nnHttpUrl + 
             " using https.");
-            LOG.debug("error was ", e);
+            if(LOG.isDebugEnabled()) {
+              LOG.debug("error was ", e);
+            }
             //Maybe the server is in unsecure mode (that's bad but okay)
             return null;
           }
           for (Token<? extends TokenIdentifier> t : c.getAllTokens()) {
-            LOG.debug("Got dt for " + getUri() + ";t.service="
-                +t.getService());
+            if(LOG.isDebugEnabled()) {
+              LOG.debug("Got dt for " + getUri() + ";t.service="
+                  +t.getService());
+            }
             t.setService(new Text(getCanonicalServiceName()));
             return t;
           }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Sat Aug 28 23:06:00 2010
@@ -481,7 +481,9 @@ class BlockReceiver implements java.io.C
     buf.position(endOfHeader);        
     
     if (lastPacketInBlock || len == 0) {
-      LOG.debug("Receiving an empty packet or the end of the block " + block);
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Receiving an empty packet or the end of the block " + block);
+      }
     } else {
       int checksumLen = ((len + bytesPerChecksum - 1)/bytesPerChecksum)*
                                                             checksumSize;
@@ -553,7 +555,9 @@ class BlockReceiver implements java.io.C
               buf, buf.length - checksumSize, buf.length
             );
             checksumOut.write(buf);
-            LOG.debug("Writing out partial crc for data len " + len);
+            if(LOG.isDebugEnabled()) {
+              LOG.debug("Writing out partial crc for data len " + len);
+            }
             partialCrc = null;
           } else {
             lastChunkChecksum = Arrays.copyOfRange(
@@ -733,8 +737,6 @@ class BlockReceiver implements java.io.C
                    FSInputChecker.checksum2long(crcbuf);
       throw new IOException(msg);
     }
-    //LOG.debug("Partial CRC matches 0x" + 
-    //            Long.toHexString(partialCrc.getValue()));
   }
   
   
@@ -777,8 +779,10 @@ class BlockReceiver implements java.io.C
      */
     synchronized void enqueue(long seqno, boolean lastPacketInBlock, long lastByteInPacket) {
       if (running) {
-        LOG.debug("PacketResponder " + numTargets + " adding seqno " + seqno +
-                  " to ack queue.");
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("PacketResponder " + numTargets + " adding seqno " + seqno +
+                    " to ack queue.");
+        }
         ackQueue.addLast(new Packet(seqno, lastPacketInBlock, lastByteInPacket));
         notifyAll();
       }
@@ -795,8 +799,10 @@ class BlockReceiver implements java.io.C
           running = false;
         }
       }
-      LOG.debug("PacketResponder " + numTargets +
-               " for block " + block + " Closing down.");
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("PacketResponder " + numTargets +
+                 " for block " + block + " Closing down.");
+      }
       running = false;
       notifyAll();
     }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Sat Aug 28 23:06:00 2010
@@ -425,7 +425,9 @@ public class DataNode extends Configured
        : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0,
            conf, new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " ")),
            secureResources.getListener());
-    LOG.debug("Datanode listening on " + infoHost + ":" + tmpInfoPort);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Datanode listening on " + infoHost + ":" + tmpInfoPort);
+    }
     if (conf.getBoolean("dfs.https.enable", false)) {
       boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                                                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
@@ -435,7 +437,9 @@ public class DataNode extends Configured
       sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
           "ssl-server.xml"));
       this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
-      LOG.debug("Datanode listening for SSL on " + secInfoSocAddr);
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Datanode listening for SSL on " + secInfoSocAddr);
+      }
     }
     this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
     this.infoServer.addInternalServlet(null, "/getFileChecksum/*",

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Sat Aug 28 23:06:00 2010
@@ -625,9 +625,11 @@ public class BlockManager {
     if (count > 1) {
       addToInvalidates(blk, dn);
       removeStoredBlock(blk, node);
-      NameNode.stateChangeLog.debug("BLOCK* NameSystem.invalidateBlocks: "
-                                   + blk + " on "
-                                   + dn.getName() + " listed for deletion.");
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug("BLOCK* NameSystem.invalidateBlocks: "
+            + blk + " on "
+            + dn.getName() + " listed for deletion.");
+      }
     } else {
       NameNode.stateChangeLog.info("BLOCK* NameSystem.invalidateBlocks: "
           + blk + " on " + dn.getName()
@@ -886,9 +888,11 @@ public class BlockManager {
         // The reason we use 'pending' is so we can retry
         // replications that fail after an appropriate amount of time.
         pendingReplications.add(block, targets.length);
-        NameNode.stateChangeLog.debug(
-            "BLOCK* block " + block
-            + " is moved from neededReplications to pendingReplications");
+        if(NameNode.stateChangeLog.isDebugEnabled()) {
+          NameNode.stateChangeLog.debug(
+              "BLOCK* block " + block
+              + " is moved from neededReplications to pendingReplications");
+        }
 
         // remove from neededReplications
         if(numEffectiveReplicas + targets.length >= requiredReplication) {
@@ -905,9 +909,11 @@ public class BlockManager {
                     "BLOCK* ask "
                     + srcNode.getName() + " to replicate "
                     + block + " to " + targetList);
-          NameNode.stateChangeLog.debug(
-                    "BLOCK* neededReplications = " + neededReplications.size()
-                    + " pendingReplications = " + pendingReplications.size());
+          if(NameNode.stateChangeLog.isDebugEnabled()) {
+            NameNode.stateChangeLog.debug(
+                "BLOCK* neededReplications = " + neededReplications.size()
+                + " pendingReplications = " + pendingReplications.size());
+          }
         }
       }
     }
@@ -1255,9 +1261,11 @@ public class BlockManager {
     }
     if (excessBlocks.add(block)) {
       excessBlocksCount++;
-      NameNode.stateChangeLog.debug("BLOCK* NameSystem.chooseExcessReplicates:"
-          + " (" + dn.getName() + ", " + block
-          + ") is added to excessReplicateMap");
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug("BLOCK* NameSystem.chooseExcessReplicates:"
+            + " (" + dn.getName() + ", " + block
+            + ") is added to excessReplicateMap");
+      }
     }
   }
 
@@ -1266,12 +1274,16 @@ public class BlockManager {
    * removed block is still valid.
    */
   void removeStoredBlock(Block block, DatanodeDescriptor node) {
-    NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
-        + block + " from " + node.getName());
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
+          + block + " from " + node.getName());
+    }
     synchronized (namesystem) {
       if (!blocksMap.removeNode(block, node)) {
-        NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
-            + block + " has already been removed from node " + node);
+        if(NameNode.stateChangeLog.isDebugEnabled()) {
+          NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
+              + block + " has already been removed from node " + node);
+        }
         return;
       }
 
@@ -1296,8 +1308,11 @@ public class BlockManager {
       if (excessBlocks != null) {
         if (excessBlocks.remove(block)) {
           excessBlocksCount--;
-          NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
-              + block + " is removed from excessBlocks");
+          if(NameNode.stateChangeLog.isDebugEnabled()) {
+            NameNode.stateChangeLog.debug(
+                "BLOCK* NameSystem.removeStoredBlock: "
+                + block + " is removed from excessBlocks");
+          }
           if (excessBlocks.size() == 0) {
             excessReplicateMap.remove(node.getStorageID());
           }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java Sat Aug 28 23:06:00 2010
@@ -381,11 +381,12 @@ public class BlockPlacementPolicyDefault
                                long blockSize, int maxTargetPerLoc,
                                boolean considerLoad,
                                List<DatanodeDescriptor> results) {
-    Log logr = FSNamesystem.LOG;
     // check if the node is (being) decommissed
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-      logr.debug("Node "+NodeBase.getPath(node)+
-                " is not chosen because the node is (being) decommissioned");
+      if(FSNamesystem.LOG.isDebugEnabled()) {
+        FSNamesystem.LOG.debug("Node "+NodeBase.getPath(node)+
+            " is not chosen because the node is (being) decommissioned");
+      }
       return false;
     }
 
@@ -393,8 +394,10 @@ public class BlockPlacementPolicyDefault
                      (node.getBlocksScheduled() * blockSize); 
     // check the remaining capacity of the target machine
     if (blockSize* FSConstants.MIN_BLOCKS_FOR_WRITE>remaining) {
-      logr.debug("Node "+NodeBase.getPath(node)+
-                " is not chosen because the node does not have enough space");
+      if(FSNamesystem.LOG.isDebugEnabled()) {
+        FSNamesystem.LOG.debug("Node "+NodeBase.getPath(node)+
+            " is not chosen because the node does not have enough space");
+      }
       return false;
     }
       
@@ -406,8 +409,10 @@ public class BlockPlacementPolicyDefault
         avgLoad = (double)stats.getTotalLoad()/size;
       }
       if (node.getXceiverCount() > (2.0 * avgLoad)) {
-        logr.debug("Node "+NodeBase.getPath(node)+
-                  " is not chosen because the node is too busy");
+        if(FSNamesystem.LOG.isDebugEnabled()) {
+          FSNamesystem.LOG.debug("Node "+NodeBase.getPath(node)+
+              " is not chosen because the node is too busy");
+        }
         return false;
       }
     }
@@ -423,8 +428,10 @@ public class BlockPlacementPolicyDefault
       }
     }
     if (counter>maxTargetPerLoc) {
-      logr.debug("Node "+NodeBase.getPath(node)+
-                " is not chosen because the rack has too many chosen nodes");
+      if(FSNamesystem.LOG.isDebugEnabled()) {
+        FSNamesystem.LOG.debug("Node "+NodeBase.getPath(node)+
+            " is not chosen because the rack has too many chosen nodes");
+      }
       return false;
     }
     return true;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java Sat Aug 28 23:06:00 2010
@@ -471,9 +471,11 @@ public class DatanodeDescriptor extends 
                   Collection<Block> toAdd,    // add to DatanodeDescriptor
                   Collection<Block> toInvalidate, // should be removed from DN
                   Collection<BlockInfo> toCorrupt) {// add to corrupt replicas
-    FSNamesystem.LOG.debug("Reported block " + block
-        + " on " + getName() + " size " + block.getNumBytes()
-        + " replicaState = " + rState);
+    if(FSNamesystem.LOG.isDebugEnabled()) {
+      FSNamesystem.LOG.debug("Reported block " + block
+          + " on " + getName() + " size " + block.getNumBytes()
+          + " replicaState = " + rState);
+    }
 
     // find block by blockId
     BlockInfo storedBlock = blockManager.blocksMap.getStoredBlock(block);
@@ -484,7 +486,10 @@ public class DatanodeDescriptor extends 
       return null;
     }
 
-    FSNamesystem.LOG.debug("In memory blockUCState = " + storedBlock.getBlockUCState());
+    if(FSNamesystem.LOG.isDebugEnabled()) {
+      FSNamesystem.LOG.debug("In memory blockUCState = " +
+          storedBlock.getBlockUCState());
+    }
 
     // Block is on the DN
     boolean isCorrupt = false;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java Sat Aug 28 23:06:00 2010
@@ -169,13 +169,17 @@ class EditLogFileOutputStream extends Ed
   private void preallocate() throws IOException {
     long position = fc.position();
     if (position + 4096 >= fc.size()) {
-      FSNamesystem.LOG.debug("Preallocating Edit log, current size "
-          + fc.size());
+      if(FSNamesystem.LOG.isDebugEnabled()) {
+        FSNamesystem.LOG.debug("Preallocating Edit log, current size "
+            + fc.size());
+      }
       long newsize = position + 1024 * 1024; // 1MB
       fill.position(0);
       int written = fc.write(fill, newsize);
-      FSNamesystem.LOG.debug("Edit log size is now " + fc.size() + " written "
-          + written + " bytes " + " at offset " + newsize);
+      if(FSNamesystem.LOG.isDebugEnabled()) {
+        FSNamesystem.LOG.debug("Edit log size is now " + fc.size() +
+            " written " + written + " bytes " + " at offset " + newsize);
+      }
     }
   }
 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Sat Aug 28 23:06:00 2010
@@ -208,8 +208,10 @@ class FSDirectory implements Closeable {
     // add create file record to log, record new generation stamp
     fsImage.getEditLog().logOpenFile(path, newNode);
 
-    NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
-                                  +path+" is added to the file system");
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
+          +path+" is added to the file system");
+    }
     return newNode;
   }
 
@@ -338,10 +340,12 @@ class FSDirectory implements Closeable {
       getBlockManager().addINode(blockInfo, fileINode);
       fileINode.addBlock(blockInfo);
 
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
-                                    + path + " with " + block
-                                    + " block is added to the in-memory "
-                                    + "file system");
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
+            + path + " with " + block
+            + " block is added to the in-memory "
+            + "file system");
+      }
       return blockInfo;
     }
   }
@@ -354,9 +358,11 @@ class FSDirectory implements Closeable {
 
     synchronized (rootDir) {
       fsImage.getEditLog().logOpenFile(path, file);
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.persistBlocks: "
-                                    +path+" with "+ file.getBlocks().length 
-                                    +" blocks is persisted to the file system");
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug("DIR* FSDirectory.persistBlocks: "
+            +path+" with "+ file.getBlocks().length 
+            +" blocks is persisted to the file system");
+      }
     }
   }
 
@@ -372,8 +378,8 @@ class FSDirectory implements Closeable {
       fsImage.getEditLog().logCloseFile(path, file);
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("DIR* FSDirectory.closeFile: "
-                                    +path+" with "+ file.getBlocks().length 
-                                    +" blocks is persisted to the file system");
+            +path+" with "+ file.getBlocks().length 
+            +" blocks is persisted to the file system");
       }
     }
   }
@@ -394,9 +400,11 @@ class FSDirectory implements Closeable {
 
       // write modified block locations to log
       fsImage.getEditLog().logOpenFile(path, fileNode);
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
-                                    +path+" with "+block
-                                    +" block is added to the file system");
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
+            +path+" with "+block
+            +" block is added to the file system");
+      }
     }
     return true;
   }
@@ -411,7 +419,7 @@ class FSDirectory implements Closeable {
       FileAlreadyExistsException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: "
-                                  +src+" to "+dst);
+          +src+" to "+dst);
     }
     waitForReady();
     long now = now();
@@ -691,9 +699,9 @@ class FSDirectory implements Closeable {
         if (dstChild != null) {
           removedSrc = null;
           if (NameNode.stateChangeLog.isDebugEnabled()) {
-            NameNode.stateChangeLog
-                .debug("DIR* FSDirectory.unprotectedRenameTo: " + src
-                    + " is renamed to " + dst);
+            NameNode.stateChangeLog.debug(
+                "DIR* FSDirectory.unprotectedRenameTo: " + src
+                + " is renamed to " + dst);
           }
           srcInodes[srcInodes.length - 2].setModificationTime(timestamp);
           dstInodes[dstInodes.length - 2].setModificationTime(timestamp);
@@ -958,7 +966,9 @@ class FSDirectory implements Closeable {
     try {
       return isDirEmpty("/");
     } catch (UnresolvedLinkException e) {
-      NameNode.stateChangeLog.debug("/ cannot be a symlink");
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug("/ cannot be a symlink");
+      }
       assert false : "/ cannot be a symlink";
       return true;
     }
@@ -999,8 +1009,10 @@ class FSDirectory implements Closeable {
       INode targetNode = inodes[inodes.length-1];
 
       if (targetNode == null) { // non-existent src
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
-            +"failed to remove "+src+" because it does not exist");
+        if(NameNode.stateChangeLog.isDebugEnabled()) {
+          NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+              +"failed to remove "+src+" because it does not exist");
+        }
         return 0;
       }
       if (inodes.length == 1) { // src is the root
@@ -1020,7 +1032,7 @@ class FSDirectory implements Closeable {
       int filesRemoved = targetNode.collectSubtreeBlocksAndClear(collectedBlocks);
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
-          +src+" is removed");
+            +src+" is removed");
       }
       return filesRemoved;
     }
@@ -1376,8 +1388,10 @@ class FSDirectory implements Closeable {
         if (getFSNamesystem() != null)
           NameNode.getNameNodeMetrics().numFilesCreated.inc();
         fsImage.getEditLog().logMkDir(cur, inodes[i]);
-        NameNode.stateChangeLog.debug(
-            "DIR* FSDirectory.mkdirs: created directory " + cur);
+        if(NameNode.stateChangeLog.isDebugEnabled()) {
+          NameNode.stateChangeLog.debug(
+              "DIR* FSDirectory.mkdirs: created directory " + cur);
+        }
       }
     }
     return true;
@@ -1911,8 +1925,10 @@ class FSDirectory implements Closeable {
     }
     fsImage.getEditLog().logSymlink(path, target, modTime, modTime, newNode);
     
-    NameNode.stateChangeLog.debug("DIR* FSDirectory.addSymlink: "
-                                  +path+" is added to the file system");
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* FSDirectory.addSymlink: "
+          +path+" is added to the file system");
+    }
     return newNode;
   }
 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Sat Aug 28 23:06:00 2010
@@ -867,7 +867,10 @@ public class FSEditLog {
       ArrayList<EditLogOutputStream> errorStreams = null;
       long start = now();
       for(EditLogOutputStream eStream : editStreams) {
-        FSImage.LOG.debug("loggin edits into " + eStream.getName()  + " stream");
+        if(FSImage.LOG.isDebugEnabled()) {
+          FSImage.LOG.debug("loggin edits into " + eStream.getName() +
+              " stream");
+        }
         if(!eStream.isOperationSupported(op))
           continue;
         try {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Sat Aug 28 23:06:00 2010
@@ -1697,7 +1697,9 @@ public class FSImage extends Storage {
       }
     }
     editLog.purgeEditLog(); // renamed edits.new to edits
-    LOG.debug("rollFSImage after purgeEditLog: storageList=" + listStorageDirectories());
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("rollFSImage after purgeEditLog: storageList=" + listStorageDirectories());
+    }
     //
     // Renames new image
     //
@@ -1717,7 +1719,9 @@ public class FSImage extends Storage {
       File curFile = getImageFile(sd, NameNodeFile.IMAGE);
       // renameTo fails on Windows if the destination file 
       // already exists.
-      LOG.debug("renaming  " + ckpt.getAbsolutePath() + " to "  + curFile.getAbsolutePath());
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("renaming  " + ckpt.getAbsolutePath() + " to "  + curFile.getAbsolutePath());
+      }
       if (!ckpt.renameTo(curFile)) {
         if (!curFile.delete() || !ckpt.renameTo(curFile)) {
           LOG.warn("renaming  " + ckpt.getAbsolutePath() + " to "  + 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat Aug 28 23:06:00 2010
@@ -849,7 +849,10 @@ public class FSNamesystem implements FSC
    */
   public void concat(String target, String [] srcs) 
     throws IOException, UnresolvedLinkException {
-    FSNamesystem.LOG.debug("concat " + Arrays.toString(srcs) + " to " + target);
+    if(FSNamesystem.LOG.isDebugEnabled()) {
+      FSNamesystem.LOG.debug("concat " + Arrays.toString(srcs) +
+          " to " + target);
+    }
     // check safe mode
     if (isInSafeMode()) {
       throw new SafeModeException("concat: cannot concat " + target, safeMode);
@@ -963,8 +966,10 @@ public class FSNamesystem implements FSC
         throw new IllegalArgumentException("at least two files are the same");
       }
 
-      NameNode.stateChangeLog.debug("DIR* NameSystem.concat: " + 
-          Arrays.toString(srcs) + " to " + target);
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug("DIR* NameSystem.concat: " + 
+            Arrays.toString(srcs) + " to " + target);
+      }
 
       dir.concatInternal(target,srcs);
     }
@@ -1453,8 +1458,11 @@ public class FSNamesystem implements FSC
     DatanodeDescriptor clientNode = null;
     Block newBlock = null;
 
-    NameNode.stateChangeLog.debug("BLOCK* NameSystem.getAdditionalBlock: file "
-                                  +src+" for "+clientName);
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug(
+          "BLOCK* NameSystem.getAdditionalBlock: file "
+          +src+" for "+clientName);
+    }
 
     synchronized (this) {
       if (isInSafeMode()) {
@@ -1528,13 +1536,17 @@ public class FSNamesystem implements FSC
     //
     // Remove the block from the pending creates list
     //
-    NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: "
-                                  +b+"of file "+src);
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: "
+                                    +b+"of file "+src);
+    }
     INodeFileUnderConstruction file = checkLease(src, holder);
     dir.removeBlock(src, file, b);
-    NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: "
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: "
                                     + b
                                     + " is removed from pendingCreates");
+    }
     return true;
   }
   
@@ -1588,7 +1600,10 @@ public class FSNamesystem implements FSC
   private synchronized boolean completeFileInternal(String src, 
       String holder, Block last) throws SafeModeException,
       UnresolvedLinkException, IOException {
-    NameNode.stateChangeLog.debug("DIR* NameSystem.completeFile: " + src + " for " + holder);
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* NameSystem.completeFile: " +
+          src + " for " + holder);
+    }
     if (isInSafeMode())
       throw new SafeModeException("Cannot complete file " + src, safeMode);
 
@@ -1722,7 +1737,10 @@ public class FSNamesystem implements FSC
   @Deprecated
   private synchronized boolean renameToInternal(String src, String dst)
     throws IOException, UnresolvedLinkException {
-    NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: " + src + " to " + dst);
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: " + src +
+          " to " + dst);
+    }
     if (isInSafeMode())
       throw new SafeModeException("Cannot rename " + src, safeMode);
     if (!DFSUtil.isValidName(dst)) {
@@ -1924,7 +1942,9 @@ public class FSNamesystem implements FSC
   private synchronized boolean mkdirsInternal(String src,
       PermissionStatus permissions, boolean createParent) 
       throws IOException, UnresolvedLinkException {
-    NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
+    }
     if (isPermissionEnabled) {
       checkTraverse(src);
     }
@@ -2368,8 +2388,10 @@ public class FSNamesystem implements FSC
         // The same datanode has been just restarted to serve the same data 
         // storage. We do not need to remove old data blocks, the delta will
         // be calculated on the next block report from the datanode
-        NameNode.stateChangeLog.debug("BLOCK* NameSystem.registerDatanode: "
-                                      + "node restarted.");
+        if(NameNode.stateChangeLog.isDebugEnabled()) {
+          NameNode.stateChangeLog.debug("BLOCK* NameSystem.registerDatanode: "
+                                        + "node restarted.");
+        }
       } else {
         // nodeS is found
         /* The registering datanode is a replacement node for the existing 
@@ -2412,9 +2434,11 @@ public class FSNamesystem implements FSC
       // this data storage has never been registered
       // it is either empty or was created by pre-storageID version of DFS
       nodeReg.storageID = newStorageID();
-      NameNode.stateChangeLog.debug(
-                                    "BLOCK* NameSystem.registerDatanode: "
-                                    + "new storageID " + nodeReg.getStorageID() + " assigned.");
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug(
+            "BLOCK* NameSystem.registerDatanode: "
+            + "new storageID " + nodeReg.getStorageID() + " assigned.");
+      }
     }
     // register new datanode
     DatanodeDescriptor nodeDescr 
@@ -2741,9 +2765,11 @@ public class FSNamesystem implements FSC
   void unprotectedRemoveDatanode(DatanodeDescriptor nodeDescr) {
     nodeDescr.resetBlocks();
     blockManager.removeFromInvalidates(nodeDescr.getStorageID());
-    NameNode.stateChangeLog.debug(
-                                  "BLOCK* NameSystem.unprotectedRemoveDatanode: "
-                                  + nodeDescr.getName() + " is out of service now.");
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug(
+          "BLOCK* NameSystem.unprotectedRemoveDatanode: "
+          + nodeDescr.getName() + " is out of service now.");
+    }
   }
     
   void unprotectedAddDatanode(DatanodeDescriptor nodeDescr) {
@@ -2755,9 +2781,11 @@ public class FSNamesystem implements FSC
                             datanodeMap.put(nodeDescr.getStorageID(), nodeDescr));
     host2DataNodeMap.add(nodeDescr);
       
-    NameNode.stateChangeLog.debug(
-                                  "BLOCK* NameSystem.unprotectedAddDatanode: "
-                                  + "node " + nodeDescr.getName() + " is added to datanodeMap.");
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug(
+          "BLOCK* NameSystem.unprotectedAddDatanode: "
+          + "node " + nodeDescr.getName() + " is added to datanodeMap.");
+    }
   }
 
   /**
@@ -2768,10 +2796,12 @@ public class FSNamesystem implements FSC
   void wipeDatanode(DatanodeID nodeID) throws IOException {
     String key = nodeID.getStorageID();
     host2DataNodeMap.remove(datanodeMap.remove(key));
-    NameNode.stateChangeLog.debug(
-                                  "BLOCK* NameSystem.wipeDatanode: "
-                                  + nodeID.getName() + " storage " + key 
-                                  + " is removed from datanodeMap.");
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug(
+          "BLOCK* NameSystem.wipeDatanode: "
+          + nodeID.getName() + " storage " + key 
+          + " is removed from datanodeMap.");
+    }
   }
 
   FSImage getFSImage() {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Sat Aug 28 23:06:00 2010
@@ -191,12 +191,14 @@ class INodeDirectory extends INode {
         existing[index] = curNode;
       }
       if (curNode.isLink() && (!lastComp || (lastComp && resolveLink))) {
-        NameNode.stateChangeLog.debug("UnresolvedPathException " +
-           " count: " + count +
-           " componenent: " + DFSUtil.bytes2String(components[count]) +
-           " full path: " + constructPath(components, 0) +
-           " remaining path: " + constructPath(components, count+1) +
-           " symlink: " + ((INodeSymlink)curNode).getLinkValue());
+        if(NameNode.stateChangeLog.isDebugEnabled()) {
+          NameNode.stateChangeLog.debug("UnresolvedPathException " +
+              " count: " + count +
+              " componenent: " + DFSUtil.bytes2String(components[count]) +
+              " full path: " + constructPath(components, 0) +
+              " remaining path: " + constructPath(components, count+1) +
+              " symlink: " + ((INodeSymlink)curNode).getLinkValue());
+        }
         final String linkTarget = ((INodeSymlink)curNode).getLinkValue();
         throw new UnresolvedPathException(constructPath(components, 0),
                                           constructPath(components, count+1),

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Sat Aug 28 23:06:00 2010
@@ -804,8 +804,10 @@ public class NameNode implements Namenod
                                Block previous,
                                DatanodeInfo[] excludedNodes)
       throws IOException {
-    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
-                         +src+" for "+clientName);
+    if(stateChangeLog.isDebugEnabled()) {
+      stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
+          +src+" for "+clientName);
+    }
     HashMap<Node, Node> excludedNodesSet = null;
     if (excludedNodes != null) {
       excludedNodesSet = new HashMap<Node, Node>(excludedNodes.length);
@@ -825,8 +827,10 @@ public class NameNode implements Namenod
    */
   public void abandonBlock(Block b, String src, String holder)
       throws IOException {
-    stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
-                         +b+" of file "+src);
+    if(stateChangeLog.isDebugEnabled()) {
+      stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
+          +b+" of file "+src);
+    }
     if (!namesystem.abandonBlock(b, src, holder)) {
       throw new IOException("Cannot abandon block during write to " + src);
     }
@@ -835,7 +839,10 @@ public class NameNode implements Namenod
   /** {@inheritDoc} */
   public boolean complete(String src, String clientName, Block last)
       throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName);
+    if(stateChangeLog.isDebugEnabled()) {
+      stateChangeLog.debug("*DIR* NameNode.complete: "
+          + src + " for " + clientName);
+    }
     return namesystem.completeFile(src, clientName, last);
   }
 
@@ -890,7 +897,9 @@ public class NameNode implements Namenod
   @Deprecated
   @Override
   public boolean rename(String src, String dst) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
+    if(stateChangeLog.isDebugEnabled()) {
+      stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
+    }
     if (!checkPathLength(dst)) {
       throw new IOException("rename: Pathname too long.  Limit " 
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
@@ -913,7 +922,9 @@ public class NameNode implements Namenod
   @Override
   public void rename(String src, String dst, Options.Rename... options)
       throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
+    if(stateChangeLog.isDebugEnabled()) {
+      stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
+    }
     if (!checkPathLength(dst)) {
       throw new IOException("rename: Pathname too long.  Limit " 
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
@@ -956,7 +967,9 @@ public class NameNode implements Namenod
   /** {@inheritDoc} */
   public boolean mkdirs(String src, FsPermission masked, boolean createParent)
       throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src);
+    if(stateChangeLog.isDebugEnabled()) {
+      stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src);
+    }
     if (!checkPathLength(src)) {
       throw new IOException("mkdirs: Pathname too long.  Limit " 
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
@@ -1212,8 +1225,11 @@ public class NameNode implements Namenod
                                      long[] blocks) throws IOException {
     verifyRequest(nodeReg);
     BlockListAsLongs blist = new BlockListAsLongs(blocks);
-    stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
-           +"from "+nodeReg.getName()+" "+blist.getNumberOfBlocks() +" blocks");
+    if(stateChangeLog.isDebugEnabled()) {
+      stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
+           + "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks()
+           + " blocks");
+    }
 
     namesystem.processReport(nodeReg, blist);
     if (getFSImage().isUpgradeFinalized())
@@ -1225,8 +1241,10 @@ public class NameNode implements Namenod
                             Block blocks[],
                             String delHints[]) throws IOException {
     verifyRequest(nodeReg);
-    stateChangeLog.debug("*BLOCK* NameNode.blockReceived: "
-                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks.");
+    if(stateChangeLog.isDebugEnabled()) {
+      stateChangeLog.debug("*BLOCK* NameNode.blockReceived: "
+          +"from "+nodeReg.getName()+" "+blocks.length+" blocks.");
+    }
     for (int i = 0; i < blocks.length; i++) {
       namesystem.blockReceived(nodeReg, blocks[i], delHints[i]);
     }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java Sat Aug 28 23:06:00 2010
@@ -85,7 +85,10 @@ class PendingReplicationBlocks {
     synchronized (pendingReplications) {
       PendingBlockInfo found = pendingReplications.get(block);
       if (found != null) {
-        FSNamesystem.LOG.debug("Removing pending replication for block" + block);
+        if(FSNamesystem.LOG.isDebugEnabled()) {
+          FSNamesystem.LOG.debug("Removing pending replication for block" +
+              block);
+        }
         found.decrementReplicas();
         if (found.getNumReplicas() <= 0) {
           pendingReplications.remove(block);
@@ -181,8 +184,10 @@ class PendingReplicationBlocks {
           pendingReplicationCheck();
           Thread.sleep(period);
         } catch (InterruptedException ie) {
-          FSNamesystem.LOG.debug(
+          if(FSNamesystem.LOG.isDebugEnabled()) {
+            FSNamesystem.LOG.debug(
                 "PendingReplicationMonitor thread received exception. " + ie);
+          }
         }
       }
     }
@@ -195,7 +200,9 @@ class PendingReplicationBlocks {
         Iterator<Map.Entry<Block, PendingBlockInfo>> iter =
                                     pendingReplications.entrySet().iterator();
         long now = now();
-        FSNamesystem.LOG.debug("PendingReplicationMonitor checking Q");
+        if(FSNamesystem.LOG.isDebugEnabled()) {
+          FSNamesystem.LOG.debug("PendingReplicationMonitor checking Q");
+        }
         while (iter.hasNext()) {
           Map.Entry<Block, PendingBlockInfo> entry = iter.next();
           PendingBlockInfo pendingBlock = entry.getValue();

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Sat Aug 28 23:06:00 2010
@@ -393,7 +393,9 @@ public class SecondaryNameNode implement
       }
       return fsName.getHost() + ":" + sockAddr.getPort();
     } else {
-      LOG.debug("configuredAddress = " + configuredAddress);
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("configuredAddress = " + configuredAddress);
+      }
       return configuredAddress;
     }
   }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java Sat Aug 28 23:06:00 2010
@@ -108,13 +108,15 @@ class UnderReplicatedBlocks implements I
     int priLevel = getPriority(block, curReplicas, decomissionedReplicas,
                                expectedReplicas);
     if(priLevel != LEVEL && priorityQueues.get(priLevel).add(block)) {
-      NameNode.stateChangeLog.debug(
-                                    "BLOCK* NameSystem.UnderReplicationBlock.add:"
-                                    + block
-                                    + " has only "+curReplicas
-                                    + " replicas and need " + expectedReplicas
-                                    + " replicas so is added to neededReplications"
-                                    + " at priority level " + priLevel);
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug(
+          "BLOCK* NameSystem.UnderReplicationBlock.add:"
+          + block
+          + " has only "+curReplicas
+          + " replicas and need " + expectedReplicas
+          + " replicas so is added to neededReplications"
+          + " at priority level " + priLevel);
+      }
       return true;
     }
     return false;
@@ -135,18 +137,22 @@ class UnderReplicatedBlocks implements I
   boolean remove(Block block, int priLevel) {
     if(priLevel >= 0 && priLevel < LEVEL 
         && priorityQueues.get(priLevel).remove(block)) {
-      NameNode.stateChangeLog.debug(
-                                    "BLOCK* NameSystem.UnderReplicationBlock.remove: "
-                                    + "Removing block " + block
-                                    + " from priority queue "+ priLevel);
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug(
+          "BLOCK* NameSystem.UnderReplicationBlock.remove: "
+          + "Removing block " + block
+          + " from priority queue "+ priLevel);
+      }
       return true;
     } else {
       for(int i=0; i<LEVEL; i++) {
         if(i!=priLevel && priorityQueues.get(i).remove(block)) {
-          NameNode.stateChangeLog.debug(
-                                        "BLOCK* NameSystem.UnderReplicationBlock.remove: "
-                                        + "Removing block " + block
-                                        + " from priority queue "+ i);
+          if(NameNode.stateChangeLog.isDebugEnabled()) {
+            NameNode.stateChangeLog.debug(
+              "BLOCK* NameSystem.UnderReplicationBlock.remove: "
+              + "Removing block " + block
+              + " from priority queue "+ i);
+          }
           return true;
         }
       }
@@ -163,25 +169,29 @@ class UnderReplicatedBlocks implements I
     int oldExpectedReplicas = curExpectedReplicas-expectedReplicasDelta;
     int curPri = getPriority(block, curReplicas, decommissionedReplicas, curExpectedReplicas);
     int oldPri = getPriority(block, oldReplicas, decommissionedReplicas, oldExpectedReplicas);
-    NameNode.stateChangeLog.debug("UnderReplicationBlocks.update " + 
-                                  block +
-                                  " curReplicas " + curReplicas +
-                                  " curExpectedReplicas " + curExpectedReplicas +
-                                  " oldReplicas " + oldReplicas +
-                                  " oldExpectedReplicas  " + oldExpectedReplicas +
-                                  " curPri  " + curPri +
-                                  " oldPri  " + oldPri);
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("UnderReplicationBlocks.update " + 
+        block +
+        " curReplicas " + curReplicas +
+        " curExpectedReplicas " + curExpectedReplicas +
+        " oldReplicas " + oldReplicas +
+        " oldExpectedReplicas  " + oldExpectedReplicas +
+        " curPri  " + curPri +
+        " oldPri  " + oldPri);
+    }
     if(oldPri != LEVEL && oldPri != curPri) {
       remove(block, oldPri);
     }
     if(curPri != LEVEL && priorityQueues.get(curPri).add(block)) {
-      NameNode.stateChangeLog.debug(
-                                    "BLOCK* NameSystem.UnderReplicationBlock.update:"
-                                    + block
-                                    + " has only "+curReplicas
-                                    + " replicas and need " + curExpectedReplicas
-                                    + " replicas so is added to neededReplications"
-                                    + " at priority level " + curPri);
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug(
+          "BLOCK* NameSystem.UnderReplicationBlock.update:"
+          + block
+          + " has only "+curReplicas
+          + " replicas and need " + curExpectedReplicas
+          + " replicas so is added to neededReplications"
+          + " at priority level " + curPri);
+      }
     }
   }
 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java Sat Aug 28 23:06:00 2010
@@ -72,10 +72,12 @@ class UpgradeManagerNamenode extends Upg
 
   synchronized UpgradeCommand processUpgradeCommand(UpgradeCommand command
                                                     ) throws IOException {
-    NameNode.LOG.debug("\n   Distributed upgrade for NameNode version " 
-        + getUpgradeVersion() + " to current LV " 
-        + FSConstants.LAYOUT_VERSION + " is processing upgrade command: "
-        + command.getAction() + " status = " + getUpgradeStatus() + "%");
+    if(NameNode.LOG.isDebugEnabled()) {
+      NameNode.LOG.debug("\n   Distributed upgrade for NameNode version " 
+          + getUpgradeVersion() + " to current LV " 
+          + FSConstants.LAYOUT_VERSION + " is processing upgrade command: "
+          + command.getAction() + " status = " + getUpgradeStatus() + "%");
+    }
     if(currentUpgrades == null) {
       NameNode.LOG.info("Ignoring upgrade command: " 
           + command.getAction() + " version " + command.getVersion()

Modified: hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fi/ProbabilityModel.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fi/ProbabilityModel.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fi/ProbabilityModel.java (original)
+++ hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/fi/ProbabilityModel.java Sat Aug 28 23:06:00 2010
@@ -95,7 +95,9 @@ public class ProbabilityModel {
 
     float ret = conf.getFloat(newProbName,
         conf.getFloat(ALL_PROBABILITIES, DEFAULT_PROB));
-    LOG.debug("Request for " + newProbName + " returns=" + ret);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Request for " + newProbName + " returns=" + ret);
+    }
     // Make sure that probability level is valid.
     if (ret < DEFAULT_PROB || ret > MAX_PROB) 
       ret = conf.getFloat(ALL_PROBABILITIES, DEFAULT_PROB);

Modified: hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java?rev=990466&r1=990465&r2=990466&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java (original)
+++ hadoop/hdfs/trunk/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java Sat Aug 28 23:06:00 2010
@@ -67,8 +67,10 @@ public class PipelinesTestUtil extends D
           }
           assertTrue("FI: Wrong receiving length",
               counterPartsBytes <= n.bytes);
-          FiTestUtil.LOG.debug("FI: before compare of Recv bytes. Expected " +
-              n.bytes + ", got " + counterPartsBytes);
+          if(FiTestUtil.LOG.isDebugEnabled()) {
+            FiTestUtil.LOG.debug("FI: before compare of Recv bytes. Expected "
+                + n.bytes + ", got " + counterPartsBytes);
+          }
         }
       }
     }
@@ -104,8 +106,11 @@ public class PipelinesTestUtil extends D
           }
           assertTrue("FI: Wrong acknowledged length",
               counterPartsBytes == n.bytes);
-          FiTestUtil.LOG.debug("FI: before compare of Acked bytes. Expected " +
-              n.bytes + ", got " + counterPartsBytes);
+          if(FiTestUtil.LOG.isDebugEnabled()) {
+            FiTestUtil.LOG.debug(
+                "FI: before compare of Acked bytes. Expected " +
+                n.bytes + ", got " + counterPartsBytes);
+          }
         }
       }
     }
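
The change above applies one pattern repeatedly: wrap each LOG.debug(..) call in a LOG.isDebugEnabled() guard so the debug message string is only built when DEBUG logging is actually enabled. The sketch below is illustrative only and is not part of the patch; the class, method and message are hypothetical, but it uses the same commons-logging API (org.apache.commons.logging) that the patched HDFS classes use.

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class DebugGuardExample {
      private static final Log LOG = LogFactory.getLog(DebugGuardExample.class);

      void process(String src, long blockId) {
        // Without the guard, the argument to debug(..) is evaluated on every
        // call, so the string concatenation runs even when DEBUG is disabled.
        // The guard skips that cost, which matters on hot NameNode/DataNode paths.
        if (LOG.isDebugEnabled()) {
          LOG.debug("processing " + src + " block " + blockId);
        }
      }
    }

The guard does not change what is logged when DEBUG is enabled; it only avoids the string concatenation and object allocation when it is not.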


