hadoop-hdfs-commits mailing list archives

From szets...@apache.org
Subject svn commit: r1407706 [1/2] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/ src/...
Date Sat, 10 Nov 2012 00:49:28 GMT
Author: szetszwo
Date: Sat Nov 10 00:49:15 2012
New Revision: 1407706

URL: http://svn.apache.org/viewvc?rev=1407706&view=rev
Log:
Merge r1406415 through r1407703 from trunk.

Added:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/
      - copied from r1407703, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/
      - copied from r1407703, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
      - copied unchanged from r1407703, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm
      - copied unchanged from r1407703, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
      - copied unchanged from r1407703, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
      - copied unchanged from r1407703, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1406415-1407703

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sat Nov 10 00:49:15 2012
@@ -157,6 +157,8 @@ Trunk (Unreleased)
     HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in
     INode.collectSubtreeBlocksAndClear(..). (Jing Zhao via szetszwo)
 
+    HDFS-4153. Add START_MSG/SHUTDOWN_MSG for JournalNode. (liang xie via atm)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -223,9 +225,6 @@ Trunk (Unreleased)
     HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049.
     (acmurthy via eli)
 
-    HDFS-3625. Fix TestBackupNode by properly initializing edit log during
-    startup. (Junping Du via todd)
-
     HDFS-3792. Fix two findbugs introduced by HDFS-3695 (todd)
 
     HDFS-3827. TestHASafeMode#assertSafemode method should be made static.
@@ -249,6 +248,9 @@ Trunk (Unreleased)
     HDFS-4106. BPServiceActor#lastHeartbeat, lastBlockReport and
     lastDeletedReport should be volatile. (Jing Zhao via suresh)
 
+    HDFS-4165. Faulty sanity check in FsDirectory.unprotectedSetQuota.
+    (Binglin Chang via suresh)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.
@@ -459,6 +461,9 @@ Release 2.0.3-alpha - Unreleased 
     HDFS-4046. Rename ChecksumTypeProto enum NULL since it is illegal in
     C/C++. (Binglin Chang via suresh)
 
+    HDFS-4048. Use ERROR instead of INFO for volume failure logs.
+    (Stephen Chu via eli)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -561,6 +566,17 @@ Release 2.0.3-alpha - Unreleased 
     HDFS-3979. For hsync, datanode should wait for the local sync to complete
     before sending ack. (Lars Hofhansl via szetszwo)
 
+    HDFS-3625. Fix TestBackupNode by properly initializing edit log during
+    startup. (Junping Du via todd)
+
+    HDFS-4138. BackupNode startup fails due to uninitialized edit log.
+    (Kihwal Lee via shv)
+
+    HDFS-3810. Implement format() for BKJM (Ivan Kelly via umamahesh)
+
+    HDFS-4162. Some malformed and unquoted HTML strings are returned from 
+    datanode web ui. (Darek Dagit via suresh)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES
@@ -1942,6 +1958,9 @@ Release 0.23.5 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
+    HDFS-4080. Add a separate logger for block state change logs to enable turning
+    off those logs. (Kihwal Lee via suresh)
+
   NEW FEATURES
 
   IMPROVEMENTS
@@ -1950,6 +1969,8 @@ Release 0.23.5 - UNRELEASED
 
     HDFS-4075. Reduce recommissioning overhead (Kihwal Lee via daryn)
 
+    HDFS-3990.  NN's health report has severe performance problems (daryn)
+
   BUG FIXES
 
     HDFS-3829. TestHftpURLTimeouts fails intermittently with JDK7  (Trevor
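
The HDFS-4080 entry above introduces a dedicated logger for block state change messages so that operators can turn them off without silencing other state-change logging; the BlockInfoUnderConstruction and BlockManager diffs below switch their call sites from NameNode.stateChangeLog to that logger. A minimal, self-contained sketch of the pattern follows; the logger names and messages are illustrative assumptions, not the exact HDFS definitions:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class BlockStateLoggerSketch {
  // General state-change logger, analogous to NameNode.stateChangeLog.
  static final Log stateChangeLog =
      LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");

  // Dedicated block-state logger. The name "BlockStateChange" is an
  // assumption here; the point is only that a distinct logger name lets
  // log4j target these messages separately.
  static final Log blockStateChangeLog =
      LogFactory.getLog("BlockStateChange");

  public static void main(String[] args) {
    // Verbose per-block events go to the dedicated logger...
    blockStateChangeLog.info("BLOCK* addStoredBlock: blk_1 on dn1 size 1024");
    // ...so they can be disabled (e.g. log4j.logger.BlockStateChange=OFF,
    // assuming that configured name) without losing other state-change logs.
    stateChangeLog.info("DIR* completeFile: /foo/bar is closed");
  }
}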

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java Sat Nov 10 00:49:15 2012
@@ -39,6 +39,7 @@ import org.apache.zookeeper.KeeperExcept
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.AsyncCallback.StringCallback;
+import org.apache.zookeeper.ZKUtil;
 
 import java.util.Collection;
 import java.util.Collections;
@@ -46,6 +47,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.io.IOException;
 
 import java.net.URI;
@@ -142,13 +144,16 @@ public class BookKeeperJournalManager im
   private final Configuration conf;
   private final BookKeeper bkc;
   private final CurrentInprogress ci;
+  private final String basePath;
   private final String ledgerPath;
+  private final String versionPath;
   private final MaxTxId maxTxId;
   private final int ensembleSize;
   private final int quorumSize;
   private final String digestpw;
   private final CountDownLatch zkConnectLatch;
   private final NamespaceInfo nsInfo;
+  private boolean initialized = false;
   private LedgerHandle currentLedger = null;
 
   /**
@@ -160,16 +165,16 @@ public class BookKeeperJournalManager im
     this.nsInfo = nsInfo;
 
     String zkConnect = uri.getAuthority().replace(";", ",");
-    String zkPath = uri.getPath();
+    basePath = uri.getPath();
     ensembleSize = conf.getInt(BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
                                BKJM_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT);
     quorumSize = conf.getInt(BKJM_BOOKKEEPER_QUORUM_SIZE,
                              BKJM_BOOKKEEPER_QUORUM_SIZE_DEFAULT);
 
-    ledgerPath = zkPath + "/ledgers";
-    String maxTxIdPath = zkPath + "/maxtxid";
-    String currentInprogressNodePath = zkPath + "/CurrentInprogress";
-    String versionPath = zkPath + "/version";
+    ledgerPath = basePath + "/ledgers";
+    String maxTxIdPath = basePath + "/maxtxid";
+    String currentInprogressNodePath = basePath + "/CurrentInprogress";
+    versionPath = basePath + "/version";
     digestpw = conf.get(BKJM_BOOKKEEPER_DIGEST_PW,
                         BKJM_BOOKKEEPER_DIGEST_PW_DEFAULT);
 
@@ -180,47 +185,7 @@ public class BookKeeperJournalManager im
       if (!zkConnectLatch.await(6000, TimeUnit.MILLISECONDS)) {
         throw new IOException("Error connecting to zookeeper");
       }
-      if (zkc.exists(zkPath, false) == null) {
-        zkc.create(zkPath, new byte[] {'0'},
-            Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-      }
 
-      Stat versionStat = zkc.exists(versionPath, false);
-      if (versionStat != null) {
-        byte[] d = zkc.getData(versionPath, false, versionStat);
-        VersionProto.Builder builder = VersionProto.newBuilder();
-        TextFormat.merge(new String(d, UTF_8), builder);
-        if (!builder.isInitialized()) {
-          throw new IOException("Invalid/Incomplete data in znode");
-        }
-        VersionProto vp = builder.build();
-
-        // There's only one version at the moment
-        assert vp.getLayoutVersion() == BKJM_LAYOUT_VERSION;
-
-        NamespaceInfo readns = PBHelper.convert(vp.getNamespaceInfo());
-
-        if (nsInfo.getNamespaceID() != readns.getNamespaceID() ||
-            !nsInfo.clusterID.equals(readns.getClusterID()) ||
-            !nsInfo.getBlockPoolID().equals(readns.getBlockPoolID())) {
-          String err = String.format("Environment mismatch. Running process %s"
-                                     +", stored in ZK %s", nsInfo, readns);
-          LOG.error(err);
-          throw new IOException(err);
-        }
-      } else if (nsInfo.getNamespaceID() > 0) {
-        VersionProto.Builder builder = VersionProto.newBuilder();
-        builder.setNamespaceInfo(PBHelper.convert(nsInfo))
-          .setLayoutVersion(BKJM_LAYOUT_VERSION);
-        byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8);
-        zkc.create(versionPath, data,
-                   Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-      }
-
-      if (zkc.exists(ledgerPath, false) == null) {
-        zkc.create(ledgerPath, new byte[] {'0'},
-            Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-      }
       prepareBookKeeperEnv();
       bkc = new BookKeeper(new ClientConfiguration(), zkc);
     } catch (KeeperException e) {
@@ -244,6 +209,7 @@ public class BookKeeperJournalManager im
         BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT);
     final CountDownLatch zkPathLatch = new CountDownLatch(1);
 
+    final AtomicBoolean success = new AtomicBoolean(false);
     StringCallback callback = new StringCallback() {
       @Override
       public void processResult(int rc, String path, Object ctx, String name) {
@@ -251,22 +217,23 @@ public class BookKeeperJournalManager im
             || KeeperException.Code.NODEEXISTS.intValue() == rc) {
           LOG.info("Successfully created bookie available path : "
               + zkAvailablePath);
-          zkPathLatch.countDown();
+          success.set(true);
         } else {
           KeeperException.Code code = KeeperException.Code.get(rc);
-          LOG
-              .error("Error : "
+          LOG.error("Error : "
                   + KeeperException.create(code, path).getMessage()
                   + ", failed to create bookie available path : "
                   + zkAvailablePath);
         }
+        zkPathLatch.countDown();
       }
     };
     ZkUtils.createFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
         Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null);
 
     try {
-      if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS)) {
+      if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS)
+          || !success.get()) {
         throw new IOException("Couldn't create bookie available path :"
             + zkAvailablePath + ", timed out " + zkc.getSessionTimeout()
             + " millis");
@@ -281,19 +248,101 @@ public class BookKeeperJournalManager im
 
   @Override
   public void format(NamespaceInfo ns) throws IOException {
-    // Currently, BKJM automatically formats itself when first accessed.
-    // TODO: change over to explicit formatting so that the admin can
-    // clear out the BK storage when reformatting a cluster.
-    LOG.info("Not formatting " + this + " - BKJM does not currently " +
-        "support reformatting. If it has not been used before, it will" +
-        "be formatted automatically upon first use.");
+    try {
+      // delete old info
+      Stat baseStat = null;
+      Stat ledgerStat = null;
+      if ((baseStat = zkc.exists(basePath, false)) != null) {
+        if ((ledgerStat = zkc.exists(ledgerPath, false)) != null) {
+          for (EditLogLedgerMetadata l : getLedgerList(true)) {
+            try {
+              bkc.deleteLedger(l.getLedgerId());
+            } catch (BKException.BKNoSuchLedgerExistsException bke) {
+              LOG.warn("Ledger " + l.getLedgerId() + " does not exist;"
+                       + " Cannot delete.");
+            }
+          }
+        }
+        ZKUtil.deleteRecursive(zkc, basePath);
+      }
+
+      // should be clean now.
+      zkc.create(basePath, new byte[] {'0'},
+          Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+
+      VersionProto.Builder builder = VersionProto.newBuilder();
+      builder.setNamespaceInfo(PBHelper.convert(ns))
+        .setLayoutVersion(BKJM_LAYOUT_VERSION);
+
+      byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8);
+      zkc.create(versionPath, data,
+                 Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+
+      zkc.create(ledgerPath, new byte[] {'0'},
+                 Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+    } catch (KeeperException ke) {
+      LOG.error("Error accessing zookeeper to format", ke);
+      throw new IOException("Error accessing zookeeper to format", ke);
+    } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
+      throw new IOException("Interrupted during format", ie);
+    } catch (BKException bke) {
+      throw new IOException("Error cleaning up ledgers during format", bke);
+    }
   }
   
   @Override
   public boolean hasSomeData() throws IOException {
-    // Don't confirm format on BKJM, since format() is currently a
-    // no-op anyway
-    return false;
+    try {
+      return zkc.exists(basePath, false) != null;
+    } catch (KeeperException ke) {
+      throw new IOException("Couldn't contact zookeeper", ke);
+    } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
+      throw new IOException("Interrupted while checking for data", ie);
+    }
+  }
+
+  synchronized private void checkEnv() throws IOException {
+    if (!initialized) {
+      try {
+        Stat versionStat = zkc.exists(versionPath, false);
+        if (versionStat == null) {
+          throw new IOException("Environment not initialized. "
+                                +"Have you forgotten to format?");
+        }
+        byte[] d = zkc.getData(versionPath, false, versionStat);
+
+        VersionProto.Builder builder = VersionProto.newBuilder();
+        TextFormat.merge(new String(d, UTF_8), builder);
+        if (!builder.isInitialized()) {
+          throw new IOException("Invalid/Incomplete data in znode");
+        }
+        VersionProto vp = builder.build();
+
+        // There's only one version at the moment
+        assert vp.getLayoutVersion() == BKJM_LAYOUT_VERSION;
+
+        NamespaceInfo readns = PBHelper.convert(vp.getNamespaceInfo());
+
+        if (nsInfo.getNamespaceID() != readns.getNamespaceID() ||
+            !nsInfo.clusterID.equals(readns.getClusterID()) ||
+            !nsInfo.getBlockPoolID().equals(readns.getBlockPoolID())) {
+          String err = String.format("Environment mismatch. Running process %s"
+                                     +", stored in ZK %s", nsInfo, readns);
+          LOG.error(err);
+          throw new IOException(err);
+        }
+
+        ci.init();
+        initialized = true;
+      } catch (KeeperException ke) {
+        throw new IOException("Cannot access ZooKeeper", ke);
+      } catch (InterruptedException ie) {
+        Thread.currentThread().interrupt();
+        throw new IOException("Interrupted while checking environment", ie);
+      }
+    }
   }
 
   /**
@@ -307,6 +356,8 @@ public class BookKeeperJournalManager im
    */
   @Override
   public EditLogOutputStream startLogSegment(long txId) throws IOException {
+    checkEnv();
+
     if (txId <= maxTxId.get()) {
       throw new IOException("We've already seen " + txId
           + ". A new stream cannot be created with it");
@@ -384,6 +435,8 @@ public class BookKeeperJournalManager im
   @Override
   public void finalizeLogSegment(long firstTxId, long lastTxId)
       throws IOException {
+    checkEnv();
+
     String inprogressPath = inprogressZNode(firstTxId);
     try {
       Stat inprogressStat = zkc.exists(inprogressPath, false);
@@ -537,6 +590,8 @@ public class BookKeeperJournalManager im
 
   @Override
   public void recoverUnfinalizedSegments() throws IOException {
+    checkEnv();
+
     synchronized (this) {
       try {
         List<String> children = zkc.getChildren(ledgerPath, false);
@@ -589,6 +644,8 @@ public class BookKeeperJournalManager im
   @Override
   public void purgeLogsOlderThan(long minTxIdToKeep)
       throws IOException {
+    checkEnv();
+
     for (EditLogLedgerMetadata l : getLedgerList(false)) {
       if (l.getLastTxId() < minTxIdToKeep) {
         try {
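
Note the change to prepareBookKeeperEnv() above: the latch was previously counted down only on success, so a failed callback left the caller blocked for the full session timeout, and success was indistinguishable from failure after await(). The rewritten version counts down on every outcome and records success in an AtomicBoolean that is checked alongside the await result. A minimal sketch of that async-callback-to-synchronous-result pattern, with a hypothetical callback interface standing in for the ZooKeeper StringCallback used by ZkUtils.createFullPathOptimistic:

import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class AsyncToSyncSketch {
  // Hypothetical callback interface for this sketch only.
  interface Callback { void done(int returnCode); }

  // Pretend async API: invokes the callback from another thread.
  static void createAsync(final Callback cb) {
    new Thread(new Runnable() {
      public void run() { cb.done(0); }
    }).start();
  }

  public static void main(String[] args) throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean success = new AtomicBoolean(false);

    createAsync(new Callback() {
      public void done(int rc) {
        if (rc == 0) {
          success.set(true);
        }
        // Count down on every outcome, success or failure, so the waiting
        // thread never sits out the full timeout just to learn of an error.
        latch.countDown();
      }
    });

    // A timeout OR completion without success both surface as an error.
    if (!latch.await(5, TimeUnit.SECONDS) || !success.get()) {
      throw new IOException("Async create failed or timed out");
    }
    System.out.println("path created");
  }
}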

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java Sat Nov 10 00:49:15 2012
@@ -56,6 +56,9 @@ class CurrentInprogress {
   CurrentInprogress(ZooKeeper zkc, String lockpath) throws IOException {
     this.currentInprogressNode = lockpath;
     this.zkc = zkc;
+  }
+
+  void init() throws IOException {
     try {
       Stat isCurrentInprogressNodeExists = zkc.exists(currentInprogressNode,
                                                       false);
@@ -96,15 +99,14 @@ class CurrentInprogress {
           this.versionNumberForPermission);
     } catch (KeeperException e) {
       throw new IOException("Exception when setting the data "
-          + "[layout version number,hostname,inprogressNode path]= [" + content
-          + "] to CurrentInprogress. ", e);
+          + "[" + content + "] to CurrentInprogress. ", e);
     } catch (InterruptedException e) {
       throw new IOException("Interrupted while setting the data "
-          + "[layout version number,hostname,inprogressNode path]= [" + content
-          + "] to CurrentInprogress", e);
+          + "[" + content + "] to CurrentInprogress", e);
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Updated data[" + content + "] to CurrentInprogress");
     }
-    LOG.info("Updated data[layout version number,hostname,inprogressNode path]"
-        + "= [" + content + "] to CurrentInprogress");
   }
 
   /**
@@ -136,7 +138,7 @@ class CurrentInprogress {
       }
       return builder.build().getPath();
     } else {
-      LOG.info("No data available in CurrentInprogress");
+      LOG.debug("No data available in CurrentInprogress");
     }
     return null;
   }
@@ -152,7 +154,7 @@ class CurrentInprogress {
       throw new IOException(
           "Interrupted when setting the data to CurrentInprogress node", e);
     }
-    LOG.info("Cleared the data from CurrentInprogress");
+    LOG.debug("Cleared the data from CurrentInprogress");
   }
 
 }
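
The CurrentInprogress change above splits construction from initialization: the constructor now only records the znode path, and the new init() method performs the ZooKeeper work. That lets BookKeeperJournalManager construct the object during format() without side effects and call ci.init() from checkEnv() only after the environment has been validated. A tiny sketch of the two-phase pattern, with hypothetical names:

// All names hypothetical; this only illustrates the shape of the change.
class ZkBackedResource {
  private final String path;
  private boolean ready = false;

  ZkBackedResource(String path) {
    this.path = path;  // record state only: no ZooKeeper I/O here,
  }                    // so it is safe to construct during format()

  void init() {
    // Side-effecting setup (the znode existence check and creation in
    // the real CurrentInprogress) happens here, after validation.
    ready = true;
  }

  boolean isReady() { return ready; }
}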

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java Sat Nov 10 00:49:15 2012
@@ -149,6 +149,7 @@ public class TestBookKeeperConfiguration
     bkjm = new BookKeeperJournalManager(conf,
         URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-WithBKPath"),
         nsi);
+    bkjm.format(nsi);
     Assert.assertNotNull("Bookie available path : " + bkAvailablePath
         + " doesn't exists", zkc.exists(bkAvailablePath, false));
   }
@@ -166,6 +167,7 @@ public class TestBookKeeperConfiguration
     bkjm = new BookKeeperJournalManager(conf,
         URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-DefaultBKPath"),
         nsi);
+    bkjm.format(nsi);
     Assert.assertNotNull("Bookie available path : " + BK_ROOT_PATH
         + " doesn't exists", zkc.exists(BK_ROOT_PATH, false));
   }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java Sat Nov 10 00:49:15 2012
@@ -29,8 +29,16 @@ import org.mockito.Mockito;
 import java.io.IOException;
 import java.net.URI;
 import java.util.List;
+import java.util.ArrayList;
 import java.util.Random;
 
+import java.util.concurrent.Executors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.conf.Configuration;
 
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -90,6 +98,7 @@ public class TestBookKeeperJournalManage
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"), nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1 ; i <= 100; i++) {
@@ -112,6 +121,8 @@ public class TestBookKeeperJournalManage
 
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-txncount"), nsi);
+    bkjm.format(nsi);
+
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1 ; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
@@ -130,6 +141,7 @@ public class TestBookKeeperJournalManage
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-gaps"), nsi);
+    bkjm.format(nsi);
 
     long txid = 1;
     for (long i = 0; i < 3; i++) {
@@ -167,6 +179,7 @@ public class TestBookKeeperJournalManage
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"), nsi);
+    bkjm.format(nsi);
 
     long txid = 1;
     for (long i = 0; i < 3; i++) {
@@ -208,6 +221,7 @@ public class TestBookKeeperJournalManage
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-restartFrom1"), nsi);
+    bkjm.format(nsi);
 
     long txid = 1;
     long start = txid;
@@ -266,6 +280,7 @@ public class TestBookKeeperJournalManage
 
     BookKeeperJournalManager bkjm1 = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);
+    bkjm1.format(nsi);
 
     BookKeeperJournalManager bkjm2 = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);
@@ -288,6 +303,7 @@ public class TestBookKeeperJournalManage
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-simpleread"),
         nsi);
+    bkjm.format(nsi);
 
     final long numTransactions = 10000;
     EditLogOutputStream out = bkjm.startLogSegment(1);
@@ -315,6 +331,7 @@ public class TestBookKeeperJournalManage
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-simplerecovery"),
         nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1 ; i <= 100; i++) {
@@ -365,6 +382,7 @@ public class TestBookKeeperJournalManage
       BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
           BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"),
           nsi);
+      bkjm.format(nsi);
       EditLogOutputStream out = bkjm.startLogSegment(txid);
 
       for (long i = 1 ; i <= 3; i++) {
@@ -450,6 +468,7 @@ public class TestBookKeeperJournalManage
       BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
           BKJMUtil.createJournalURI("/hdfsjournal-onebookiefailure"),
           nsi);
+      bkjm.format(nsi);
 
       EditLogOutputStream out = bkjm.startLogSegment(txid);
       for (long i = 1 ; i <= 3; i++) {
@@ -500,6 +519,7 @@ public class TestBookKeeperJournalManage
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
                                                                  nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1; i <= 100; i++) {
@@ -541,6 +561,7 @@ public class TestBookKeeperJournalManage
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
                                                                  nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1; i <= 100; i++) {
@@ -583,6 +604,7 @@ public class TestBookKeeperJournalManage
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
                                                                  nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1; i <= 100; i++) {
@@ -622,6 +644,7 @@ public class TestBookKeeperJournalManage
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
                                                                  nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1; i <= 100; i++) {
@@ -669,6 +692,7 @@ public class TestBookKeeperJournalManage
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
                                                                  nsi);
+    bkjm.format(nsi);
 
     try {
       // start new inprogress log segment with txid=1
@@ -697,6 +721,81 @@ public class TestBookKeeperJournalManage
     }
   }
 
+  private enum ThreadStatus {
+    COMPLETED, GOODEXCEPTION, BADEXCEPTION;
+  };
+
+  /**
+   * Tests that concurrent calls to format will still allow one to succeed.
+   */
+  @Test
+  public void testConcurrentFormat() throws Exception {
+    final URI uri = BKJMUtil.createJournalURI("/hdfsjournal-concurrentformat");
+    final NamespaceInfo nsi = newNSInfo();
+
+    // populate with data first
+    BookKeeperJournalManager bkjm
+      = new BookKeeperJournalManager(conf, uri, nsi);
+    bkjm.format(nsi);
+    for (int i = 1; i < 100*2; i += 2) {
+      bkjm.startLogSegment(i);
+      bkjm.finalizeLogSegment(i, i+1);
+    }
+    bkjm.close();
+
+    final int numThreads = 40;
+    List<Callable<ThreadStatus>> threads
+      = new ArrayList<Callable<ThreadStatus>>();
+    final CyclicBarrier barrier = new CyclicBarrier(numThreads);
+
+    for (int i = 0; i < numThreads; i++) {
+      threads.add(new Callable<ThreadStatus>() {
+          public ThreadStatus call() {
+            BookKeeperJournalManager bkjm = null;
+            try {
+              bkjm = new BookKeeperJournalManager(conf, uri, nsi);
+              barrier.await();
+              bkjm.format(nsi);
+              return ThreadStatus.COMPLETED;
+            } catch (IOException ioe) {
+              LOG.info("Exception formatting ", ioe);
+              return ThreadStatus.GOODEXCEPTION;
+            } catch (InterruptedException ie) {
+              LOG.error("Interrupted. Something is broken", ie);
+              Thread.currentThread().interrupt();
+              return ThreadStatus.BADEXCEPTION;
+            } catch (Exception e) {
+              LOG.error("Some other bad exception", e);
+              return ThreadStatus.BADEXCEPTION;
+            } finally {
+              if (bkjm != null) {
+                try {
+                  bkjm.close();
+                } catch (IOException ioe) {
+                  LOG.error("Error closing journal manager", ioe);
+                }
+              }
+            }
+          }
+        });
+    }
+    ExecutorService service = Executors.newFixedThreadPool(numThreads);
+    List<Future<ThreadStatus>> statuses = service.invokeAll(threads, 60,
+                                                      TimeUnit.SECONDS);
+    int numCompleted = 0;
+    for (Future<ThreadStatus> s : statuses) {
+      assertTrue(s.isDone());
+      assertTrue("Thread threw invalid exception",
+          s.get() == ThreadStatus.COMPLETED
+          || s.get() == ThreadStatus.GOODEXCEPTION);
+      if (s.get() == ThreadStatus.COMPLETED) {
+        numCompleted++;
+      }
+    }
+    LOG.info("Completed " + numCompleted + " formats");
+    assertTrue("No thread managed to complete formatting", numCompleted > 0);
+  }
+
   private String startAndFinalizeLogSegment(BookKeeperJournalManager bkjm,
       int startTxid, int endTxid) throws IOException, KeeperException,
       InterruptedException {
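
The testConcurrentFormat test above releases forty formatters through a CyclicBarrier so that every thread calls format() at effectively the same instant, then requires that each thread either completed or failed with an acceptable IOException, and that at least one completed. A minimal, self-contained sketch of that barrier-release stress pattern; racyOperation() is a hypothetical stand-in for bkjm.format(nsi):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class BarrierStressSketch {
  // Hypothetical stand-in for the contended operation under test.
  static void racyOperation(AtomicInteger successes) {
    successes.incrementAndGet();
  }

  public static void main(String[] args) throws Exception {
    final int numThreads = 8;
    final CyclicBarrier barrier = new CyclicBarrier(numThreads);
    final AtomicInteger successes = new AtomicInteger();

    List<Callable<Void>> tasks = new ArrayList<Callable<Void>>();
    for (int i = 0; i < numThreads; i++) {
      tasks.add(new Callable<Void>() {
          public Void call() throws Exception {
            barrier.await();           // release everyone at once to
            racyOperation(successes);  // maximize contention
            return null;
          }
        });
    }

    ExecutorService service = Executors.newFixedThreadPool(numThreads);
    List<Future<Void>> statuses = service.invokeAll(tasks, 60,
                                                    TimeUnit.SECONDS);
    for (Future<Void> s : statuses) {
      s.get(); // propagate any unexpected failure
    }
    service.shutdown();

    // Mirrors the test's final assertion: at least one contender must win.
    if (successes.get() == 0) {
      throw new AssertionError("no thread completed the operation");
    }
    System.out.println(successes.get() + " threads completed");
  }
}

Bounding invokeAll with a timeout keeps a deadlocked contender from hanging the whole run, which is the same reason the test passes 60 seconds to its executor.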

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java Sat Nov 10 00:49:15 2012
@@ -118,6 +118,7 @@ public class TestCurrentInprogress {
   public void testReadShouldReturnTheZnodePathAfterUpdate() throws Exception {
     String data = "inprogressNode";
     CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.init();
     ci.update(data);
     String inprogressNodePath = ci.read();
     assertEquals("Not returning inprogressZnode", "inprogressNode",
@@ -131,6 +132,7 @@ public class TestCurrentInprogress {
   @Test
   public void testReadShouldReturnNullAfterClear() throws Exception {
     CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.init();
     ci.update("myInprogressZnode");
     ci.read();
     ci.clear();
@@ -146,6 +148,7 @@ public class TestCurrentInprogress {
   public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead()
       throws Exception {
     CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.init();
     ci.update("myInprogressZnode");
     assertEquals("Not returning myInprogressZnode", "myInprogressZnode", ci
         .read());
@@ -154,4 +157,4 @@ public class TestCurrentInprogress {
     ci.update("myInprogressZnode");
   }
 
-}
\ No newline at end of file
+}

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1406415-1407703

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java Sat Nov 10 00:49:15 2012
@@ -38,7 +38,8 @@ public class DatanodeID implements Compa
   public static final DatanodeID[] EMPTY_ARRAY = {};
 
   private String ipAddr;     // IP address
-  private String hostName;   // hostname
+  private String hostName;   // hostname claimed by datanode
+  private String peerHostName; // hostname from the actual connection
   private String storageID;  // unique per cluster storageID
   private int xferPort;      // data streaming port
   private int infoPort;      // info server port
@@ -51,6 +52,7 @@ public class DatanodeID implements Compa
         from.getXferPort(),
         from.getInfoPort(),
         from.getIpcPort());
+    this.peerHostName = from.getPeerHostName();
   }
   
   /**
@@ -76,6 +78,10 @@ public class DatanodeID implements Compa
     this.ipAddr = ipAddr;
   }
 
+  public void setPeerHostName(String peerHostName) {
+    this.peerHostName = peerHostName;
+  }
+  
   public void setStorageID(String storageID) {
     this.storageID = storageID;
   }
@@ -95,6 +101,13 @@ public class DatanodeID implements Compa
   }
 
   /**
+   * @return hostname from the actual connection 
+   */
+  public String getPeerHostName() {
+    return peerHostName;
+  }
+  
+  /**
    * @return IP:xferPort string
    */
   public String getXferAddr() {
@@ -202,6 +215,7 @@ public class DatanodeID implements Compa
   public void updateRegInfo(DatanodeID nodeReg) {
     ipAddr = nodeReg.getIpAddr();
     hostName = nodeReg.getHostName();
+    peerHostName = nodeReg.getPeerHostName();
     xferPort = nodeReg.getXferPort();
     infoPort = nodeReg.getInfoPort();
     ipcPort = nodeReg.getIpcPort();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java Sat Nov 10 00:49:15 2012
@@ -35,6 +35,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -230,6 +231,7 @@ public class JournalNode implements Tool
   }
 
   public static void main(String[] args) throws Exception {
+    StringUtils.startupShutdownMessage(JournalNode.class, args, LOG);
     System.exit(ToolRunner.run(new JournalNode(), args));
   }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java Sat Nov 10 00:49:15 2012
@@ -233,7 +233,7 @@ public class BlockInfoUnderConstruction 
     setBlockUCState(BlockUCState.UNDER_RECOVERY);
     blockRecoveryId = recoveryId;
     if (replicas.size() == 0) {
-      NameNode.stateChangeLog.warn("BLOCK*"
+      NameNode.blockStateChangeLog.warn("BLOCK*"
         + " BlockInfoUnderConstruction.initLeaseRecovery:"
         + " No blocks found, lease removed.");
     }
@@ -245,7 +245,7 @@ public class BlockInfoUnderConstruction 
         primaryNodeIndex = j;
         DatanodeDescriptor primary = replicas.get(j).getExpectedLocation(); 
         primary.addBlockToBeRecovered(this);
-        NameNode.stateChangeLog.info("BLOCK* " + this
+        NameNode.blockStateChangeLog.info("BLOCK* " + this
           + " recovery started, primary=" + primary);
         return;
       }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Sat Nov 10 00:49:15 2012
@@ -83,6 +83,7 @@ import com.google.common.collect.Sets;
 public class BlockManager {
 
   static final Log LOG = LogFactory.getLog(BlockManager.class);
+  static final Log blockLog = NameNode.blockStateChangeLog;
 
   /** Default load factor of map */
   public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f;
@@ -872,7 +873,7 @@ public class BlockManager {
       final long size) throws UnregisteredNodeException {
     final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
     if (node == null) {
-      NameNode.stateChangeLog.warn("BLOCK* getBlocks: "
+      blockLog.warn("BLOCK* getBlocks: "
           + "Asking for blocks from an unrecorded node " + datanode);
       throw new HadoopIllegalArgumentException(
           "Datanode " + datanode + " not found.");
@@ -950,7 +951,7 @@ public class BlockManager {
       datanodes.append(node).append(" ");
     }
     if (datanodes.length() != 0) {
-      NameNode.stateChangeLog.info("BLOCK* addToInvalidates: " + b + " "
+      blockLog.info("BLOCK* addToInvalidates: " + b + " "
           + datanodes);
     }
   }
@@ -971,7 +972,7 @@ public class BlockManager {
       // ignore the request for now. This could happen when BlockScanner
       // thread of Datanode reports bad block before Block reports are sent
       // by the Datanode on startup
-      NameNode.stateChangeLog.info("BLOCK* findAndMarkBlockAsCorrupt: "
+      blockLog.info("BLOCK* findAndMarkBlockAsCorrupt: "
           + blk + " not found");
       return;
     }
@@ -988,7 +989,7 @@ public class BlockManager {
 
     BlockCollection bc = b.corrupted.getBlockCollection();
     if (bc == null) {
-      NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " + b
+      blockLog.info("BLOCK markBlockAsCorrupt: " + b
           + " cannot be marked as corrupt as it does not belong to any file");
       addToInvalidates(b.corrupted, node);
       return;
@@ -1013,7 +1014,7 @@ public class BlockManager {
    */
   private void invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn
       ) throws IOException {
-    NameNode.stateChangeLog.info("BLOCK* invalidateBlock: " + b + " on " + dn);
+    blockLog.info("BLOCK* invalidateBlock: " + b + " on " + dn);
     DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
       throw new IOException("Cannot invalidate " + b
@@ -1023,7 +1024,7 @@ public class BlockManager {
     // Check how many copies we have of the block
     NumberReplicas nr = countNodes(b.stored);
     if (nr.replicasOnStaleNodes() > 0) {
-      NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: postponing " +
+      blockLog.info("BLOCK* invalidateBlocks: postponing " +
           "invalidation of " + b + " on " + dn + " because " +
           nr.replicasOnStaleNodes() + " replica(s) are located on nodes " +
           "with potentially out-of-date block reports");
@@ -1033,12 +1034,12 @@ public class BlockManager {
       // If we have at least one copy on a live node, then we can delete it.
       addToInvalidates(b.corrupted, dn);
       removeStoredBlock(b.stored, node);
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("BLOCK* invalidateBlocks: "
+      if(blockLog.isDebugEnabled()) {
+        blockLog.debug("BLOCK* invalidateBlocks: "
             + b + " on " + dn + " listed for deletion.");
       }
     } else {
-      NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + b
+      blockLog.info("BLOCK* invalidateBlocks: " + b
           + " on " + dn + " is the only copy and was not deleted");
     }
   }
@@ -1160,7 +1161,7 @@ public class BlockManager {
                    (blockHasEnoughRacks(block)) ) {
                 neededReplications.remove(block, priority); // remove from neededReplications
                 neededReplications.decrementReplicationIndex(priority);
-                NameNode.stateChangeLog.info("BLOCK* Removing " + block
+                blockLog.info("BLOCK* Removing " + block
                     + " from neededReplications as it has enough replicas");
                 continue;
               }
@@ -1235,7 +1236,7 @@ public class BlockManager {
               neededReplications.remove(block, priority); // remove from neededReplications
               neededReplications.decrementReplicationIndex(priority);
               rw.targets = null;
-              NameNode.stateChangeLog.info("BLOCK* Removing " + block
+              blockLog.info("BLOCK* Removing " + block
                   + " from neededReplications as it has enough replicas");
               continue;
             }
@@ -1261,8 +1262,8 @@ public class BlockManager {
           // The reason we use 'pending' is so we can retry
           // replications that fail after an appropriate amount of time.
           pendingReplications.increment(block, targets.length);
-          if(NameNode.stateChangeLog.isDebugEnabled()) {
-            NameNode.stateChangeLog.debug(
+          if(blockLog.isDebugEnabled()) {
+            blockLog.debug(
                 "BLOCK* block " + block
                 + " is moved from neededReplications to pendingReplications");
           }
@@ -1278,7 +1279,7 @@ public class BlockManager {
       namesystem.writeUnlock();
     }
 
-    if (NameNode.stateChangeLog.isInfoEnabled()) {
+    if (blockLog.isInfoEnabled()) {
       // log which blocks have been scheduled for replication
       for(ReplicationWork rw : work){
         DatanodeDescriptor[] targets = rw.targets;
@@ -1288,13 +1289,13 @@ public class BlockManager {
             targetList.append(' ');
             targetList.append(targets[k]);
           }
-          NameNode.stateChangeLog.info("BLOCK* ask " + rw.srcNode
+          blockLog.info("BLOCK* ask " + rw.srcNode
               + " to replicate " + rw.block + " to " + targetList);
         }
       }
     }
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
+    if(blockLog.isDebugEnabled()) {
+        blockLog.debug(
           "BLOCK* neededReplications = " + neededReplications.size()
           + " pendingReplications = " + pendingReplications.size());
     }
@@ -1504,7 +1505,7 @@ public class BlockManager {
       // To minimize startup time, we discard any second (or later) block reports
       // that we receive while still in startup phase.
       if (namesystem.isInStartupSafeMode() && !node.isFirstBlockReport()) {
-        NameNode.stateChangeLog.info("BLOCK* processReport: "
+        blockLog.info("BLOCK* processReport: "
             + "discarded non-initial block report from " + nodeID
             + " because namenode still in startup phase");
         return;
@@ -1536,7 +1537,7 @@ public class BlockManager {
 
     // Log the block report processing stats from Namenode perspective
     NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
-    NameNode.stateChangeLog.info("BLOCK* processReport: from "
+    blockLog.info("BLOCK* processReport: from "
         + nodeID + ", blocks: " + newReport.getNumberOfBlocks()
         + ", processing time: " + (endTime - startTime) + " msecs");
   }
@@ -1596,7 +1597,7 @@ public class BlockManager {
       addStoredBlock(b, node, null, true);
     }
     for (Block b : toInvalidate) {
-      NameNode.stateChangeLog.info("BLOCK* processReport: "
+      blockLog.info("BLOCK* processReport: "
           + b + " on " + node + " size " + b.getNumBytes()
           + " does not belong to any file");
       addToInvalidates(b, node);
@@ -2034,7 +2035,7 @@ assert storedBlock.findDatanode(dn) < 0 
     }
     if (storedBlock == null || storedBlock.getBlockCollection() == null) {
       // If this block does not belong to anyfile, then we are done.
-      NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
+      blockLog.info("BLOCK* addStoredBlock: " + block + " on "
           + node + " size " + block.getNumBytes()
           + " but it does not belong to any file");
       // we could add this block to invalidate set of this datanode.
@@ -2056,7 +2057,7 @@ assert storedBlock.findDatanode(dn) < 0 
       }
     } else {
       curReplicaDelta = 0;
-      NameNode.stateChangeLog.warn("BLOCK* addStoredBlock: "
+      blockLog.warn("BLOCK* addStoredBlock: "
           + "Redundant addStoredBlock request received for " + storedBlock
           + " on " + node + " size " + storedBlock.getNumBytes());
     }
@@ -2115,7 +2116,7 @@ assert storedBlock.findDatanode(dn) < 0 
   }
 
   private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
-    if (!NameNode.stateChangeLog.isInfoEnabled()) {
+    if (!blockLog.isInfoEnabled()) {
       return;
     }
     
@@ -2126,7 +2127,7 @@ assert storedBlock.findDatanode(dn) < 0 
     storedBlock.appendStringTo(sb);
     sb.append(" size " )
       .append(storedBlock.getNumBytes());
-    NameNode.stateChangeLog.info(sb);
+    blockLog.info(sb);
   }
   /**
    * Invalidate corrupt replicas.
@@ -2153,7 +2154,7 @@ assert storedBlock.findDatanode(dn) < 0 
       try {
         invalidateBlock(new BlockToMarkCorrupt(blk, null), node);
       } catch (IOException e) {
-        NameNode.stateChangeLog.info("invalidateCorruptReplicas "
+        blockLog.info("invalidateCorruptReplicas "
             + "error in deleting bad block " + blk + " on " + node, e);
         gotException = true;
       }
@@ -2391,7 +2392,7 @@ assert storedBlock.findDatanode(dn) < 0 
       // upon giving instructions to the namenode.
       //
       addToInvalidates(b, cur);
-      NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
+      blockLog.info("BLOCK* chooseExcessReplicates: "
                 +"("+cur+", "+b+") is added to invalidated blocks set");
     }
   }
@@ -2405,8 +2406,8 @@ assert storedBlock.findDatanode(dn) < 0 
     }
     if (excessBlocks.add(block)) {
       excessBlocksCount++;
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("BLOCK* addToExcessReplicate:"
+      if(blockLog.isDebugEnabled()) {
+        blockLog.debug("BLOCK* addToExcessReplicate:"
             + " (" + dn + ", " + block
             + ") is added to excessReplicateMap");
       }
@@ -2418,15 +2419,15 @@ assert storedBlock.findDatanode(dn) < 0 
    * removed block is still valid.
    */
   public void removeStoredBlock(Block block, DatanodeDescriptor node) {
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
+    if(blockLog.isDebugEnabled()) {
+      blockLog.debug("BLOCK* removeStoredBlock: "
           + block + " from " + node);
     }
     assert (namesystem.hasWriteLock());
     {
       if (!blocksMap.removeNode(block, node)) {
-        if(NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
+        if(blockLog.isDebugEnabled()) {
+          blockLog.debug("BLOCK* removeStoredBlock: "
               + block + " has already been removed from node " + node);
         }
         return;
@@ -2453,8 +2454,8 @@ assert storedBlock.findDatanode(dn) < 0 
       if (excessBlocks != null) {
         if (excessBlocks.remove(block)) {
           excessBlocksCount--;
-          if(NameNode.stateChangeLog.isDebugEnabled()) {
-            NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
+          if(blockLog.isDebugEnabled()) {
+            blockLog.debug("BLOCK* removeStoredBlock: "
                 + block + " is removed from excessBlocks");
           }
           if (excessBlocks.size() == 0) {
@@ -2497,7 +2498,7 @@ assert storedBlock.findDatanode(dn) < 0 
     if (delHint != null && delHint.length() != 0) {
       delHintNode = datanodeManager.getDatanode(delHint);
       if (delHintNode == null) {
-        NameNode.stateChangeLog.warn("BLOCK* blockReceived: " + block
+        blockLog.warn("BLOCK* blockReceived: " + block
             + " is expected to be removed from an unrecorded node " + delHint);
       }
     }
@@ -2532,7 +2533,7 @@ assert storedBlock.findDatanode(dn) < 0 
       addStoredBlock(b, node, delHintNode, true);
     }
     for (Block b : toInvalidate) {
-      NameNode.stateChangeLog.info("BLOCK* addBlock: block "
+      blockLog.info("BLOCK* addBlock: block "
           + b + " on " + node + " size " + b.getNumBytes()
           + " does not belong to any file");
       addToInvalidates(b, node);
@@ -2558,7 +2559,7 @@ assert storedBlock.findDatanode(dn) < 0 
     try {
       final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
       if (node == null || !node.isAlive) {
-        NameNode.stateChangeLog
+        blockLog
             .warn("BLOCK* processIncrementalBlockReport"
                 + " is received from dead or unregistered node "
                 + nodeID);
@@ -2585,19 +2586,19 @@ assert storedBlock.findDatanode(dn) < 0 
           String msg = 
             "Unknown block status code reported by " + nodeID +
             ": " + rdbi;
-          NameNode.stateChangeLog.warn(msg);
+          blockLog.warn(msg);
           assert false : msg; // if assertions are enabled, throw.
           break;
         }
-        if (NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug("BLOCK* block "
+        if (blockLog.isDebugEnabled()) {
+          blockLog.debug("BLOCK* block "
               + (rdbi.getStatus()) + ": " + rdbi.getBlock()
               + " is received from " + nodeID);
         }
       }
     } finally {
       namesystem.writeUnlock();
-      NameNode.stateChangeLog
+      blockLog
           .debug("*BLOCK* NameNode.processIncrementalBlockReport: " + "from "
               + nodeID
               +  " receiving: " + receiving + ", "
@@ -2890,8 +2891,8 @@ assert storedBlock.findDatanode(dn) < 0 
     } finally {
       namesystem.writeUnlock();
     }
-    if (NameNode.stateChangeLog.isInfoEnabled()) {
-      NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+    if (blockLog.isInfoEnabled()) {
+      blockLog.info("BLOCK* " + getClass().getSimpleName()
           + ": ask " + dn + " to delete " + toInvalidate);
     }
     return toInvalidate.size();
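
The BlockManager hunks above all make the same substitution: block-level events that used to go through NameNode.stateChangeLog are routed to the class's blockLog, a logger dedicated to block state changes. The sketch below shows the pattern in isolation; the class and method bodies are illustrative stand-ins rather than the Hadoop sources, and only the "BlockStateChange" category name comes from this commit (see the NameNode.java hunk further down).

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    // Minimal sketch of the logger-split pattern applied above.
    class BlockLogSketch {
      // Block-level events get their own category so their level can be
      // raised or lowered independently of the namespace StateChange log.
      static final Log blockLog = LogFactory.getLog("BlockStateChange");

      void logAddStoredBlock(String block, String node) {
        // Guard before building the message, as the patch preserves:
        // skips the string concatenation when the level is disabled.
        if (!blockLog.isInfoEnabled()) {
          return;
        }
        blockLog.info("BLOCK* addStoredBlock: " + block + " is added to " + node);
      }
    }

Keeping the isInfoEnabled()/isDebugEnabled() guards matters because these messages concatenate block and node descriptions; the guard avoids that work entirely when the category is quieted.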

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java Sat Nov 10 00:49:15 2012
@@ -63,13 +63,13 @@ public class CorruptReplicasMap{
     
     if (!nodes.contains(dn)) {
       nodes.add(dn);
-      NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
+      NameNode.blockStateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                    blk.getBlockName() +
                                    " added as corrupt on " + dn +
                                    " by " + Server.getRemoteIp() +
                                    reasonText);
     } else {
-      NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
+      NameNode.blockStateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                    "duplicate requested for " + 
                                    blk.getBlockName() + " to add as corrupt " +
                                    "on " + dn +

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Sat Nov 10 00:49:15 2012
@@ -540,28 +540,16 @@ public class DatanodeManager {
   private static boolean checkInList(final DatanodeID node,
       final Set<String> hostsList,
       final boolean isExcludeList) {
-    final InetAddress iaddr;
-
-    try {
-      iaddr = InetAddress.getByName(node.getIpAddr());
-    } catch (UnknownHostException e) {
-      LOG.warn("Unknown IP: " + node.getIpAddr(), e);
-      return isExcludeList;
-    }
-
     // if include list is empty, host is in include list
     if ( (!isExcludeList) && (hostsList.isEmpty()) ){
       return true;
     }
-    return // compare ipaddress(:port)
-    (hostsList.contains(iaddr.getHostAddress().toString()))
-        || (hostsList.contains(iaddr.getHostAddress().toString() + ":"
-            + node.getXferPort()))
-        // compare hostname(:port)
-        || (hostsList.contains(iaddr.getHostName()))
-        || (hostsList.contains(iaddr.getHostName() + ":" + node.getXferPort()))
-        || ((node instanceof DatanodeInfo) && hostsList
-            .contains(((DatanodeInfo) node).getHostName()));
+    for (String name : getNodeNamesForHostFiltering(node)) {
+      if (hostsList.contains(name)) {
+        return true;
+      }
+    }
+    return false;
   }
 
   /**
@@ -644,16 +632,20 @@ public class DatanodeManager {
    */
   public void registerDatanode(DatanodeRegistration nodeReg)
       throws DisallowedDatanodeException {
-    String dnAddress = Server.getRemoteAddress();
-    if (dnAddress == null) {
-      // Mostly called inside an RPC.
-      // But if not, use address passed by the data-node.
-      dnAddress = nodeReg.getIpAddr();
+    InetAddress dnAddress = Server.getRemoteIp();
+    if (dnAddress != null) {
+      // Mostly called inside an RPC, update ip and peer hostname
+      String hostname = dnAddress.getHostName();
+      String ip = dnAddress.getHostAddress();
+      if (hostname.equals(ip)) {
+        LOG.warn("Unresolved datanode registration from " + ip);
+        throw new DisallowedDatanodeException(nodeReg);
+      }
+      // update node registration with the ip and hostname from rpc request
+      nodeReg.setIpAddr(ip);
+      nodeReg.setPeerHostName(hostname);
     }
 
-    // Update the IP to the address of the RPC request that is
-    // registering this datanode.
-    nodeReg.setIpAddr(dnAddress);
     nodeReg.setExportedKeys(blockManager.getBlockKeys());
 
     // Checks if the node is not on the hosts list.  If it is not, then
@@ -1033,19 +1025,8 @@ public class DatanodeManager {
         if ( (isDead && listDeadNodes) || (!isDead && listLiveNodes) ) {
           nodes.add(dn);
         }
-        // Remove any nodes we know about from the map
-        try {
-          InetAddress inet = InetAddress.getByName(dn.getIpAddr());
-          // compare hostname(:port)
-          mustList.remove(inet.getHostName());
-          mustList.remove(inet.getHostName()+":"+dn.getXferPort());
-          // compare ipaddress(:port)
-          mustList.remove(inet.getHostAddress().toString());
-          mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getXferPort());
-        } catch (UnknownHostException e) {
-          mustList.remove(dn.getName());
-          mustList.remove(dn.getIpAddr());
-          LOG.warn(e);
+        for (String name : getNodeNamesForHostFiltering(dn)) {
+          mustList.remove(name);
         }
       }
     }
@@ -1066,6 +1047,25 @@ public class DatanodeManager {
     return nodes;
   }
   
+  private static List<String> getNodeNamesForHostFiltering(DatanodeID node) {
+    String ip = node.getIpAddr();
+    String regHostName = node.getHostName();
+    int xferPort = node.getXferPort();
+    
+    List<String> names = new ArrayList<String>(); 
+    names.add(ip);
+    names.add(ip + ":" + xferPort);
+    names.add(regHostName);
+    names.add(regHostName + ":" + xferPort);
+
+    String peerHostName = node.getPeerHostName();
+    if (peerHostName != null) {
+      names.add(peerHostName);
+      names.add(peerHostName + ":" + xferPort);
+    }
+    return names;
+  }
+  
   private void setDatanodeDead(DatanodeDescriptor node) {
     node.setLastUpdate(0);
   }
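
Two things change in DatanodeManager. First, registerDatanode now takes the IP and hostname from the RPC socket via Server.getRemoteIp() and rejects a registration whose hostname comes back as the bare IP, which is how a failed reverse-DNS lookup surfaces. Second, include/exclude matching no longer calls InetAddress.getByName on every check; instead getNodeNamesForHostFiltering builds every name form the node is already known by, and membership becomes a plain set lookup. A self-contained sketch of that second part, with DatanodeID reduced to its string fields (the host names and ports in main are hypothetical):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Sketch of the DNS-free host matching introduced above.
    class HostFilterSketch {
      static List<String> namesFor(String ip, String hostName,
                                   String peerHostName, int xferPort) {
        List<String> names = new ArrayList<String>();
        names.add(ip);                        // ip and ip:port
        names.add(ip + ":" + xferPort);
        names.add(hostName);                  // registered hostname(:port)
        names.add(hostName + ":" + xferPort);
        if (peerHostName != null) {           // hostname seen on the RPC socket
          names.add(peerHostName);
          names.add(peerHostName + ":" + xferPort);
        }
        return names;
      }

      public static void main(String[] args) {
        Set<String> includes = new HashSet<String>(
            Arrays.asList("dn1.example.com", "10.0.0.7:50010"));
        boolean allowed = false;
        for (String name : namesFor("10.0.0.7", "dn1", "dn1.example.com", 50010)) {
          if (includes.contains(name)) {
            allowed = true;  // any matching form admits the node
            break;
          }
        }
        System.out.println("allowed = " + allowed);  // prints: allowed = true
      }
    }

Dropping the per-check forward and reverse lookups makes include/exclude decisions deterministic and cheap, and the same helper now also drives the mustList pruning in the node-report path above.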

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java Sat Nov 10 00:49:15 2012
@@ -86,7 +86,7 @@ class InvalidateBlocks {
     if (set.add(block)) {
       numBlocks++;
       if (log) {
-        NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+        NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName()
             + ": add " + block + " to " + datanode);
       }
     }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java Sat Nov 10 00:49:15 2012
@@ -190,8 +190,8 @@ class UnderReplicatedBlocks implements I
     int priLevel = getPriority(block, curReplicas, decomissionedReplicas,
                                expectedReplicas);
     if(priorityQueues.get(priLevel).add(block)) {
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
+      if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+        NameNode.blockStateChangeLog.debug(
           "BLOCK* NameSystem.UnderReplicationBlock.add:"
           + block
           + " has only " + curReplicas
@@ -233,8 +233,8 @@ class UnderReplicatedBlocks implements I
   boolean remove(Block block, int priLevel) {
     if(priLevel >= 0 && priLevel < LEVEL 
         && priorityQueues.get(priLevel).remove(block)) {
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
+      if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+        NameNode.blockStateChangeLog.debug(
           "BLOCK* NameSystem.UnderReplicationBlock.remove: "
           + "Removing block " + block
           + " from priority queue "+ priLevel);
@@ -245,8 +245,8 @@ class UnderReplicatedBlocks implements I
       // not found in the queue for the given priority level.
       for (int i = 0; i < LEVEL; i++) {
         if (priorityQueues.get(i).remove(block)) {
-          if(NameNode.stateChangeLog.isDebugEnabled()) {
-            NameNode.stateChangeLog.debug(
+          if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+            NameNode.blockStateChangeLog.debug(
               "BLOCK* NameSystem.UnderReplicationBlock.remove: "
               + "Removing block " + block
               + " from priority queue "+ i);
@@ -296,8 +296,8 @@ class UnderReplicatedBlocks implements I
       remove(block, oldPri);
     }
     if(priorityQueues.get(curPri).add(block)) {
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
+      if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+        NameNode.blockStateChangeLog.debug(
           "BLOCK* NameSystem.UnderReplicationBlock.update:"
           + block
           + " has only "+ curReplicas

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Sat Nov 10 00:49:15 2012
@@ -408,15 +408,15 @@ public class JspHelper {
         if (!parts[i].equals("")) {
           tempPath.append(parts[i]);
           out.print("<a href=\"browseDirectory.jsp" + "?dir="
-              + tempPath.toString() + "&namenodeInfoPort=" + namenodeInfoPort
+              + HtmlQuoting.quoteHtmlChars(tempPath.toString()) + "&namenodeInfoPort=" + namenodeInfoPort
               + getDelegationTokenUrlParam(tokenString)
               + getUrlParam(NAMENODE_ADDRESS, nnAddress));
-          out.print("\">" + parts[i] + "</a>" + Path.SEPARATOR);
+          out.print("\">" + HtmlQuoting.quoteHtmlChars(parts[i]) + "</a>" + Path.SEPARATOR);
           tempPath.append(Path.SEPARATOR);
         }
       }
       if(parts.length > 0) {
-        out.print(parts[parts.length-1]);
+        out.print(HtmlQuoting.quoteHtmlChars(parts[parts.length-1]));
       }
     }
     catch (UnsupportedEncodingException ex) {
@@ -431,16 +431,16 @@ public class JspHelper {
                                    String nnAddress) throws IOException {
     out.print("<form action=\"browseDirectory.jsp\" method=\"get\" name=\"goto\">");
     out.print("Goto : ");
-    out.print("<input name=\"dir\" type=\"text\" width=\"50\" id\"dir\" value=\""+ file+"\">");
-    out.print("<input name=\"go\" type=\"submit\" value=\"go\">");
+    out.print("<input name=\"dir\" type=\"text\" width=\"50\" id=\"dir\" value=\""+ HtmlQuoting.quoteHtmlChars(file)+"\"/>");
+    out.print("<input name=\"go\" type=\"submit\" value=\"go\"/>");
     out.print("<input name=\"namenodeInfoPort\" type=\"hidden\" "
-        + "value=\"" + namenodeInfoPort  + "\">");
+        + "value=\"" + namenodeInfoPort  + "\"/>");
     if (UserGroupInformation.isSecurityEnabled()) {
       out.print("<input name=\"" + DELEGATION_PARAMETER_NAME
-          + "\" type=\"hidden\" value=\"" + tokenString + "\">");
+          + "\" type=\"hidden\" value=\"" + tokenString + "\"/>");
     }
     out.print("<input name=\""+ NAMENODE_ADDRESS +"\" type=\"hidden\" "
-        + "value=\"" + nnAddress  + "\">");
+        + "value=\"" + nnAddress  + "\"/>");
     out.print("</form>");
   }
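
The JspHelper and DatanodeJspHelper changes are an XSS fix: path components and file names echoed into browseDirectory.jsp are user-controlled, so each is now passed through HtmlQuoting.quoteHtmlChars (or StringEscapeUtils.escapeHtml) before printing, and the goto form also gains the missing '=' on the id attribute and self-closing input tags. The stand-in below shows what such quoting amounts to; it is illustrative, not the actual org.apache.hadoop.http.HtmlQuoting implementation.

    // Illustrative HTML metacharacter quoting for the five characters
    // that matter in markup; a stand-in, not Hadoop's HtmlQuoting.
    final class HtmlQuotingSketch {
      static String quoteHtmlChars(String s) {
        if (s == null) {
          return null;
        }
        StringBuilder sb = new StringBuilder(s.length());
        for (int i = 0; i < s.length(); i++) {
          char c = s.charAt(i);
          switch (c) {
            case '&':  sb.append("&amp;");  break;
            case '<':  sb.append("&lt;");   break;
            case '>':  sb.append("&gt;");   break;
            case '"':  sb.append("&quot;"); break;
            case '\'': sb.append("&apos;"); break;
            default:   sb.append(c);
          }
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        // A crafted directory name is rendered as text instead of markup:
        System.out.println(quoteHtmlChars("/tmp/\"><script>alert(1)</script>"));
      }
    }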
   

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java Sat Nov 10 00:49:15 2012
@@ -433,7 +433,7 @@ public abstract class Storage extends St
         if (!root.exists()) {
           // storage directory does not exist
           if (startOpt != StartupOption.FORMAT) {
-            LOG.info("Storage directory " + rootPath + " does not exist");
+            LOG.warn("Storage directory " + rootPath + " does not exist");
             return StorageState.NON_EXISTENT;
           }
           LOG.info(rootPath + " does not exist. Creating ...");
@@ -442,15 +442,15 @@ public abstract class Storage extends St
         }
         // or is inaccessible
         if (!root.isDirectory()) {
-          LOG.info(rootPath + "is not a directory");
+          LOG.warn(rootPath + " is not a directory");
           return StorageState.NON_EXISTENT;
         }
         if (!root.canWrite()) {
-          LOG.info("Cannot access storage directory " + rootPath);
+          LOG.warn("Cannot access storage directory " + rootPath);
           return StorageState.NON_EXISTENT;
         }
       } catch(SecurityException ex) {
-        LOG.info("Cannot access storage directory " + rootPath, ex);
+        LOG.warn("Cannot access storage directory " + rootPath, ex);
         return StorageState.NON_EXISTENT;
       }
 

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Sat Nov 10 00:49:15 2012
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.http.HtmlQuoting;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -119,7 +120,7 @@ public class DatanodeJspHelper {
     String target = dir;
     final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
     if (targetStatus == null) { // not exists
-      out.print("<h3>File or directory : " + target + " does not exist</h3>");
+      out.print("<h3>File or directory : " + StringEscapeUtils.escapeHtml(target) + " does not exist</h3>");
       JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, target,
           nnAddr);
     } else {
@@ -203,7 +204,7 @@ public class DatanodeJspHelper {
               + JspHelper.getDelegationTokenUrlParam(tokenString)
               + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
             cols[0] = "<a href=\"" + datanodeUrl + "\">"
-              + localFileName + "</a>";
+              + HtmlQuoting.quoteHtmlChars(localFileName) + "</a>";
             cols[5] = lsDateFormat.format(new Date((files[i]
               .getModificationTime())));
             cols[6] = files[i].getPermission().toString();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java Sat Nov 10 00:49:15 2012
@@ -137,7 +137,7 @@ class FsVolumeList {
     if (removedVols != null && removedVols.size() > 0) {
       // Replace volume list
       volumes = Collections.unmodifiableList(volumeList);
-      FsDatasetImpl.LOG.info("Completed checkDirs. Removed " + removedVols.size()
+      FsDatasetImpl.LOG.warn("Completed checkDirs. Removed " + removedVols.size()
           + " volumes. Current volumes: " + this);
     }
 

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Sat Nov 10 00:49:15 2012
@@ -78,10 +78,6 @@ public class BackupNode extends NameNode
   String nnHttpAddress;
   /** Checkpoint manager */
   Checkpointer checkpointManager;
-  /** ClusterID to which BackupNode belongs to */
-  String clusterId;
-  /** Block pool Id of the peer namenode of this BackupNode */
-  String blockPoolId;
   
   BackupNode(Configuration conf, NamenodeRole role) throws IOException {
     super(conf, role);
@@ -145,6 +141,7 @@ public class BackupNode extends NameNode
                  CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
     NamespaceInfo nsInfo = handshake(conf);
     super.initialize(conf);
+    namesystem.setBlockPoolId(nsInfo.getBlockPoolID());
 
     if (false == namesystem.isInSafeMode()) {
       namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
@@ -154,9 +151,6 @@ public class BackupNode extends NameNode
     // therefore lease hard limit should never expire.
     namesystem.leaseManager.setLeasePeriod(
         HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
-    
-    clusterId = nsInfo.getClusterID();
-    blockPoolId = nsInfo.getBlockPoolID();
 
     // register with the active name-node 
     registerWith(nsInfo);
@@ -219,7 +213,7 @@ public class BackupNode extends NameNode
   }
   
   /* @Override */// NameNode
-  public boolean setSafeMode(@SuppressWarnings("unused") SafeModeAction action)
+  public boolean setSafeMode(SafeModeAction action)
       throws IOException {
     throw new UnsupportedActionException("setSafeMode");
   }
@@ -415,14 +409,6 @@ public class BackupNode extends NameNode
     return nsInfo;
   }
   
-  String getBlockPoolId() {
-    return blockPoolId;
-  }
-  
-  String getClusterId() {
-    return clusterId;
-  }
-  
   @Override
   protected NameNodeHAContext createHAContext() {
     return new BNHAContext();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Sat Nov 10 00:49:15 2012
@@ -1938,9 +1938,9 @@ public class FSDirectory implements Clos
     assert hasWriteLock();
     // sanity check
     if ((nsQuota < 0 && nsQuota != HdfsConstants.QUOTA_DONT_SET && 
-         nsQuota < HdfsConstants.QUOTA_RESET) || 
+         nsQuota != HdfsConstants.QUOTA_RESET) || 
         (dsQuota < 0 && dsQuota != HdfsConstants.QUOTA_DONT_SET && 
-          dsQuota < HdfsConstants.QUOTA_RESET)) {
+          dsQuota != HdfsConstants.QUOTA_RESET)) {
       throw new IllegalArgumentException("Illegal value for nsQuota or " +
                                          "dsQuota : " + nsQuota + " and " +
                                          dsQuota);
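
The FSDirectory change tightens the quota sanity check: a negative quota is only legal when it is exactly one of the two sentinels (QUOTA_DONT_SET or QUOTA_RESET), and "!=" states that directly, where "<" depended on the sentinels' numeric ordering. A sketch with assumed sentinel values, which should be checked against HdfsConstants:

    // Sketch of the tightened quota validation; the sentinel values here
    // are assumptions for illustration only.
    final class QuotaCheckSketch {
      static final long QUOTA_DONT_SET = Long.MAX_VALUE; // hypothetical value
      static final long QUOTA_RESET = -1L;               // hypothetical value

      static void checkQuota(long nsQuota, long dsQuota) {
        // A negative quota must equal a sentinel exactly; every other
        // negative value is rejected regardless of sentinel ordering.
        if ((nsQuota < 0 && nsQuota != QUOTA_DONT_SET && nsQuota != QUOTA_RESET)
            || (dsQuota < 0 && dsQuota != QUOTA_DONT_SET && dsQuota != QUOTA_RESET)) {
          throw new IllegalArgumentException("Illegal value for nsQuota or " +
              "dsQuota : " + nsQuota + " and " + dsQuota);
        }
      }

      public static void main(String[] args) {
        checkQuota(QUOTA_RESET, QUOTA_DONT_SET); // sentinels: accepted
        try {
          checkQuota(-5, 0);                     // arbitrary negative: rejected
        } catch (IllegalArgumentException expected) {
          System.out.println(expected.getMessage());
        }
      }
    }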

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat Nov 10 00:49:15 2012
@@ -3550,7 +3550,7 @@ public class FSNamesystem implements Nam
   @Metric({"TransactionsSinceLastLogRoll",
       "Number of transactions since last edit log roll"})
   public long getTransactionsSinceLastLogRoll() {
-    if (isInStandbyState()) {
+    if (isInStandbyState() || !getEditLog().isSegmentOpen()) {
       return 0;
     } else {
       return getEditLog().getLastWrittenTxId() -

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Sat Nov 10 00:49:15 2012
@@ -227,6 +227,7 @@ public class NameNode {
   public static final int DEFAULT_PORT = 8020;
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
+  public static final Log blockStateChangeLog = LogFactory.getLog("BlockStateChange");
   public static final HAState ACTIVE_STATE = new ActiveState();
   public static final HAState STANDBY_STATE = new StandbyState();
   

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Sat Nov 10 00:49:15 2012
@@ -132,6 +132,7 @@ class NameNodeRpcServer implements Namen
   
   private static final Log LOG = NameNode.LOG;
   private static final Log stateChangeLog = NameNode.stateChangeLog;
+  private static final Log blockStateChangeLog = NameNode.blockStateChangeLog;
   
   // Dependencies from other parts of NN.
   protected final FSNamesystem namesystem;
@@ -889,8 +890,8 @@ class NameNodeRpcServer implements Namen
       String poolId, StorageBlockReport[] reports) throws IOException {
     verifyRequest(nodeReg);
     BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
-    if(stateChangeLog.isDebugEnabled()) {
-      stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
+    if(blockStateChangeLog.isDebugEnabled()) {
+      blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
            + "from " + nodeReg + " " + blist.getNumberOfBlocks()
            + " blocks");
     }
@@ -905,8 +906,8 @@ class NameNodeRpcServer implements Namen
   public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
       StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) throws IOException {
     verifyRequest(nodeReg);
-    if(stateChangeLog.isDebugEnabled()) {
-      stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
+    if(blockStateChangeLog.isDebugEnabled()) {
+      blockStateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
           +"from "+nodeReg+" "+receivedAndDeletedBlocks.length
           +" blocks.");
     }
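
The NameNode.java and NameNodeRpcServer.java hunks establish the other half of the logger split: a new top-level category named "BlockStateChange", separate from "org.apache.hadoop.hdfs.StateChange", so block-report chatter can be tuned on its own. Programmatically that looks like the snippet below, using the same Log4JLogger cast the TestDatanodeDeath change adds; in a deployment the equivalent would be a log4j.properties entry along the lines of log4j.logger.BlockStateChange=WARN, given here only as an illustration.

    import org.apache.commons.logging.LogFactory;
    import org.apache.commons.logging.impl.Log4JLogger;
    import org.apache.log4j.Level;

    // Quiet block chatter while keeping namespace state changes at INFO.
    // Assumes commons-logging is backed by log4j, as in the tests below.
    public class TuneBlockLog {
      public static void main(String[] args) {
        ((Log4JLogger) LogFactory.getLog("BlockStateChange"))
            .getLogger().setLevel(Level.WARN);
        ((Log4JLogger) LogFactory.getLog("org.apache.hadoop.hdfs.StateChange"))
            .getLogger().setLevel(Level.INFO);
      }
    }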

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1406415-1407703

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1406415-1407703

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1406415-1407703

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1406415-1407703

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1406415-1407703

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java?rev=1407706&r1=1407705&r2=1407706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java Sat Nov 10 00:49:15 2012
@@ -46,6 +46,7 @@ import org.junit.Test;
 public class TestDatanodeDeath {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);


