hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1538408 - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namen...
Date: Sun, 03 Nov 2013 17:51:54 GMT
Author: szetszwo
Date: Sun Nov  3 17:51:52 2013
New Revision: 1538408

URL: http://svn.apache.org/r1538408
Log:
Merge r1537584 through r1538407 from trunk.

Added:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
      - copied unchanged from r1538407, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1537584-1538407

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1538408&r1=1538407&r2=1538408&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sun Nov  3 17:51:52 2013
@@ -458,12 +458,10 @@ Release 2.3.0 - UNRELEASED
     (Qus-Jiawei via kihwal)
 
   BUG FIXES
+
     HDFS-5034.  Remove debug prints from GetFileLinkInfo (Andrew Wang via Colin
     Patrick McCabe)
 
-    HDFS-5035.  getFileLinkStatus and rename do not correctly check permissions
-    of symlinks.  (Andrew Wang via Colin Patrick McCabe)
-
     HDFS-4816. transitionToActive blocks if the SBN is doing checkpoint image
     transfer. (Andrew Wang)
 
@@ -515,6 +513,8 @@ Release 2.2.1 - UNRELEASED
     report to a configurable value.  (Aaron T. Myers via Colin Patrick
     McCabe)
 
+    HDFS-5344. Make LsSnapshottableDir as Tool interface implementation. (Sathish via umamahesh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -574,6 +574,11 @@ Release 2.2.1 - UNRELEASED
     HDFS-4633 TestDFSClientExcludedNodes fails sporadically if excluded nodes
     cache expires too quickly  (Chris Nauroth via Sanjay)
 
+    HDFS-5037. Active NN should trigger its own edit log rolls (wang)
+
+    HDFS-5035.  getFileLinkStatus and rename do not correctly check permissions
+    of symlinks.  (Andrew Wang via Colin Patrick McCabe)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1537252-1538407

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1538408&r1=1538407&r2=1538408&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Sun Nov  3 17:51:52 2013
@@ -188,6 +188,11 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
   public static final int     DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
 
+  public static final String  DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD = "dfs.namenode.edit.log.autoroll.multiplier.threshold";
+  public static final float   DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT = 2.0f;
+  public static final String  DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS = "dfs.namenode.edit.log.autoroll.check.interval.ms";
+  public static final int     DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT = 5*60*1000;
+  
   public static final String  DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH = "dfs.namenode.edits.noeditlogchannelflush";
   public static final boolean DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH_DEFAULT = false;
   

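For orientation, a minimal sketch of reading the two new keys; it mirrors the lookups FSNamesystem performs further down in this change set. The class name is illustrative and not part of this commit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class AutorollKeysExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Multiplier applied to the checkpoint transaction count; defaults to 2.0f.
        float multiplier = conf.getFloat(
            DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD,
            DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT);
        // Interval between roll checks on the active NN; defaults to 5*60*1000 ms.
        int checkIntervalMs = conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS,
            DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT);
        System.out.println("multiplier=" + multiplier
            + " checkIntervalMs=" + checkIntervalMs);
      }
    }
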
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java?rev=1538408&r1=1538407&r2=1538408&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java Sun Nov  3 17:51:52 2013
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configuration.DeprecationDelta;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
@@ -62,48 +63,83 @@ public class HdfsConfiguration extends C
   public static void init() {
   }
 
-  private static void deprecate(String oldKey, String newKey) {
-    Configuration.addDeprecation(oldKey, newKey);
-  }
-
   private static void addDeprecatedKeys() {
-    deprecate("dfs.backup.address", DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY);
-    deprecate("dfs.backup.http.address", DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY);
-    deprecate("dfs.balance.bandwidthPerSec", DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY);
-    deprecate("dfs.data.dir", DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
-    deprecate("dfs.http.address", DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    deprecate("dfs.https.address", DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
-    deprecate("dfs.max.objects", DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY);
-    deprecate("dfs.name.dir", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
-    deprecate("dfs.name.dir.restore", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY);
-    deprecate("dfs.name.edits.dir", DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
-    deprecate("dfs.read.prefetch.size", DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY);
-    deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
-    deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY);
-    deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
-    deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY);
-    deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
-    deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
-    deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY);
-    deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
-    deprecate("dfs.https.client.keystore.resource", DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY);
-    deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY);
-    deprecate("slave.host.name", DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
-    deprecate("session.id", DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
-    deprecate("dfs.access.time.precision", DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY);
-    deprecate("dfs.replication.considerLoad", DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY);
-    deprecate("dfs.replication.interval", DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY);
-    deprecate("dfs.replication.min", DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY);
-    deprecate("dfs.replication.pending.timeout.sec", DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY);
-    deprecate("dfs.max-repl-streams", DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY);
-    deprecate("dfs.permissions", DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY);
-    deprecate("dfs.permissions.supergroup", DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY);
-    deprecate("dfs.write.packet.size", DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY);
-    deprecate("dfs.block.size", DFSConfigKeys.DFS_BLOCK_SIZE_KEY);
-    deprecate("dfs.datanode.max.xcievers", DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY);
-    deprecate("io.bytes.per.checksum", DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY);
-    deprecate("dfs.federation.nameservices", DFSConfigKeys.DFS_NAMESERVICES);
-    deprecate("dfs.federation.nameservice.id", DFSConfigKeys.DFS_NAMESERVICE_ID);
+    Configuration.addDeprecations(new DeprecationDelta[] {
+      new DeprecationDelta("dfs.backup.address",
+        DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY),
+      new DeprecationDelta("dfs.backup.http.address",
+        DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY),
+      new DeprecationDelta("dfs.balance.bandwidthPerSec",
+        DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY),
+      new DeprecationDelta("dfs.data.dir",
+        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY),
+      new DeprecationDelta("dfs.http.address",
+        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY),
+      new DeprecationDelta("dfs.https.address",
+        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY),
+      new DeprecationDelta("dfs.max.objects",
+        DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY),
+      new DeprecationDelta("dfs.name.dir",
+        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY),
+      new DeprecationDelta("dfs.name.dir.restore",
+        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY),
+      new DeprecationDelta("dfs.name.edits.dir",
+        DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY),
+      new DeprecationDelta("dfs.read.prefetch.size",
+        DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY),
+      new DeprecationDelta("dfs.safemode.extension",
+        DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY),
+      new DeprecationDelta("dfs.safemode.threshold.pct",
+        DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY),
+      new DeprecationDelta("dfs.secondary.http.address",
+        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY),
+      new DeprecationDelta("dfs.socket.timeout",
+        DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY),
+      new DeprecationDelta("fs.checkpoint.dir",
+        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY),
+      new DeprecationDelta("fs.checkpoint.edits.dir",
+        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY),
+      new DeprecationDelta("fs.checkpoint.period",
+        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY),
+      new DeprecationDelta("heartbeat.recheck.interval",
+        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY),
+      new DeprecationDelta("dfs.https.client.keystore.resource",
+        DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY),
+      new DeprecationDelta("dfs.https.need.client.auth",
+        DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY),
+      new DeprecationDelta("slave.host.name",
+        DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY),
+      new DeprecationDelta("session.id",
+        DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
+      new DeprecationDelta("dfs.access.time.precision",
+        DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY),
+      new DeprecationDelta("dfs.replication.considerLoad",
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY),
+      new DeprecationDelta("dfs.replication.interval",
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY),
+      new DeprecationDelta("dfs.replication.min",
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY),
+      new DeprecationDelta("dfs.replication.pending.timeout.sec",
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY),
+      new DeprecationDelta("dfs.max-repl-streams",
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY),
+      new DeprecationDelta("dfs.permissions",
+        DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY),
+      new DeprecationDelta("dfs.permissions.supergroup",
+        DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
+      new DeprecationDelta("dfs.write.packet.size",
+        DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY),
+      new DeprecationDelta("dfs.block.size",
+        DFSConfigKeys.DFS_BLOCK_SIZE_KEY),
+      new DeprecationDelta("dfs.datanode.max.xcievers",
+        DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY),
+      new DeprecationDelta("io.bytes.per.checksum",
+        DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY),
+      new DeprecationDelta("dfs.federation.nameservices",
+        DFSConfigKeys.DFS_NAMESERVICES),
+      new DeprecationDelta("dfs.federation.nameservice.id",
+        DFSConfigKeys.DFS_NAMESERVICE_ID)
+    });
   }
 
   public static void main(String[] args) {

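The DeprecationDelta table keeps old configuration names working: a value set under a deprecated key remains visible through its replacement key. A minimal sketch of that behavior, using one of the mappings registered above (the class name is illustrative and not part of this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DeprecationExample {
      public static void main(String[] args) {
        // Constructing HdfsConfiguration triggers addDeprecatedKeys() via its static initializer.
        Configuration conf = new HdfsConfiguration();
        conf.set("dfs.block.size", "134217728");                          // deprecated name
        System.out.println(conf.get(DFSConfigKeys.DFS_BLOCK_SIZE_KEY));   // prints 134217728
      }
    }
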
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1538408&r1=1538407&r2=1538408&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sun Nov  3 17:51:52 2013
@@ -38,6 +38,8 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY;
@@ -49,6 +51,10 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
@@ -390,6 +396,16 @@ public class FSNamesystem implements Nam
   
   Daemon nnrmthread = null; // NamenodeResourceMonitor thread
 
+  Daemon nnEditLogRoller = null; // NameNodeEditLogRoller thread
+  /**
+   * When an active namenode will roll its own edit log, in # edits
+   */
+  private final long editLogRollerThreshold;
+  /**
+   * Check interval of an active namenode's edit log roller thread 
+   */
+  private final int editLogRollerInterval;
+
   private volatile boolean hasResourcesAvailable = false;
   private volatile boolean fsRunning = true;
   
@@ -703,7 +719,17 @@ public class FSNamesystem implements Nam
       
       this.standbyShouldCheckpoint = conf.getBoolean(
           DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT);
-      
+      // # edit autoroll threshold is a multiple of the checkpoint threshold 
+      this.editLogRollerThreshold = (long)
+          (conf.getFloat(
+              DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD,
+              DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT) *
+          conf.getLong(
+              DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
+              DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT));
+      this.editLogRollerInterval = conf.getInt(
+          DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS,
+          DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT);
       this.inodeId = new INodeId();
       
       // For testing purposes, allow the DT secret manager to be started regardless
@@ -978,6 +1004,11 @@ public class FSNamesystem implements Nam
       //ResourceMonitor required only at ActiveNN. See HDFS-2914
       this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
       nnrmthread.start();
+
+      nnEditLogRoller = new Daemon(new NameNodeEditLogRoller(
+          editLogRollerThreshold, editLogRollerInterval));
+      nnEditLogRoller.start();
+
       cacheManager.activate();
       blockManager.getDatanodeManager().setSendCachingCommands(true);
     } finally {
@@ -1017,6 +1048,10 @@ public class FSNamesystem implements Nam
         ((NameNodeResourceMonitor) nnrmthread.getRunnable()).stopMonitor();
         nnrmthread.interrupt();
       }
+      if (nnEditLogRoller != null) {
+        ((NameNodeEditLogRoller)nnEditLogRoller.getRunnable()).stop();
+        nnEditLogRoller.interrupt();
+      }
       if (dir != null && dir.fsImage != null) {
         if (dir.fsImage.editLog != null) {
           dir.fsImage.editLog.close();
@@ -4159,7 +4194,48 @@ public class FSNamesystem implements Nam
       shouldNNRmRun = false;
     }
  }
-  
+
+  class NameNodeEditLogRoller implements Runnable {
+
+    private boolean shouldRun = true;
+    private final long rollThreshold;
+    private final long sleepIntervalMs;
+
+    public NameNodeEditLogRoller(long rollThreshold, int sleepIntervalMs) {
+        this.rollThreshold = rollThreshold;
+        this.sleepIntervalMs = sleepIntervalMs;
+    }
+
+    @Override
+    public void run() {
+      while (fsRunning && shouldRun) {
+        try {
+          FSEditLog editLog = getFSImage().getEditLog();
+          long numEdits =
+              editLog.getLastWrittenTxId() - editLog.getCurSegmentTxId();
+          if (numEdits > rollThreshold) {
+            FSNamesystem.LOG.info("NameNode rolling its own edit log because"
+                + " number of edits in open segment exceeds threshold of "
+                + rollThreshold);
+            rollEditLog();
+          }
+          Thread.sleep(sleepIntervalMs);
+        } catch (InterruptedException e) {
+          FSNamesystem.LOG.info(NameNodeEditLogRoller.class.getSimpleName()
+              + " was interrupted, exiting");
+          break;
+        } catch (Exception e) {
+          FSNamesystem.LOG.error("Swallowing exception in "
+              + NameNodeEditLogRoller.class.getSimpleName() + ":", e);
+        }
+      }
+    }
+
+    public void stop() {
+      shouldRun = false;
+    }
+  }
+
   public FSImage getFSImage() {
     return dir.fsImage;
   }
@@ -5176,7 +5252,9 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.JOURNAL);
       checkNameNodeSafeMode("Log not rolled");
-      LOG.info("Roll Edit Log from " + Server.getRemoteAddress());
+      if (Server.isRpcInvocation()) {
+        LOG.info("Roll Edit Log from " + Server.getRemoteAddress());
+      }
       return getFSImage().rollEditLog();
     } finally {
       writeUnlock();

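The roll threshold used above is not an absolute edit count: FSNamesystem multiplies the autoroll multiplier by dfs.namenode.checkpoint.txns. A minimal sketch of that derivation (illustrative class name, not part of this commit); assuming the usual checkpoint default of 1,000,000 transactions, the defaults work out to a roll at roughly 2,000,000 outstanding edits.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class RollThresholdExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Same arithmetic as FSNamesystem: multiplier * checkpoint txns.
        long rollThreshold = (long) (conf.getFloat(
                DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD,
                DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT)
            * conf.getLong(
                DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
                DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT));
        // Assuming the checkpoint default of 1,000,000 txns, this prints 2000000.
        System.out.println("autoroll threshold = " + rollThreshold + " edits");
      }
    }
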
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java?rev=1538408&r1=1538407&r2=1538408&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java Sun Nov  3 17:51:52 2013
@@ -38,7 +38,7 @@ public class ActiveState extends HAState
 
   @Override
   public void checkOperation(HAContext context, OperationCategory op) {
-    return; // Other than journal all operations are allowed in active state
+    return; // All operations are allowed in active state
   }
   
   @Override

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java?rev=1538408&r1=1538407&r2=1538408&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java Sun Nov  3 17:51:52 2013
@@ -21,9 +21,12 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * A tool used to list all snapshottable directories that are owned by the 
@@ -31,23 +34,23 @@ import org.apache.hadoop.hdfs.protocol.S
  * is a super user.
  */
 @InterfaceAudience.Private
-public class LsSnapshottableDir {
-  public static void main(String[] argv) throws IOException {
+public class LsSnapshottableDir extends Configured implements Tool {
+  @Override
+  public int run(String[] argv) throws Exception {
     String description = "LsSnapshottableDir: \n" +
         "\tGet the list of snapshottable directories that are owned by the current user.\n"
+
         "\tReturn all the snapshottable directories if the current user is a super user.\n";
 
     if(argv.length != 0) {
       System.err.println("Usage: \n" + description);
-      System.exit(1);
+      return 1;
     }
     
-    Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.get(conf);
+    FileSystem fs = FileSystem.get(getConf());
     if (! (fs instanceof DistributedFileSystem)) {
       System.err.println(
           "LsSnapshottableDir can only be used in DistributedFileSystem");
-      System.exit(1);
+      return 1;
     }
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
     
@@ -57,7 +60,12 @@ public class LsSnapshottableDir {
     } catch (IOException e) {
       String[] content = e.getLocalizedMessage().split("\n");
       System.err.println("lsSnapshottableDir: " + content[0]);
+      return 1;
     }
+    return 0;
+  }
+  public static void main(String[] argv) throws Exception {
+    int rc = ToolRunner.run(new LsSnapshottableDir(), argv);
+    System.exit(rc);
   }
-
 }

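With LsSnapshottableDir now implementing Tool, ToolRunner handles the generic Hadoop options (such as -conf, -D, and -fs) before run() is invoked, and callers can pass an explicit Configuration. A minimal sketch of a programmatic invocation (the driver class name is illustrative and not part of this commit):

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir;
    import org.apache.hadoop.util.ToolRunner;

    public class LsSnapshottableDirDriver {
      public static void main(String[] args) throws Exception {
        // ToolRunner parses generic options from args, applies them to the
        // Configuration, and then calls LsSnapshottableDir.run() with the rest.
        int rc = ToolRunner.run(new HdfsConfiguration(), new LsSnapshottableDir(), args);
        System.exit(rc);
      }
    }
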
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1538408&r1=1538407&r2=1538408&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Sun Nov  3 17:51:52 2013
@@ -1543,4 +1543,29 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.edit.log.autoroll.multiplier.threshold</name>
+  <value>2.0</value>
+  <description>
+    Determines when an active namenode will roll its own edit log.
+    The actual threshold (in number of edits) is determined by multiplying
+    this value by dfs.namenode.checkpoint.txns.
+
+    This prevents extremely large edit files from accumulating on the active
+    namenode, which can cause timeouts during namenode startup and pose an
+    administrative hassle. This behavior is intended as a failsafe for when
+    the standby or secondary namenode fail to roll the edit log by the normal
+    checkpoint threshold.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.edit.log.autoroll.check.interval.ms</name>
+  <value>300000</value>
+  <description>
+    How often an active namenode will check if it needs to roll its edit log,
+    in milliseconds.
+  </description>
+</property>
+
 </configuration>

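As a usage note, both properties can be tightened when oversized edit segments are a recurring problem. A minimal sketch of overriding them programmatically, for example in a test or embedded NameNode setup; the values are illustrative, and the same property names go into hdfs-site.xml on a real cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class AutorollTuningExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Roll at 1.5x the checkpoint transaction threshold instead of 2x.
        conf.setFloat(DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 1.5f);
        // Check once a minute instead of every five minutes.
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 60 * 1000);
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS));
      }
    }
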

