hadoop-hdfs-commits mailing list archives

From: sur...@apache.org
Subject: svn commit: r1177127 [1/2] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ hadoop-hdfs/s...
Date: Thu, 29 Sep 2011 00:33:50 GMT
Author: suresh
Date: Thu Sep 29 00:33:34 2011
New Revision: 1177127

URL: http://svn.apache.org/viewvc?rev=1177127&view=rev
Log:
Reverting the previous trunk merge since it pulled in unintended changes in addition to the intended ones

Removed:
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ParamFilter.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
Modified:
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/
            ('svn:ignore' removed)

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:33:34 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1152502-1177115
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1152502-1173011
 /hadoop/core/branches/branch-0.19/hdfs:713112
 /hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Sep 29 00:33:34 2011
@@ -16,9 +16,6 @@ Trunk (unreleased changes)
     HDFS-2318. Provide authentication to webhdfs using SPNEGO and delegation
     tokens.  (szetszwo)
 
-    HDFS-2340. Support getFileBlockLocations and getDelegationToken in webhdfs.
-    (szetszwo)
-
   IMPROVEMENTS
 
     HADOOP-7524 Change RPC to allow multiple protocols including multuple versions of the same protocol (sanjay Radia)
@@ -38,18 +35,6 @@ Trunk (unreleased changes)
     not use ArrayWritable for writing non-array items.  (Uma Maheswara Rao G
     via szetszwo)
 
-    HDFS-2351 Change Namenode and Datanode to register each of their protocols
-    seperately. (Sanjay Radia)
-
-    HDFS-2356.  Support case insensitive query parameter names in webhdfs.
-    (szetszwo)
-
-    HDFS-2368.  Move SPNEGO conf properties from hdfs-default.xml to
-    hdfs-site.xml.  (szetszwo)
-
-    HDFS-2355. Federation: enable using the same configuration file across 
-    all the nodes in the cluster. (suresh)
-
   BUG FIXES
     HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
 
@@ -72,17 +57,6 @@ Trunk (unreleased changes)
     IOExceptions of stream closures can mask root exceptions.  (Uma Maheswara
     Rao G via szetszwo)
 
-    HDFS-46.   Change default namespace quota of root directory from
-    Integer.MAX_VALUE to Long.MAX_VALUE.  (Uma Maheswara Rao G via szetszwo)
-
-    HDFS-2366. Initialize WebHdfsFileSystem.ugi in object construction.
-    (szetszwo)
-
-    HDFS-2373. Commands using webhdfs and hftp print unnecessary debug 
-    info on the console with security enabled. (Arpit Gupta via suresh)
-
-    HDFS-2361. hftp is broken, fixed username checks in JspHelper. (jitendra)
-
 Release 0.23.0 - Unreleased
 
   INCOMPATIBLE CHANGES
@@ -765,12 +739,6 @@ Release 0.23.0 - Unreleased
     HDFS-1217.  Change some NameNode methods from public to package private.
     (Laxman via szetszwo)
 
-    HDFS-2332. Add test for HADOOP-7629 (using an immutable FsPermission
-    object as an RPC parameter fails). (todd)
-
-    HDFS-2363. Move datanodes size printing from FSNamesystem.metasave(..)
-    to BlockManager.  (Uma Maheswara Rao G via szetszwo)
-
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -1639,11 +1607,7 @@ Release 0.22.0 - Unreleased
     HDFS-2232. Generalize regular expressions in TestHDFSCLI.
     (Plamen Jeliazkov via shv)
 
-    HDFS-2290. Block with corrupt replica is not getting replicated.
-    (Benoy Antony via shv)
-
 Release 0.21.1 - Unreleased
-
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 
     HDFS-874. TestHDFSFileContextMainOperations fails on weirdly 

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Sep 29 00:33:34 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1159757-1177115
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1159757-1173011
 /hadoop/core/branches/branch-0.19/hdfs/src/java:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/main/java:713112
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu Sep 29 00:33:34 2011
@@ -244,6 +244,9 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max";
   public static final int     DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED = 500;
 
+  // HA related configuration
+  public static final String DFS_HA_NAMENODE_IDS_KEY = "dfs.ha.namenode.ids";
+  public static final String DFS_HA_NAMENODE_IDS_DEFAULT = "";
 
   // property for fsimage compression
   public static final String DFS_IMAGE_COMPRESS_KEY = "dfs.image.compress";
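
A minimal sketch of how the restored HA key above might be consumed through the stock Configuration API; the empty-string default comes from the hunk itself, while the comma-separated interpretation of the value is only an assumption for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class HaNamenodeIdsLookup {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        String ids = conf.get(DFSConfigKeys.DFS_HA_NAMENODE_IDS_KEY,
                              DFSConfigKeys.DFS_HA_NAMENODE_IDS_DEFAULT);
        // Assumed format: comma-separated ids such as "nn1,nn2"; empty string means unset.
        String[] namenodeIds = ids.isEmpty() ? new String[0] : ids.split(",");
        System.out.println("configured namenode ids: " + namenodeIds.length);
      }
    }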

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Thu Sep 29 00:33:34 2011
@@ -38,7 +38,6 @@ import java.util.Random;
 import java.util.StringTokenizer;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -64,6 +63,7 @@ import org.apache.hadoop.security.UserGr
 
 @InterfaceAudience.Private
 public class DFSUtil {
+  private DFSUtil() { /* Hidden constructor */ }
   private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
     @Override
     protected Random initialValue() {
@@ -577,6 +577,17 @@ public class DFSUtil {
     }
   }
   
+  /**
+   * Returns the configured nameservice Id
+   * 
+   * @param conf
+   *          Configuration object to lookup the nameserviceId
+   * @return nameserviceId string from conf
+   */
+  public static String getNameServiceId(Configuration conf) {
+    return conf.get(DFS_FEDERATION_NAMESERVICE_ID);
+  }
+  
   /** Return used as percentage of capacity */
   public static float getPercentUsed(long used, long capacity) {
     return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity; 
@@ -696,77 +707,4 @@ public class DFSUtil {
     // TODO:HA configuration changes pending
     return false;
   }
-  
-  /**
-   * Get name service Id for the {@link NameNode} based on namenode RPC address
-   * matching the local node address.
-   */
-  public static String getNamenodeNameServiceId(Configuration conf) {
-    return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
-  }
-  
-  /**
-   * Get name service Id for the BackupNode based on backup node RPC address
-   * matching the local node address.
-   */
-  public static String getBackupNameServiceId(Configuration conf) {
-    return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
-  }
-  
-  /**
-   * Get name service Id for the secondary node based on secondary http address
-   * matching the local node address.
-   */
-  public static String getSecondaryNameServiceId(Configuration conf) {
-    return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
-  }
-  
-  /**
-   * Get the nameservice Id by matching the {@code addressKey} with the
-   * the address of the local node. 
-   * 
-   * If {@link DFSConfigKeys#DFS_FEDERATION_NAMESERVICE_ID} is not specifically
-   * configured, this method determines the nameservice Id by matching the local
-   * nodes address with the configured addresses. When a match is found, it
-   * returns the nameservice Id from the corresponding configuration key.
-   * 
-   * @param conf Configuration
-   * @param addressKey configuration key to get the address.
-   * @return name service Id on success, null on failure.
-   * @throws HadoopIllegalArgumentException on error
-   */
-  private static String getNameServiceId(Configuration conf, String addressKey) {
-    String nameserviceId = conf.get(DFS_FEDERATION_NAMESERVICE_ID);
-    if (nameserviceId != null) {
-      return nameserviceId;
-    }
-    
-    Collection<String> ids = getNameServiceIds(conf);
-    if (ids == null || ids.size() == 0) {
-      // Not federation configuration, hence no nameservice Id
-      return null;
-    }
-    
-    // Match the rpc address with that of local address
-    int found = 0;
-    for (String id : ids) {
-      String addr = conf.get(getNameServiceIdKey(addressKey, id));
-      InetSocketAddress s = NetUtils.createSocketAddr(addr);
-      if (NetUtils.isLocalAddress(s.getAddress())) {
-        nameserviceId = id;
-        found++;
-      }
-    }
-    if (found > 1) { // Only one address must match the local address
-      throw new HadoopIllegalArgumentException(
-          "Configuration has multiple RPC addresses that matches "
-              + "the local node's address. Please configure the system with "
-              + "the parameter " + DFS_FEDERATION_NAMESERVICE_ID);
-    }
-    if (found == 0) {
-      throw new HadoopIllegalArgumentException("Configuration address "
-          + addressKey + " is missing in configuration with name service Id");
-    }
-    return nameserviceId;
-  }
 }
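
With this revert, DFSUtil.getNameServiceId is again a one-line lookup of DFS_FEDERATION_NAMESERVICE_ID rather than an address-matching search. A small usage sketch, with an illustrative nameservice id set programmatically (normally it would come from hdfs-site.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class NameServiceIdLookup {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "ns1"); // illustrative value
        // Post-revert behaviour: a plain conf.get(), no matching against local addresses.
        String nsId = DFSUtil.getNameServiceId(conf);
        System.out.println("nameserviceId = " + nsId);
      }
    }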

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Thu Sep 29 00:33:34 2011
@@ -115,26 +115,6 @@ public class DatanodeInfo extends Datano
     this.location = location;
     this.hostName = hostName;
   }
-
-  /** Constructor */
-  public DatanodeInfo(final String name, final String storageID,
-      final int infoPort, final int ipcPort,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
-      final String networkLocation, final String hostName,
-      final AdminStates adminState) {
-    super(name, storageID, infoPort, ipcPort);
-
-    this.capacity = capacity;
-    this.dfsUsed = dfsUsed;
-    this.remaining = remaining;
-    this.blockPoolUsed = blockPoolUsed;
-    this.lastUpdate = lastUpdate;
-    this.xceiverCount = xceiverCount;
-    this.location = networkLocation;
-    this.hostName = hostName;
-    this.adminState = adminState;
-  }
   
   /** The raw capacity. */
   public long getCapacity() { return capacity; }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Thu Sep 29 00:33:34 2011
@@ -308,11 +308,6 @@ public class BlockManager {
   /** Dump meta data to out. */
   public void metaSave(PrintWriter out) {
     assert namesystem.hasWriteLock();
-    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-    datanodeManager.fetchDatanodes(live, dead, false);
-    out.println("Live Datanodes: " + live.size());
-    out.println("Dead Datanodes: " + dead.size());
     //
     // Dump contents of neededReplication
     //
@@ -847,7 +842,7 @@ public class BlockManager {
 
     // Add this replica to corruptReplicas Map
     corruptReplicas.addToCorruptReplicasMap(storedBlock, node);
-    if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
+    if (countNodes(storedBlock).liveReplicas() > inode.getReplication()) {
       // the block is over-replicated so invalidate the replicas immediately
       invalidateBlock(storedBlock, node);
     } else if (namesystem.isPopulatingReplQueues()) {
@@ -872,7 +867,7 @@ public class BlockManager {
     // Check how many copies we have of the block. If we have at least one
     // copy on a live node, then we can delete it.
     int count = countNodes(blk).liveReplicas();
-    if (count >= 1) {
+    if (count > 1) {
       addToInvalidates(blk, dn);
       removeStoredBlock(blk, node);
       if(NameNode.stateChangeLog.isDebugEnabled()) {
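
The two hunks above change the live-replica comparison from >= back to >. A self-contained illustration of the behavioural difference at the boundary (plain integers, not BlockManager internals):

    public class ReplicaThresholdSketch {
      public static void main(String[] args) {
        int liveReplicas = 3, targetReplication = 3;
        // Trunk behaviour being reverted: act as soon as enough live copies exist.
        boolean actBefore = liveReplicas >= targetReplication;   // true
        // Behaviour after this revert: act only with strictly more live copies.
        boolean actAfter = liveReplicas > targetReplication;     // false
        System.out.println(actBefore + " / " + actAfter);
      }
    }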

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Thu Sep 29 00:33:34 2011
@@ -54,13 +54,11 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
-import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.http.HtmlQuoting;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
@@ -70,7 +68,7 @@ import org.apache.hadoop.util.VersionInf
 public class JspHelper {
   public static final String CURRENT_CONF = "current.conf";
   final static public String WEB_UGI_PROPERTY_NAME = DFSConfigKeys.DFS_WEB_UGI_KEY;
-  public static final String DELEGATION_PARAMETER_NAME = DelegationParam.NAME;
+  public static final String DELEGATION_PARAMETER_NAME = "delegation";
   public static final String NAMENODE_ADDRESS = "nnaddr";
   static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
                                               "=";
@@ -553,8 +551,7 @@ public class JspHelper {
         DelegationTokenIdentifier id = new DelegationTokenIdentifier();
         id.readFields(in);
         ugi = id.getUser();
-        checkUsername(ugi.getShortUserName(), usernameFromQuery);
-        checkUsername(ugi.getShortUserName(), user);
+        checkUsername(ugi.getUserName(), user);
         ugi.addToken(token);
         ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
       } else {
@@ -563,11 +560,13 @@ public class JspHelper {
                                 "authenticated by filter");
         }
         ugi = UserGroupInformation.createRemoteUser(user);
-        checkUsername(ugi.getShortUserName(), usernameFromQuery);
         // This is not necessarily true, could have been auth'ed by user-facing
         // filter
         ugi.setAuthenticationMethod(secureAuthMethod);
       }
+
+      checkUsername(user, usernameFromQuery);
+
     } else { // Security's not on, pull from url
       ugi = usernameFromQuery == null?
           getDefaultWebUser(conf) // not specified in request
@@ -580,18 +579,10 @@ public class JspHelper {
     return ugi;
   }
 
-  /**
-   * Expected user name should be a short name.
-   */
   private static void checkUsername(final String expected, final String name
       ) throws IOException {
-    if (name == null) {
-      return;
-    }
-    KerberosName u = new KerberosName(name);
-    String shortName = u.getShortName();
-    if (!shortName.equals(expected)) {
-      throw new IOException("Usernames not matched: name=" + shortName
+    if (name != null && !name.equals(expected)) {
+      throw new IOException("Usernames not matched: name=" + name
           + " != expected=" + expected);
     }
   }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Thu Sep 29 00:33:34 2011
@@ -425,7 +425,7 @@ public class DataNode extends Configured
   private List<ServicePlugin> plugins;
   
   // For InterDataNodeProtocol
-  public RPC.Server ipcServer;
+  public Server ipcServer;
 
   private SecureResources secureResources = null;
   private AbstractList<File> dataDirs;
@@ -575,15 +575,11 @@ public class DataNode extends Configured
   private void initIpcServer(Configuration conf) throws IOException {
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
         conf.get("dfs.datanode.ipc.address"));
-    
-    // Add all the RPC protocols that the Datanode implements
-    ipcServer = RPC.getServer(ClientDatanodeProtocol.class, this, ipcAddr.getHostName(),
+    ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(),
                               ipcAddr.getPort(), 
                               conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, 
                                           DFS_DATANODE_HANDLER_COUNT_DEFAULT), 
                               false, conf, blockPoolTokenSecretManager);
-    ipcServer.addProtocol(InterDatanodeProtocol.class, this);
-    
     // set service-level authorization security policy
     if (conf.getBoolean(
         CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Thu Sep 29 00:33:34 2011
@@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.web.ParamFilter;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
@@ -67,11 +66,8 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 
-import com.sun.jersey.spi.container.ResourceFilters;
-
 /** Web-hdfs DataNode implementation. */
 @Path("")
-@ResourceFilters(ParamFilter.class)
 public class DatanodeWebHdfsMethods {
   public static final Log LOG = LogFactory.getLog(DatanodeWebHdfsMethods.class);
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Thu Sep 29 00:33:34 2011
@@ -25,7 +25,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@@ -81,13 +80,13 @@ public class BackupNode extends NameNode
   // Common NameNode methods implementation for backup node.
   /////////////////////////////////////////////////////
   @Override // NameNode
-  protected InetSocketAddress getRpcServerAddress(Configuration conf) {
+  protected InetSocketAddress getRpcServerAddress(Configuration conf) throws IOException {
     String addr = conf.get(BN_ADDRESS_NAME_KEY, BN_ADDRESS_DEFAULT);
     return NetUtils.createSocketAddr(addr);
   }
   
   @Override
-  protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
+  protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) throws IOException {
     String addr = conf.get(BN_SERVICE_RPC_ADDRESS_KEY);
     if (addr == null || addr.isEmpty()) {
       return null;
@@ -135,6 +134,11 @@ public class BackupNode extends NameNode
                  CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
     NamespaceInfo nsInfo = handshake(conf);
     super.initialize(conf);
+    // Backup node should never do lease recovery,
+    // therefore lease hard limit should never expire.
+    namesystem.leaseManager.setLeasePeriod(
+        HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
+    
     clusterId = nsInfo.getClusterID();
     blockPoolId = nsInfo.getBlockPoolID();
 
@@ -368,9 +372,4 @@ public class BackupNode extends NameNode
       throw new UnsupportedActionException(msg);
     }
   }
-  
-  @Override
-  protected String getNameServiceId(Configuration conf) {
-    return DFSUtil.getBackupNameServiceId(conf);
-  }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Thu Sep 29 00:33:34 2011
@@ -120,7 +120,7 @@ public class FSDirectory implements Clos
     this.cond = dirLock.writeLock().newCondition();
     rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
         ns.createFsOwnerPermissions(new FsPermission((short)0755)),
-        Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
+        Integer.MAX_VALUE, UNKNOWN_DISK_SPACE);
     this.fsImage = fsImage;
     int configuredLimit = conf.getInt(
         DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Sep 29 00:33:34 2011
@@ -130,6 +130,7 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
 
@@ -346,30 +347,28 @@ public class FSNamesystem implements Nam
     dir.imageLoadComplete();
   }
 
-  void startSecrectManager() throws IOException {
+  void activateSecretManager() throws IOException {
     if (dtSecretManager != null) {
       dtSecretManager.startThreads();
     }
   }
   
-  void stopSecretManager() {
-    if (dtSecretManager != null) {
-      dtSecretManager.stopThreads();
-    }
-  }
-  
-  /** 
-   * Start services common to both active and standby states
-   * @throws IOException
+  /**
+   * Activate FSNamesystem daemons.
    */
-  void startCommonServices(Configuration conf) throws IOException {
+  void activate(Configuration conf) throws IOException {
     this.registerMBean(); // register the MBean for the FSNamesystemState
+
     writeLock();
     try {
       nnResourceChecker = new NameNodeResourceChecker(conf);
       checkAvailableResources();
+
       setBlockTotal();
       blockManager.activate(conf);
+
+      this.lmthread = new Daemon(leaseManager.new Monitor());
+      lmthread.start();
       this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
       nnrmthread.start();
     } finally {
@@ -379,70 +378,7 @@ public class FSNamesystem implements Nam
     registerMXBean();
     DefaultMetricsSystem.instance().register(this);
   }
-  
-  /** 
-   * Stop services common to both active and standby states
-   * @throws IOException
-   */
-  void stopCommonServices() {
-    writeLock();
-    try {
-      if (blockManager != null) blockManager.close();
-      if (nnrmthread != null) nnrmthread.interrupt();
-    } finally {
-      writeUnlock();
-    }
-  }
-  
-  /**
-   * Start services required in active state
-   * @throws IOException
-   */
-  void startActiveServices() throws IOException {
-    LOG.info("Starting services required for active state");
-    writeLock();
-    try {
-      startSecrectManager();
-      lmthread = new Daemon(leaseManager.new Monitor());
-      lmthread.start();
-    } finally {
-      writeUnlock();
-    }
-  }
-  
-  /** 
-   * Start services required in active state 
-   * @throws InterruptedException
-   */
-  void stopActiveServices() {
-    LOG.info("Stopping services started for active state");
-    writeLock();
-    try {
-      stopSecretManager();
-      if (lmthread != null) {
-        try {
-          lmthread.interrupt();
-          lmthread.join(3000);
-        } catch (InterruptedException ie) {
-          LOG.warn("Encountered exception ", ie);
-        }
-        lmthread = null;
-      }
-    } finally {
-      writeUnlock();
-    }
-  }
-  
-  /** Start services required in standby state */
-  void startStandbyServices() {
-    LOG.info("Starting services required for standby state");
-  }
 
-  /** Stop services required in standby state */
-  void stopStandbyServices() {
-    LOG.info("Stopping services started for standby state");
-  }
-  
   public static Collection<URI> getNamespaceDirs(Configuration conf) {
     return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY);
   }
@@ -566,7 +502,7 @@ public class FSNamesystem implements Nam
   }
 
   /**
-   * Version of @see #getNamespaceInfo() that is not protected by a lock.
+   * Version of {@see #getNamespaceInfo()} that is not protected by a lock.
    */
   NamespaceInfo unprotectedGetNamespaceInfo() {
     return new NamespaceInfo(dir.fsImage.getStorage().getNamespaceID(),
@@ -583,16 +519,23 @@ public class FSNamesystem implements Nam
   void close() {
     fsRunning = false;
     try {
-      stopCommonServices();
+      if (blockManager != null) blockManager.close();
       if (smmthread != null) smmthread.interrupt();
+      if (dtSecretManager != null) dtSecretManager.stopThreads();
+      if (nnrmthread != null) nnrmthread.interrupt();
+    } catch (Exception e) {
+      LOG.warn("Exception shutting down FSNamesystem", e);
     } finally {
       // using finally to ensure we also wait for lease daemon
       try {
-        stopActiveServices();
-        stopStandbyServices();
+        if (lmthread != null) {
+          lmthread.interrupt();
+          lmthread.join(3000);
+        }
         if (dir != null) {
           dir.close();
         }
+      } catch (InterruptedException ie) {
       } catch (IOException ie) {
         LOG.error("Error closing FSDirectory", ie);
         IOUtils.cleanup(LOG, dir);
@@ -621,6 +564,11 @@ public class FSNamesystem implements Nam
       out.println(totalInodes + " files and directories, " + totalBlocks
           + " blocks = " + (totalInodes + totalBlocks) + " total");
 
+      final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+      final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+      blockManager.getDatanodeManager().fetchDatanodes(live, dead, false);
+      out.println("Live Datanodes: "+live.size());
+      out.println("Dead Datanodes: "+dead.size());
       blockManager.metaSave(out);
 
       out.flush();
@@ -1443,7 +1391,7 @@ public class FSNamesystem implements Nam
     try {
       lb = startFileInternal(src, null, holder, clientMachine, 
                         EnumSet.of(CreateFlag.APPEND), 
-                        false, blockManager.maxReplication, 0);
+                        false, blockManager.maxReplication, (long)0);
     } finally {
       writeUnlock();
     }
@@ -1526,7 +1474,7 @@ public class FSNamesystem implements Nam
       fileLength = pendingFile.computeContentSummary().getLength();
       blockSize = pendingFile.getPreferredBlockSize();
       clientNode = pendingFile.getClientNode();
-      replication = pendingFile.getReplication();
+      replication = (int)pendingFile.getReplication();
     } finally {
       writeUnlock();
     }
@@ -2321,7 +2269,7 @@ public class FSNamesystem implements Nam
   }
   
   Lease reassignLeaseInternal(Lease lease, String src, String newHolder,
-      INodeFileUnderConstruction pendingFile) {
+      INodeFileUnderConstruction pendingFile) throws IOException {
     assert hasWriteLock();
     pendingFile.setClientName(newHolder);
     return leaseManager.reassignLease(lease, src, newHolder);
@@ -2926,9 +2874,13 @@ public class FSNamesystem implements Nam
      * @return true if in safe mode
      */
     private synchronized boolean isOn() {
-      assert isConsistent() : " SafeMode: Inconsistent filesystem state: "
-        + "Total num of blocks, active blocks, or "
-        + "total safe blocks don't match.";
+      try {
+        assert isConsistent() : " SafeMode: Inconsistent filesystem state: "
+          + "Total num of blocks, active blocks, or "
+          + "total safe blocks don't match.";
+      } catch(IOException e) {
+        System.err.print(StringUtils.stringifyException(e));
+      }
       return this.reached >= 0;
     }
       
@@ -3082,7 +3034,7 @@ public class FSNamesystem implements Nam
       this.blockTotal = total;
       this.blockThreshold = (int) (blockTotal * threshold);
       this.blockReplQueueThreshold = 
-        (int) (blockTotal * replQueueThreshold);
+        (int) (((double) blockTotal) * replQueueThreshold);
       checkMode();
     }
       
@@ -3092,7 +3044,7 @@ public class FSNamesystem implements Nam
      * @param replication current replication 
      */
     private synchronized void incrementSafeBlockCount(short replication) {
-      if (replication == safeReplication)
+      if ((int)replication == safeReplication)
         this.blockSafe++;
       checkMode();
     }
@@ -3225,7 +3177,7 @@ public class FSNamesystem implements Nam
      * Checks consistency of the class state.
      * This is costly and currently called only in assert.
      */
-    private boolean isConsistent() {
+    private boolean isConsistent() throws IOException {
       if (blockTotal == -1 && blockSafe == -1) {
         return true; // manual safe mode
       }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Sep 29 00:33:34 2011
@@ -27,7 +27,6 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HealthCheckFailedException;
@@ -38,13 +37,15 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Trash;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
-import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -53,6 +54,9 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
@@ -167,18 +171,19 @@ public class NameNode {
     }
   }
     
+
+
   public static final int DEFAULT_PORT = 8020;
+
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
   public static final HAState ACTIVE_STATE = new ActiveState();
   public static final HAState STANDBY_STATE = new StandbyState();
   
   protected FSNamesystem namesystem; 
-  protected final Configuration conf;
   protected NamenodeRole role;
   private HAState state;
   private final boolean haEnabled;
-  private final HAContext haContext;
 
   
   /** httpServer */
@@ -307,11 +312,12 @@ public class NameNode {
    * Given a configuration get the address of the service rpc server
    * If the service rpc is not configured returns null
    */
-  protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
+  protected InetSocketAddress getServiceRpcServerAddress(Configuration conf)
+    throws IOException {
     return NameNode.getServiceAddress(conf, false);
   }
 
-  protected InetSocketAddress getRpcServerAddress(Configuration conf) {
+  protected InetSocketAddress getRpcServerAddress(Configuration conf) throws IOException {
     return getAddress(conf);
   }
   
@@ -374,6 +380,7 @@ public class NameNode {
    * @param conf the configuration
    */
   protected void initialize(Configuration conf) throws IOException {
+    initializeGenericKeys(conf);
     UserGroupInformation.setConfiguration(conf);
     loginAsNameNodeUser(conf);
 
@@ -389,7 +396,7 @@ public class NameNode {
       throw e;
     }
 
-    startCommonServices(conf);
+    activate(conf);
   }
   
   /**
@@ -423,10 +430,19 @@ public class NameNode {
     } 
   }
 
-  /** Start the services common to active and standby states */
-  private void startCommonServices(Configuration conf) throws IOException {
-    namesystem.startCommonServices(conf);
+  /**
+   * Activate name-node servers and threads.
+   */
+  void activate(Configuration conf) throws IOException {
+    if ((isRole(NamenodeRole.NAMENODE))
+        && (UserGroupInformation.isSecurityEnabled())) {
+      namesystem.activateSecretManager();
+    }
+    namesystem.activate(conf);
+    startHttpServer(conf);
     rpcServer.start();
+    startTrashEmptier(conf);
+    
     plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
         ServicePlugin.class);
     for (ServicePlugin p: plugins) {
@@ -436,29 +452,13 @@ public class NameNode {
         LOG.warn("ServicePlugin " + p + " could not be started", t);
       }
     }
+    
     LOG.info(getRole() + " up at: " + rpcServer.getRpcAddress());
     if (rpcServer.getServiceRpcAddress() != null) {
-      LOG.info(getRole() + " service server is up at: "
-          + rpcServer.getServiceRpcAddress());
+      LOG.info(getRole() + " service server is up at: " + rpcServer.getServiceRpcAddress()); 
     }
-    startHttpServer(conf);
   }
-  
-  private void stopCommonServices() {
-    if(namesystem != null) namesystem.close();
-    if(rpcServer != null) rpcServer.stop();
-    if (plugins != null) {
-      for (ServicePlugin p : plugins) {
-        try {
-          p.stop();
-        } catch (Throwable t) {
-          LOG.warn("ServicePlugin " + p + " could not be stopped", t);
-        }
-      }
-    }   
-    stopHttpServer();
-  }
-  
+
   private void startTrashEmptier(Configuration conf) throws IOException {
     long trashInterval 
       = conf.getLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, 
@@ -470,26 +470,11 @@ public class NameNode {
     this.emptier.start();
   }
   
-  private void stopTrashEmptier() {
-    if (this.emptier != null) {
-      emptier.interrupt();
-      emptier = null;
-    }
-  }
-  
   private void startHttpServer(final Configuration conf) throws IOException {
     httpServer = new NameNodeHttpServer(conf, this, getHttpServerAddress(conf));
     httpServer.start();
     setHttpServerAddress(conf);
   }
-  
-  private void stopHttpServer() {
-    try {
-      if (httpServer != null) httpServer.stop();
-    } catch (Exception e) {
-      LOG.error("Exception while stopping httpserver", e);
-    }
-  }
 
   /**
    * Start NameNode.
@@ -516,36 +501,22 @@ public class NameNode {
    * <code>zero</code> in the conf.
    * 
    * @param conf  confirguration
-   * @throws IOException on error
+   * @throws IOException
    */
   public NameNode(Configuration conf) throws IOException {
     this(conf, NamenodeRole.NAMENODE);
   }
 
   protected NameNode(Configuration conf, NamenodeRole role) 
-      throws IOException {
-    this.conf = conf;
+      throws IOException { 
     this.role = role;
     this.haEnabled = DFSUtil.isHAEnabled(conf);
-    this.haContext = new NameNodeHAContext();
+    this.state = !haEnabled ? ACTIVE_STATE : STANDBY_STATE;
     try {
-      initializeGenericKeys(conf, getNameServiceId(conf));
       initialize(conf);
-      if (!haEnabled) {
-        state = ACTIVE_STATE;
-      } else {
-        state = STANDBY_STATE;;
-      }
-      state.enterState(haContext);
     } catch (IOException e) {
       this.stop();
       throw e;
-    } catch (ServiceFailedException e) {
-      this.stop();
-      throw new IOException("Service failed to start", e);
-    } catch (HadoopIllegalArgumentException e) {
-      this.stop();
-      throw e;
     }
   }
 
@@ -557,7 +528,6 @@ public class NameNode {
     try {
       this.rpcServer.join();
     } catch (InterruptedException ie) {
-      LOG.info("Caught interrupted exception " + ie);
     }
   }
 
@@ -570,12 +540,23 @@ public class NameNode {
         return;
       stopRequested = true;
     }
+    if (plugins != null) {
+      for (ServicePlugin p : plugins) {
+        try {
+          p.stop();
+        } catch (Throwable t) {
+          LOG.warn("ServicePlugin " + p + " could not be stopped", t);
+        }
+      }
+    }
     try {
-      state.exitState(haContext);
-    } catch (ServiceFailedException e) {
-      LOG.info("Encountered exception while exiting state " + e);
+      if (httpServer != null) httpServer.stop();
+    } catch (Exception e) {
+      LOG.error("Exception while stopping httpserver", e);
     }
-    stopCommonServices();
+    if(namesystem != null) namesystem.close();
+    if(emptier != null) emptier.interrupt();
+    if(rpcServer != null) rpcServer.stop();
     if (metrics != null) {
       metrics.shutdown();
     }
@@ -840,16 +821,16 @@ public class NameNode {
    * @param conf
    *          Configuration object to lookup specific key and to set the value
    *          to the key passed. Note the conf object is modified
-   * @param nameserviceId name service Id
    * @see DFSUtil#setGenericConf(Configuration, String, String...)
    */
-  public static void initializeGenericKeys(Configuration conf, String
-      nameserviceId) {
+  public static void initializeGenericKeys(Configuration conf) {
+    final String nameserviceId = DFSUtil.getNameServiceId(conf);
     if ((nameserviceId == null) || nameserviceId.isEmpty()) {
       return;
     }
     
     DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
+    
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@@ -857,14 +838,6 @@ public class NameNode {
     }
   }
     
-  /** 
-   * Get the name service Id for the node
-   * @return name service Id or null if federation is not configured
-   */
-  protected String getNameServiceId(Configuration conf) {
-    return DFSUtil.getNamenodeNameServiceId(conf);
-  }
-  
   /**
    */
   public static void main(String argv[]) throws Exception {
@@ -891,56 +864,27 @@ public class NameNode {
     if (!haEnabled) {
       throw new ServiceFailedException("HA for namenode is not enabled");
     }
-    state.setState(haContext, ACTIVE_STATE);
+    state.setState(this, ACTIVE_STATE);
   }
   
   synchronized void transitionToStandby() throws ServiceFailedException {
     if (!haEnabled) {
       throw new ServiceFailedException("HA for namenode is not enabled");
     }
-    state.setState(haContext, STANDBY_STATE);
+    state.setState(this, STANDBY_STATE);
   }
   
   /** Check if an operation of given category is allowed */
   protected synchronized void checkOperation(final OperationCategory op)
       throws UnsupportedActionException {
-    state.checkOperation(haContext, op);
+    state.checkOperation(this, op);
   }
   
-  /**
-   * Class used as expose {@link NameNode} as context to {@link HAState}
-   */
-  private class NameNodeHAContext implements HAContext {
-    @Override
-    public void setState(HAState s) {
-      state = s;
-    }
-
-    @Override
-    public HAState getState() {
-      return state;
-    }
-
-    @Override
-    public void startActiveServices() throws IOException {
-      namesystem.startActiveServices();
-      startTrashEmptier(conf);
-    }
-
-    @Override
-    public void stopActiveServices() throws IOException {
-      namesystem.stopActiveServices();
-      stopTrashEmptier();
-    }
-
-    @Override
-    public void startStandbyServices() throws IOException {
-      // TODO:HA Start reading editlog from active
-    }
-
-    @Override
-    public void stopStandbyServices() throws IOException {
-      // TODO:HA Stop reading editlog from active
-    }
+  public synchronized HAState getState() {
+    return state;
+  }
+  
+  public synchronized void setState(final HAState s) {
+    state = s;
   }
 }
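
After this revert, generic-key initialization is back to the single-argument NameNode.initializeGenericKeys(Configuration) and is also invoked from initialize() itself, so a caller only needs the public constructor. A rough sketch, assuming a formatted name directory and the usual NameNode settings are already present in the configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    public class StartNameNodeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        NameNode nn = new NameNode(conf); // initialize() calls initializeGenericKeys(conf)
        nn.join();                        // block until the RPC server shuts down
      }
    }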

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Thu Sep 29 00:33:34 2011
@@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -146,17 +145,10 @@ class NameNodeRpcServer implements Namen
       serviceRpcServer = null;
       serviceRPCAddress = null;
     }
-    // Add all the RPC protocols that the namenode implements
-    this.server = RPC.getServer(ClientProtocol.class, this,
+    this.server = RPC.getServer(NamenodeProtocols.class, this,
                                 socAddr.getHostName(), socAddr.getPort(),
                                 handlerCount, false, conf, 
                                 namesystem.getDelegationTokenSecretManager());
-    this.server.addProtocol(DatanodeProtocol.class, this);
-    this.server.addProtocol(NamenodeProtocol.class, this);
-    this.server.addProtocol(RefreshAuthorizationPolicyProtocol.class, this);
-    this.server.addProtocol(RefreshUserMappingsProtocol.class, this);
-    this.server.addProtocol(GetUserMappingsProtocol.class, this);
-    
 
     // set service-level authorization security policy
     if (serviceAuthEnabled =
@@ -979,11 +971,8 @@ class NameNodeRpcServer implements Namen
   }
 
   private static String getClientMachine() {
-    String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
-    if (clientMachine == null) { //not a web client
-      clientMachine = Server.getRemoteAddress();
-    }
-    if (clientMachine == null) { //not a RPC client
+    String clientMachine = Server.getRemoteAddress();
+    if (clientMachine == null) {
       clientMachine = "";
     }
     return clientMachine;
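
The NameNodeRpcServer hunk goes back to registering the single aggregate NamenodeProtocols interface with RPC.getServer instead of registering ClientProtocol and adding the remaining protocols one by one, and getClientMachine() again consults only the RPC layer. Below is a small sketch of that fallback; the supplier is a hypothetical stand-in for org.apache.hadoop.ipc.Server.getRemoteAddress(), which returns null outside an RPC handler thread.

import java.util.function.Supplier;

// Sketch of the reverted getClientMachine() fallback: ask the RPC layer for the
// caller's address and fall back to an empty string when the call did not come
// in over RPC.
public class ClientMachineSketch {

    static String getClientMachine(Supplier<String> rpcRemoteAddress) {
        String clientMachine = rpcRemoteAddress.get();
        if (clientMachine == null) {
            clientMachine = ""; // not an RPC client, e.g. a direct in-process call
        }
        return clientMachine;
    }

    public static void main(String[] args) {
        System.out.println("[" + getClientMachine(() -> "10.0.0.7") + "]");
        System.out.println("[" + getClientMachine(() -> null) + "]");
    }
}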

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Thu Sep 29 00:33:34 2011
@@ -38,12 +38,10 @@ import org.apache.commons.cli.ParseExcep
 import org.apache.commons.cli.PosixParser;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -175,17 +173,12 @@ public class SecondaryNameNode implement
   public SecondaryNameNode(Configuration conf,
       CommandLineOpts commandLineOpts) throws IOException {
     try {
-      NameNode.initializeGenericKeys(conf,
-          DFSUtil.getSecondaryNameServiceId(conf));
+      NameNode.initializeGenericKeys(conf);
       initialize(conf, commandLineOpts);
     } catch(IOException e) {
       shutdown();
       LOG.fatal("Failed to start secondary namenode. ", e);
       throw e;
-    } catch(HadoopIllegalArgumentException e) {
-      shutdown();
-      LOG.fatal("Failed to start secondary namenode. ", e);
-      throw e;
     }
   }
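
The SecondaryNameNode constructor reverts to a single IOException catch around initialization, calling shutdown() before rethrowing so a half-started daemon is not left running. A self-contained sketch of that fail-fast idiom, with illustrative names only:

import java.io.IOException;

// Sketch of the constructor's fail-fast shape after the revert: initialization
// failures trigger shutdown() before the exception is rethrown, so a
// half-initialized daemon is not left running.
public class FailFastDaemonSketch {

    FailFastDaemonSketch(boolean failInit) throws IOException {
        try {
            initialize(failInit);
        } catch (IOException e) {
            shutdown();                       // release whatever initialize() grabbed
            System.err.println("Failed to start secondary namenode. " + e);
            throw e;                          // caller still sees the original failure
        }
    }

    private void initialize(boolean fail) throws IOException {
        if (fail) {
            throw new IOException("simulated initialization failure");
        }
    }

    private void shutdown() {
        System.out.println("shutdown() called, cleaning up partial state");
    }

    public static void main(String[] args) {
        try {
            new FailFastDaemonSketch(true);
        } catch (IOException expected) {
            System.out.println("constructor rethrew: " + expected.getMessage());
        }
    }
}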
   

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java Thu Sep 29 00:33:34 2011
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import java.io.IOException;
-
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
@@ -35,35 +33,27 @@ public class ActiveState extends HAState
   }
 
   @Override
-  public void checkOperation(HAContext context, OperationCategory op)
+  public void checkOperation(NameNode nn, OperationCategory op)
       throws UnsupportedActionException {
     return; // Other than journal all operations are allowed in active state
   }
   
   @Override
-  public void setState(HAContext context, HAState s) throws ServiceFailedException {
+  public void setState(NameNode nn, HAState s) throws ServiceFailedException {
     if (s == NameNode.STANDBY_STATE) {
-      setStateInternal(context, s);
+      setStateInternal(nn, s);
       return;
     }
-    super.setState(context, s);
+    super.setState(nn, s);
   }
 
   @Override
-  public void enterState(HAContext context) throws ServiceFailedException {
-    try {
-      context.startActiveServices();
-    } catch (IOException e) {
-      throw new ServiceFailedException("Failed to start active services", e);
-    }
+  protected void enterState(NameNode nn) throws ServiceFailedException {
+    // TODO:HA
   }
 
   @Override
-  public void exitState(HAContext context) throws ServiceFailedException {
-    try {
-      context.stopActiveServices();
-    } catch (IOException e) {
-      throw new ServiceFailedException("Failed to stop active services", e);
-    }
+  protected void exitState(NameNode nn) throws ServiceFailedException {
+    // TODO:HA
   }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java Thu Sep 29 00:33:34 2011
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.na
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 
@@ -43,38 +44,38 @@ abstract public class HAState {
    * @param s new state
    * @throws ServiceFailedException on failure to transition to new state.
    */
-  protected final void setStateInternal(final HAContext context, final HAState s)
+  protected final void setStateInternal(final NameNode nn, final HAState s)
       throws ServiceFailedException {
-    exitState(context);
-    context.setState(s);
-    s.enterState(context);
+    exitState(nn);
+    nn.setState(s);
+    s.enterState(nn);
   }
 
   /**
    * Method to be overridden by subclasses to perform steps necessary for
    * entering a state.
-   * @param context HA context
+   * @param nn Namenode
    * @throws ServiceFailedException on failure to enter the state.
    */
-  public abstract void enterState(final HAContext context)
+  protected abstract void enterState(final NameNode nn)
       throws ServiceFailedException;
 
   /**
    * Method to be overridden by subclasses to perform steps necessary for
    * exiting a state.
-   * @param context HA context
+   * @param nn Namenode
    * @throws ServiceFailedException on failure to enter the state.
    */
-  public abstract void exitState(final HAContext context)
+  protected abstract void exitState(final NameNode nn)
       throws ServiceFailedException;
 
   /**
    * Move from the existing state to a new state
-   * @param context HA context
+   * @param nn Namenode
    * @param s new state
    * @throws ServiceFailedException on failure to transition to new state.
    */
-  public void setState(HAContext context, HAState s) throws ServiceFailedException {
+  public void setState(NameNode nn, HAState s) throws ServiceFailedException {
     if (this == s) { // Already in the new state
       return;
     }
@@ -84,15 +85,15 @@ abstract public class HAState {
   
   /**
    * Check if an operation is supported in a given state.
-   * @param context HA context
+   * @param nn Namenode
    * @param op Type of the operation.
    * @throws UnsupportedActionException if a given type of operation is not
    *           supported in this state.
    */
-  public void checkOperation(final HAContext context, final OperationCategory op)
+  public void checkOperation(final NameNode nn, final OperationCategory op)
       throws UnsupportedActionException {
     String msg = "Operation category " + op + " is not supported in state "
-        + context.getState();
+        + nn.getState();
     throw new UnsupportedActionException(msg);
   }
   

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java Thu Sep 29 00:33:34 2011
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import java.io.IOException;
-
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
@@ -39,30 +37,22 @@ public class StandbyState extends HAStat
   }
 
   @Override
-  public void setState(HAContext context, HAState s) throws ServiceFailedException {
+  public void setState(NameNode nn, HAState s) throws ServiceFailedException {
     if (s == NameNode.ACTIVE_STATE) {
-      setStateInternal(context, s);
+      setStateInternal(nn, s);
       return;
     }
-    super.setState(context, s);
+    super.setState(nn, s);
   }
 
   @Override
-  public void enterState(HAContext context) throws ServiceFailedException {
-    try {
-      context.startStandbyServices();
-    } catch (IOException e) {
-      throw new ServiceFailedException("Failed to start standby services", e);
-    }
+  protected void enterState(NameNode nn) throws ServiceFailedException {
+    // TODO:HA
   }
 
   @Override
-  public void exitState(HAContext context) throws ServiceFailedException {
-    try {
-      context.stopStandbyServices();
-    } catch (IOException e) {
-      throw new ServiceFailedException("Failed to stop standby services", e);
-    }
+  protected void exitState(NameNode nn) throws ServiceFailedException {
+    // TODO:HA
   }
 }
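
Taken together, the ActiveState, HAState and StandbyState hunks revert the state classes to operate directly on the NameNode rather than on an HAContext, with the enter/exit hooks reduced to TODO stubs. The sketch below shows the underlying state-machine shape the reverted code follows: the base class's setStateInternal() runs the exit hook, swaps the reference, then runs the enter hook; checkOperation() rejects by default and the active state overrides it to allow everything. It is self-contained and its names are illustrative, not the Hadoop sources.

// Self-contained sketch of the reverted HAState family: a small state machine in
// which setStateInternal() runs exit/enter hooks around the swap, the base class
// rejects operations by default, and the active state allows them.
public class HaStateMachineSketch {

    static class Node {
        State state = STANDBY;
    }

    abstract static class State {
        private final String name;
        State(String name) { this.name = name; }

        // Template method mirroring setStateInternal(): exit old, swap, enter new.
        protected final void setStateInternal(Node nn, State s) {
            exitState(nn);
            nn.state = s;
            s.enterState(nn);
        }

        protected void enterState(Node nn) { System.out.println("entering " + name); }
        protected void exitState(Node nn)  { System.out.println("exiting " + name); }

        void setState(Node nn, State s) {
            if (this == s) {
                return; // already in the requested state
            }
            throw new IllegalStateException("cannot go from " + name + " to " + s.name);
        }

        void checkOperation(Node nn, String op) {
            throw new UnsupportedOperationException(
                "Operation category " + op + " is not supported in state " + name);
        }

        @Override public String toString() { return name; }
    }

    static final State ACTIVE = new State("active") {
        @Override void checkOperation(Node nn, String op) {
            // everything is allowed while active
        }
        @Override void setState(Node nn, State s) {
            if (s == STANDBY) { setStateInternal(nn, s); return; }
            super.setState(nn, s);
        }
    };

    static final State STANDBY = new State("standby") {
        @Override void setState(Node nn, State s) {
            if (s == ACTIVE) { setStateInternal(nn, s); return; }
            super.setState(nn, s);
        }
    };

    public static void main(String[] args) {
        Node nn = new Node();
        nn.state.setState(nn, ACTIVE);        // standby -> active via exit/enter hooks
        nn.state.checkOperation(nn, "WRITE"); // allowed in the active state
        System.out.println("final state: " + nn.state);
    }
}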
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Thu Sep 29 00:33:34 2011
@@ -57,7 +57,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.JsonUtil;
-import org.apache.hadoop.hdfs.web.ParamFilter;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -79,7 +78,6 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
 import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
-import org.apache.hadoop.hdfs.web.resources.RenewerParam;
 import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -91,20 +89,10 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 
-import com.sun.jersey.spi.container.ResourceFilters;
-
 /** Web-hdfs NameNode implementation. */
 @Path("")
-@ResourceFilters(ParamFilter.class)
 public class NamenodeWebHdfsMethods {
-  public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
-
-  private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<String>(); 
-
-  /** @return the remote client address. */
-  public static String getRemoteAddress() {
-    return REMOTE_ADDRESS.get();
-  }
+  private static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
 
   private @Context ServletContext context;
   private @Context HttpServletRequest request;
@@ -227,8 +215,6 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
-        try {
 
     final String fullpath = path.getAbsolutePath();
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
@@ -286,10 +272,6 @@ public class NamenodeWebHdfsMethods {
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
-
-        } finally {
-          REMOTE_ADDRESS.set(null);
-        }
       }
     });
   }
@@ -319,8 +301,6 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
-        try {
 
     final String fullpath = path.getAbsolutePath();
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
@@ -335,10 +315,6 @@ public class NamenodeWebHdfsMethods {
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
-
-        } finally {
-          REMOTE_ADDRESS.set(null);
-        }
       }
     });
   }
@@ -359,12 +335,10 @@ public class NamenodeWebHdfsMethods {
           final OffsetParam offset,
       @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
           final LengthParam length,
-      @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
-          final RenewerParam renewer,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize
       ) throws IOException, URISyntaxException, InterruptedException {
-    return get(ugi, delegation, ROOT, op, offset, length, renewer, bufferSize);
+    return get(ugi, delegation, ROOT, op, offset, length, bufferSize);
   }
 
   /** Handle HTTP GET request. */
@@ -382,23 +356,19 @@ public class NamenodeWebHdfsMethods {
           final OffsetParam offset,
       @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
           final LengthParam length,
-      @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
-          final RenewerParam renewer,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize
       ) throws IOException, URISyntaxException, InterruptedException {
 
     if (LOG.isTraceEnabled()) {
       LOG.trace(op + ": " + path + ", ugi=" + ugi
-          + Param.toSortedString(", ", offset, length, renewer, bufferSize));
+          + Param.toSortedString(", ", offset, length, bufferSize));
     }
 
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
-        try {
 
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final String fullpath = path.getAbsolutePath();
@@ -411,15 +381,6 @@ public class NamenodeWebHdfsMethods {
           op.getValue(), offset.getValue(), offset, length, bufferSize);
       return Response.temporaryRedirect(uri).build();
     }
-    case GETFILEBLOCKLOCATIONS:
-    {
-      final long offsetValue = offset.getValue();
-      final Long lengthValue = length.getValue();
-      final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
-          offsetValue, lengthValue != null? lengthValue: offsetValue + 1);
-      final String js = JsonUtil.toJsonString(locatedblocks);
-      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    }
     case GETFILESTATUS:
     {
       final HdfsFileStatus status = np.getFileInfo(fullpath);
@@ -431,20 +392,9 @@ public class NamenodeWebHdfsMethods {
       final StreamingOutput streaming = getListingStream(np, fullpath);
       return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
     }
-    case GETDELEGATIONTOKEN:
-    {
-      final Token<? extends TokenIdentifier> token = generateDelegationToken(
-          namenode, ugi, renewer.getValue());
-      final String js = JsonUtil.toJsonString(token);
-      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }    
-
-        } finally {
-          REMOTE_ADDRESS.set(null);
-        }
       }
     });
   }
@@ -512,9 +462,6 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
-        try {
-
         final NameNode namenode = (NameNode)context.getAttribute("name.node");
         final String fullpath = path.getAbsolutePath();
 
@@ -528,10 +475,6 @@ public class NamenodeWebHdfsMethods {
         default:
           throw new UnsupportedOperationException(op + " is not supported");
         }
-
-        } finally {
-          REMOTE_ADDRESS.set(null);
-        }
       }
     });
   }
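
The NamenodeWebHdfsMethods hunk removes, among other things, the ThreadLocal REMOTE_ADDRESS plumbing that recorded the caller's address for the duration of each request and cleared it in a finally block. A self-contained sketch of that thread-local request-context idiom follows; the names are illustrative, and the real class wires this into its JAX-RS handlers rather than a Callable.

import java.util.concurrent.Callable;

// Sketch of the ThreadLocal idiom that this revert removes from the WebHDFS
// methods: the handler records the caller's address for the duration of the
// request and always clears it in a finally block so pooled worker threads are
// not left holding stale values.
public class RequestAddressContextSketch {

    private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<>();

    /** @return the remote client address recorded for the current request, or null. */
    public static String getRemoteAddress() {
        return REMOTE_ADDRESS.get();
    }

    static <T> T runWithRemoteAddress(String addr, Callable<T> body) throws Exception {
        REMOTE_ADDRESS.set(addr);
        try {
            return body.call();
        } finally {
            REMOTE_ADDRESS.set(null); // never leak the address into the next request
        }
    }

    public static void main(String[] args) throws Exception {
        String seen = runWithRemoteAddress("192.0.2.10",
            () -> "handler saw " + getRemoteAddress());
        System.out.println(seen);
        System.out.println("after request: " + getRemoteAddress()); // null again
    }
}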

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1177127&r1=1177126&r2=1177127&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Thu Sep 29 00:33:34 2011
@@ -149,9 +149,7 @@ public class DelegationTokenFetcher {
                 DataInputStream in = new DataInputStream(
                     new ByteArrayInputStream(token.getIdentifier()));
                 id.readFields(in);
-                if(LOG.isDebugEnabled()) {
-                  LOG.debug("Token (" + id + ") for " + token.getService());
-                }
+                System.out.println("Token (" + id + ") for " + token.getService());
               }
               return null;
             }
@@ -162,28 +160,22 @@ public class DelegationTokenFetcher {
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                   result = renewDelegationToken(webUrl,
                       (Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                	  LOG.debug("Renewed token via " + webUrl + " for "
-                          + token.getService() + " until: " + new Date(result));
-                  }
+                  System.out.println("Renewed token via " + webUrl + " for "
+                      + token.getService() + " until: " + new Date(result));
                 }
               } else if (cancel) {
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                   cancelDelegationToken(webUrl,
                       (Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                    LOG.debug("Cancelled token via " + webUrl + " for "
-                	    + token.getService());
-                  }
+                  System.out.println("Cancelled token via " + webUrl + " for "
+                      + token.getService());
                 }
               } else {
                 Credentials creds = getDTfromRemote(webUrl, renewer);
                 creds.writeTokenStorageFile(tokenFile, conf);
                 for (Token<?> token : creds.getAllTokens()) {
-                  if(LOG.isDebugEnabled()) {	
-                    LOG.debug("Fetched token via " + webUrl + " for "
-                        + token.getService() + " into " + tokenFile);
-                  }
+                  System.out.println("Fetched token via " + webUrl + " for "
+                      + token.getService() + " into " + tokenFile);
                 }
               }
             } else {
@@ -192,30 +184,24 @@ public class DelegationTokenFetcher {
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                   ((DistributedFileSystem) fs)
                       .cancelDelegationToken((Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                    LOG.debug("Cancelled token for "
-                        + token.getService());
-                  }
+                  System.out.println("Cancelled token for "
+                      + token.getService());
                 }
               } else if (renew) {
                 long result;
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                   result = ((DistributedFileSystem) fs)
                       .renewDelegationToken((Token<DelegationTokenIdentifier>) token);
-                  if(LOG.isDebugEnabled()) {
-                    LOG.debug("Renewed token for " + token.getService()
-                        + " until: " + new Date(result));
-                  }
+                  System.out.println("Renewed token for " + token.getService()
+                      + " until: " + new Date(result));
                 }
               } else {
                 Token<?> token = fs.getDelegationToken(renewer);
                 Credentials cred = new Credentials();
                 cred.addToken(token.getService(), token);
                 cred.writeTokenStorageFile(tokenFile, conf);
-                if(LOG.isDebugEnabled()) {
-                  LOG.debug("Fetched token for " + token.getService()
-                      + " into " + tokenFile);
-                }
+                System.out.println("Fetched token for " + token.getService()
+                    + " into " + tokenFile);
               }
             }
             return null;
@@ -235,11 +221,6 @@ public class DelegationTokenFetcher {
       } else {
         url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
       }
-      
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Retrieving token from: " + url);
-      }
-      
       URL remoteURL = new URL(url.toString());
       SecurityUtil.fetchServiceTicket(remoteURL);
       URLConnection connection = remoteURL.openConnection();
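
Finally, DelegationTokenFetcher goes back to reporting its progress with System.out.println instead of isDebugEnabled()-guarded LOG.debug calls. The short sketch below contrasts the two styles; java.util.logging is used only to keep the example self-contained, whereas the real class uses commons-logging.

import java.util.logging.Level;
import java.util.logging.Logger;

// Sketch of the two reporting styles this hunk toggles between: the forward
// change wrapped messages in isDebugEnabled()-style guards, while the revert
// prints directly to stdout so the CLI user always sees the result.
public class TokenReportingSketch {

    private static final Logger LOG =
        Logger.getLogger(TokenReportingSketch.class.getName());

    static void reportGuarded(String service) {
        // Guard avoids building the message string when debug output is disabled.
        if (LOG.isLoggable(Level.FINE)) {
            LOG.fine("Fetched token for " + service);
        }
    }

    static void reportToStdout(String service) {
        // Reverted tool behavior: unconditionally tell the operator what happened.
        System.out.println("Fetched token for " + service);
    }

    public static void main(String[] args) {
        reportGuarded("hdfs://nn.example.com:8020");   // usually silent at default level
        reportToStdout("hdfs://nn.example.com:8020");  // always visible
    }
}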


