hadoop-hdfs-commits mailing list archives

From: cnaur...@apache.org
Subject: svn commit: r1548386 [1/2] - in /hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/libwebhdfs/src/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apac...
Date: Fri, 06 Dec 2013 06:57:18 GMT
Author: cnauroth
Date: Fri Dec  6 06:57:15 2013
New Revision: 1548386

URL: http://svn.apache.org/r1548386
Log:
Merge trunk to HDFS-4685.

Added:
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java
      - copied unchanged from r1548385, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java
      - copied unchanged from r1548385, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
      - copied unchanged from r1548385, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java
      - copied unchanged from r1548385, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java
Modified:
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1547224-1548385

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Dec  6 06:57:15 2013
@@ -228,6 +228,11 @@ Trunk (Unreleased)
 
     HDFS-5430. Support TTL on CacheDirectives. (wang)
 
+    HDFS-5536. Implement HTTP policy for Namenode and DataNode. (Haohui Mai via
+    jing9)
+
+    HDFS-5630. Hook up cache directive and pool usage statistics. (wang)
+
   OPTIMIZATIONS
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
 
@@ -421,6 +426,11 @@ Trunk (Unreleased)
     HDFS-5562. TestCacheDirectives and TestFsDatasetCache should stub out
     native mlock. (Colin McCabe and Akira Ajisaka via wang)
 
+    HDFS-5555. CacheAdmin commands fail when first listed NameNode is in
+    Standby (jxiang via cmccabe)
+
+    HDFS-5626. dfsadmin -report shows incorrect cache values. (cmccabe)
+
 Release 2.4.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -449,6 +459,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-5444. Choose default web UI based on browser capabilities. (Haohui Mai
     via jing9)
 
+    HDFS-5514. FSNamesystem's fsLock should allow custom implementation (daryn)
+
   IMPROVEMENTS
 
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
@@ -564,6 +576,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-5581. NameNodeFsck should use only one instance of
     BlockPlacementPolicy. (vinay via cmccabe)
 
+    HDFS-5633. Improve OfflineImageViewer to use less memory. (jing9)
+
   OPTIMIZATIONS
 
     HDFS-5239.  Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -767,6 +781,14 @@ Release 2.3.0 - UNRELEASED
     HDFS-5563. NFS gateway should commit the buffered data when read request comes
     after write to the same file (brandonli)
 
+    HDFS-4997. libhdfs doesn't return correct error codes in most cases (cmccabe)
+
+    HDFS-5587. add debug information when NFS fails to start with duplicate user
+    or group names (brandonli)
+
+    HDFS-5590. Block ID and generation stamp may be reused when persistBlocks is 
+    set to false. (jing9)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
@@ -4063,6 +4085,9 @@ Release 0.23.10 - UNRELEASED
     HDFS-5557. Write pipeline recovery for the last packet in the block may
     cause rejection of valid replicas. (kihwal)
 
+    HDFS-5558. LeaseManager monitor thread can crash if the last block is
+    complete but another block is not. (kihwal)
+
 Release 0.23.9 - 2013-07-08
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c Fri Dec  6 06:57:15 2013
@@ -39,14 +39,6 @@ struct jsonException {
   const char *message;
 };
 
-static void dotsToSlashes(char *str)
-{
-    for (; *str != '\0'; str++) {
-        if (*str == '.')
-            *str = '/';
-    }
-}
-
 /** Print out the JSON exception information */
 static int printJsonExceptionV(struct jsonException *exc, int noPrintFlags,
                                const char *fmt, va_list ap)
@@ -62,7 +54,6 @@ static int printJsonExceptionV(struct js
         fprintf(stderr, "printJsonExceptionV: internal out of memory error\n");
         return EINTERNAL;
     }
-    dotsToSlashes(javaClassName);
     getExceptionInfo(javaClassName, noPrintFlags, &excErrno, &shouldPrint);
     free(javaClassName);
     

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1547224-1548385

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Fri Dec  6 06:57:15 2013
@@ -109,8 +109,10 @@ import org.apache.hadoop.hdfs.client.Cli
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolIterator;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -2324,12 +2326,7 @@ public class DFSClient implements java.i
   
   public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
       CacheDirectiveInfo filter) throws IOException {
-    checkOpen();
-    try {
-      return namenode.listCacheDirectives(0, filter);
-    } catch (RemoteException re) {
-      throw re.unwrapRemoteException();
-    }
+    return new CacheDirectiveIterator(namenode, filter);
   }
 
   public void addCachePool(CachePoolInfo info) throws IOException {
@@ -2360,12 +2357,7 @@ public class DFSClient implements java.i
   }
 
   public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
-    checkOpen();
-    try {
-      return namenode.listCachePools("");
-    } catch (RemoteException re) {
-      throw re.unwrapRemoteException();
-    }
+    return new CachePoolIterator(namenode);
   }
 
   /**
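
A minimal sketch, not part of this commit, of how client code might consume the iterator-backed listing API after this change; the open DFSClient instance is assumed to come from elsewhere, and the empty filter simply lists every directive visible to the caller. Callers keep the same RemoteIterator-based loop; only the construction of the iterator changes.

    import java.io.IOException;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

    public class ListCacheDirectivesSketch {
      // dfsClient is assumed to be an already-constructed, open DFSClient.
      static void printDirectiveIds(DFSClient dfsClient) throws IOException {
        CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder().build();
        RemoteIterator<CacheDirectiveEntry> it =
            dfsClient.listCacheDirectives(filter);
        while (it.hasNext()) {
          // CacheDirectiveIterator fetches each batch from the NameNode lazily.
          CacheDirectiveEntry entry = it.next();
          System.out.println("directive id: " + entry.getInfo().getId());
        }
      }
    }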

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Dec  6 06:57:15 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
+import org.apache.hadoop.http.HttpConfig;
 
 /** 
  * This class contains constants for configuration keys used
@@ -165,8 +166,6 @@ public class DFSConfigKeys extends Commo
   public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
   public static final String  DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
   public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
-  public static final String  DFS_PERSIST_BLOCKS_KEY = "dfs.persist.blocks";
-  public static final boolean DFS_PERSIST_BLOCKS_DEFAULT = false;
   public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_KEY = "dfs.permissions.superusergroup";
   public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
   public static final String  DFS_ADMIN = "dfs.cluster.administrators";
@@ -358,6 +357,8 @@ public class DFSConfigKeys extends Commo
   public static final boolean DFS_SUPPORT_APPEND_DEFAULT = true;
   public static final String  DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
   public static final boolean DFS_HTTPS_ENABLE_DEFAULT = false;
+  public static final String  DFS_HTTP_POLICY_KEY = "dfs.http.policy";
+  public static final String  DFS_HTTP_POLICY_DEFAULT =  HttpConfig.Policy.HTTP_ONLY.name();
   public static final String  DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY = "dfs.default.chunk.view.size";
   public static final int     DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT = 32*1024;
   public static final String  DFS_DATANODE_HTTPS_ADDRESS_KEY = "dfs.datanode.https.address";

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Fri Dec  6 06:57:15 2013
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
@@ -65,6 +67,7 @@ import org.apache.hadoop.HadoopIllegalAr
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
@@ -78,6 +81,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
@@ -1415,12 +1419,58 @@ public class DFSUtil {
         defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
   }
 
-  public static HttpServer.Builder loadSslConfToHttpServerBuilder(
-      HttpServer.Builder builder, Configuration sslConf) {
+  /**
+   * Get http policy. Http Policy is chosen as follows:
+   * <ol>
+   * <li>If hadoop.ssl.enabled is set, http endpoints are not started. Only
+   * https endpoints are started on configured https ports</li>
+   * <li>This configuration is overridden by dfs.https.enable configuration, if
+   * it is set to true. In that case, both http and https endpoints are started.</li>
+   * <li>All the above configurations are overridden by dfs.http.policy
+   * configuration. With this configuration you can set http-only, https-only
+   * and http-and-https endpoints.</li>
+   * </ol>
+   * See hdfs-default.xml documentation for more details on each of the above
+   * configuration settings.
+   */
+  public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
+    String httpPolicy = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY,
+        DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
+
+    HttpConfig.Policy policy = HttpConfig.Policy.fromString(httpPolicy);
+
+    if (policy == HttpConfig.Policy.HTTP_ONLY) {
+      boolean httpsEnabled = conf.getBoolean(
+          DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
+          DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);
+
+      boolean hadoopSslEnabled = conf.getBoolean(
+          CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
+          CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);
+
+      if (hadoopSslEnabled) {
+        LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
+            + " is deprecated. Please use "
+            + DFSConfigKeys.DFS_HTTPS_ENABLE_KEY + ".");
+        policy = HttpConfig.Policy.HTTPS_ONLY;
+      } else if (httpsEnabled) {
+        LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
+            + " is deprecated. Please use "
+            + DFSConfigKeys.DFS_HTTPS_ENABLE_KEY + ".");
+        policy = HttpConfig.Policy.HTTP_AND_HTTPS;
+      }
+    }
+
+    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
+    return policy;
+  }
+
+  public static HttpServer.Builder loadSslConfToHttpServerBuilder(HttpServer.Builder builder,
+      Configuration sslConf) {
     return builder
         .needsClientAuth(
-            sslConf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
-                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
+            sslConf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
+                DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
         .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
         .keyStore(sslConf.get("ssl.server.keystore.location"),
             sslConf.get("ssl.server.keystore.password"),

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java Fri Dec  6 06:57:15 2013
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configuration.DeprecationDelta;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java Fri Dec  6 06:57:15 2013
@@ -46,7 +46,9 @@ public final class CacheDirective implem
 
   private long bytesNeeded;
   private long bytesCached;
-  private long filesAffected;
+  private long filesNeeded;
+  private long filesCached;
+
   private Element prev;
   private Element next;
 
@@ -58,9 +60,6 @@ public final class CacheDirective implem
     Preconditions.checkArgument(replication > 0);
     this.replication = replication;
     this.expiryTime = expiryTime;
-    this.bytesNeeded = 0;
-    this.bytesCached = 0;
-    this.filesAffected = 0;
   }
 
   public long getId() {
@@ -112,7 +111,8 @@ public final class CacheDirective implem
     return new CacheDirectiveStats.Builder().
         setBytesNeeded(bytesNeeded).
         setBytesCached(bytesCached).
-        setFilesAffected(filesAffected).
+        setFilesNeeded(filesNeeded).
+        setFilesCached(filesCached).
         setHasExpired(new Date().getTime() > expiryTime).
         build();
   }
@@ -131,7 +131,8 @@ public final class CacheDirective implem
       append(", expiryTime: ").append(getExpiryTimeString()).
       append(", bytesNeeded:").append(bytesNeeded).
       append(", bytesCached:").append(bytesCached).
-      append(", filesAffected:").append(filesAffected).
+      append(", filesNeeded:").append(filesNeeded).
+      append(", filesCached:").append(filesCached).
       append(" }");
     return builder.toString();
   }
@@ -152,42 +153,60 @@ public final class CacheDirective implem
     return new HashCodeBuilder().append(id).toHashCode();
   }
 
-  public long getBytesNeeded() {
-    return bytesNeeded;
+  //
+  // Stats related getters and setters
+  //
+
+  /**
+   * Resets the byte and file statistics being tracked by this CacheDirective.
+   */
+  public void resetStatistics() {
+    bytesNeeded = 0;
+    bytesCached = 0;
+    filesNeeded = 0;
+    filesCached = 0;
   }
 
-  public void clearBytesNeeded() {
-    this.bytesNeeded = 0;
+  public long getBytesNeeded() {
+    return bytesNeeded;
   }
 
-  public void addBytesNeeded(long toAdd) {
-    this.bytesNeeded += toAdd;
+  public void addBytesNeeded(long bytes) {
+    this.bytesNeeded += bytes;
+    pool.addBytesNeeded(bytes);
   }
 
   public long getBytesCached() {
     return bytesCached;
   }
 
-  public void clearBytesCached() {
-    this.bytesCached = 0;
+  public void addBytesCached(long bytes) {
+    this.bytesCached += bytes;
+    pool.addBytesCached(bytes);
   }
 
-  public void addBytesCached(long toAdd) {
-    this.bytesCached += toAdd;
+  public long getFilesNeeded() {
+    return filesNeeded;
   }
 
-  public long getFilesAffected() {
-    return filesAffected;
+  public void addFilesNeeded(long files) {
+    this.filesNeeded += files;
+    pool.addFilesNeeded(files);
   }
 
-  public void clearFilesAffected() {
-    this.filesAffected = 0;
+  public long getFilesCached() {
+    return filesCached;
   }
 
-  public void incrementFilesAffected() {
-    this.filesAffected++;
+  public void addFilesCached(long files) {
+    this.filesCached += files;
+    pool.addFilesCached(files);
   }
 
+  //
+  // IntrusiveCollection.Element implementation
+  //
+
   @SuppressWarnings("unchecked")
   @Override // IntrusiveCollection.Element
   public void insertInternal(IntrusiveCollection<? extends Element> list,

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java Fri Dec  6 06:57:15 2013
@@ -29,7 +29,8 @@ public class CacheDirectiveStats {
   public static class Builder {
     private long bytesNeeded;
     private long bytesCached;
-    private long filesAffected;
+    private long filesNeeded;
+    private long filesCached;
     private boolean hasExpired;
 
     /**
@@ -38,8 +39,8 @@ public class CacheDirectiveStats {
      * @return New CacheDirectiveStats.
      */
     public CacheDirectiveStats build() {
-      return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected,
-          hasExpired);
+      return new CacheDirectiveStats(bytesNeeded, bytesCached, filesNeeded,
+          filesCached, hasExpired);
     }
 
     /**
@@ -71,13 +72,23 @@ public class CacheDirectiveStats {
     }
 
     /**
-     * Sets the files affected by this directive.
+     * Sets the files needed by this directive.
+     * @param filesNeeded The number of files needed
+     * @return This builder, for call chaining.
+     */
+    public Builder setFilesNeeded(long filesNeeded) {
+      this.filesNeeded = filesNeeded;
+      return this;
+    }
+
+    /**
+     * Sets the files cached by this directive.
      * 
-     * @param filesAffected The files affected.
+     * @param filesCached The number of files cached.
      * @return This builder, for call chaining.
      */
-    public Builder setFilesAffected(long filesAffected) {
-      this.filesAffected = filesAffected;
+    public Builder setFilesCached(long filesCached) {
+      this.filesCached = filesCached;
       return this;
     }
 
@@ -95,14 +106,16 @@ public class CacheDirectiveStats {
 
   private final long bytesNeeded;
   private final long bytesCached;
-  private final long filesAffected;
+  private final long filesNeeded;
+  private final long filesCached;
   private final boolean hasExpired;
 
   private CacheDirectiveStats(long bytesNeeded, long bytesCached,
-      long filesAffected, boolean hasExpired) {
+      long filesNeeded, long filesCached, boolean hasExpired) {
     this.bytesNeeded = bytesNeeded;
     this.bytesCached = bytesCached;
-    this.filesAffected = filesAffected;
+    this.filesNeeded = filesNeeded;
+    this.filesCached = filesCached;
     this.hasExpired = hasExpired;
   }
 
@@ -121,10 +134,17 @@ public class CacheDirectiveStats {
   }
 
   /**
-   * @return The files affected.
+   * @return The number of files needed.
+   */
+  public long getFilesNeeded() {
+    return filesNeeded;
+  }
+
+  /**
+   * @return The number of files cached.
    */
-  public long getFilesAffected() {
-    return filesAffected;
+  public long getFilesCached() {
+    return filesCached;
   }
 
   /**
@@ -140,7 +160,8 @@ public class CacheDirectiveStats {
     builder.append("{");
     builder.append("bytesNeeded: ").append(bytesNeeded);
     builder.append(", ").append("bytesCached: ").append(bytesCached);
-    builder.append(", ").append("filesAffected: ").append(filesAffected);
+    builder.append(", ").append("filesNeeded: ").append(filesNeeded);
+    builder.append(", ").append("filesCached: ").append(filesCached);
     builder.append(", ").append("hasExpired: ").append(hasExpired);
     builder.append("}");
     return builder.toString();

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java Fri Dec  6 06:57:15 2013
@@ -30,7 +30,8 @@ public class CachePoolStats {
   public static class Builder {
     private long bytesNeeded;
     private long bytesCached;
-    private long filesAffected;
+    private long filesNeeded;
+    private long filesCached;
 
     public Builder() {
     }
@@ -45,24 +46,33 @@ public class CachePoolStats {
       return this;
     }
 
-    public Builder setFilesAffected(long filesAffected) {
-      this.filesAffected = filesAffected;
+    public Builder setFilesNeeded(long filesNeeded) {
+      this.filesNeeded = filesNeeded;
+      return this;
+    }
+
+    public Builder setFilesCached(long filesCached) {
+      this.filesCached = filesCached;
       return this;
     }
 
     public CachePoolStats build() {
-      return new CachePoolStats(bytesNeeded, bytesCached, filesAffected);
+      return new CachePoolStats(bytesNeeded, bytesCached, filesNeeded,
+          filesCached);
     }
   };
 
   private final long bytesNeeded;
   private final long bytesCached;
-  private final long filesAffected;
+  private final long filesNeeded;
+  private final long filesCached;
 
-  private CachePoolStats(long bytesNeeded, long bytesCached, long filesAffected) {
+  private CachePoolStats(long bytesNeeded, long bytesCached, long filesNeeded,
+      long filesCached) {
     this.bytesNeeded = bytesNeeded;
     this.bytesCached = bytesCached;
-    this.filesAffected = filesAffected;
+    this.filesNeeded = filesNeeded;
+    this.filesCached = filesCached;
   }
 
   public long getBytesNeeded() {
@@ -70,18 +80,23 @@ public class CachePoolStats {
   }
 
   public long getBytesCached() {
-    return bytesNeeded;
+    return bytesCached;
+  }
+
+  public long getFilesNeeded() {
+    return filesNeeded;
   }
 
-  public long getFilesAffected() {
-    return filesAffected;
+  public long getFilesCached() {
+    return filesCached;
   }
 
   public String toString() {
     return new StringBuilder().append("{").
       append("bytesNeeded:").append(bytesNeeded).
       append(", bytesCached:").append(bytesCached).
-      append(", filesAffected:").append(filesAffected).
+      append(", filesNeeded:").append(filesNeeded).
+      append(", filesCached:").append(filesCached).
       append("}").toString();
   }
 }
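
A small builder sketch using the renamed statistics fields; the numbers are invented, and the point is that getBytesCached() now reports bytesCached rather than bytesNeeded.

    import org.apache.hadoop.hdfs.protocol.CachePoolStats;

    public class CachePoolStatsSketch {
      public static void main(String[] args) {
        CachePoolStats stats = new CachePoolStats.Builder()
            .setBytesNeeded(1024)
            .setBytesCached(512)
            .setFilesNeeded(4)
            .setFilesCached(2)
            .build();
        // Prints {bytesNeeded:1024, bytesCached:512, filesNeeded:4, filesCached:2}
        System.out.println(stats);
        System.out.println(stats.getBytesCached());   // 512 after this fix
      }
    }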

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Fri Dec  6 06:57:15 2013
@@ -28,9 +28,9 @@ import org.apache.hadoop.fs.FileAlreadyE
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -1134,10 +1134,10 @@ public interface ClientProtocol {
    *               listCacheDirectives.
    * @param filter Parameters to use to filter the list results, 
    *               or null to display all directives visible to us.
-   * @return A RemoteIterator which returns CacheDirectiveInfo objects.
+   * @return A batch of CacheDirectiveEntry objects.
    */
   @Idempotent
-  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
+  public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
       long prevId, CacheDirectiveInfo filter) throws IOException;
 
   /**
@@ -1175,9 +1175,9 @@ public interface ClientProtocol {
    * 
    * @param prevPool name of the last pool listed, or the empty string if this is
    *          the first invocation of listCachePools
-   * @return A RemoteIterator which returns CachePool objects.
+   * @return A batch of CachePoolEntry objects.
    */
   @Idempotent
-  public RemoteIterator<CachePoolEntry> listCachePools(String prevPool)
+  public BatchedEntries<CachePoolEntry> listCachePools(String prevPool)
       throws IOException;
 }
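
Since ClientProtocol now hands back raw batches instead of a RemoteIterator, a caller working directly against the protocol pages explicitly. A hedged sketch, with the ClientProtocol proxy assumed to be obtained elsewhere; in practice most callers go through DFSClient, which wraps this paging in the new CachePoolIterator.

    import java.io.IOException;
    import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
    import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;

    public class ListCachePoolsSketch {
      static void printAllPools(ClientProtocol namenode) throws IOException {
        String prevPool = "";        // empty string: start from the beginning
        boolean hasMore = true;
        while (hasMore) {
          BatchedEntries<CachePoolEntry> batch = namenode.listCachePools(prevPool);
          for (int i = 0; i < batch.size(); i++) {
            CachePoolEntry entry = batch.get(i);
            System.out.println(entry.getInfo().getPoolName());
            prevPool = entry.getInfo().getPoolName();   // resume point for next batch
          }
          hasMore = batch.hasMore();
        }
      }
    }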

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Fri Dec  6 06:57:15 2013
@@ -315,9 +315,9 @@ public class DatanodeInfo extends Datano
     buffer.append("DFS Remaining: " +r+ " ("+StringUtils.byteDesc(r)+")"+"\n");
     buffer.append("DFS Used%: "+percent2String(usedPercent) + "\n");
     buffer.append("DFS Remaining%: "+percent2String(remainingPercent) + "\n");
-    buffer.append("Configured Cache Capacity: "+c+" ("+StringUtils.byteDesc(cc)+")"+"\n");
-    buffer.append("Cache Used: "+cu+" ("+StringUtils.byteDesc(u)+")"+"\n");
-    buffer.append("Cache Remaining: " +cr+ " ("+StringUtils.byteDesc(r)+")"+"\n");
+    buffer.append("Configured Cache Capacity: "+cc+" ("+StringUtils.byteDesc(cc)+")"+"\n");
+    buffer.append("Cache Used: "+cu+" ("+StringUtils.byteDesc(cu)+")"+"\n");
+    buffer.append("Cache Remaining: " +cr+ " ("+StringUtils.byteDesc(cr)+")"+"\n");
     buffer.append("Cache Used%: "+percent2String(cacheUsedPercent) + "\n");
     buffer.append("Cache Remaining%: "+percent2String(cacheRemainingPercent) + "\n");
 

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Fri Dec  6 06:57:15 2013
@@ -24,12 +24,9 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.Options.Rename;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -52,8 +49,6 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
@@ -109,7 +104,6 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
@@ -176,9 +170,7 @@ import org.apache.hadoop.security.proto.
 import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
 import org.apache.hadoop.security.token.Token;
-import org.apache.commons.lang.StringUtils;
 
-import com.google.common.primitives.Shorts;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 
@@ -1079,21 +1071,13 @@ public class ClientNamenodeProtocolServe
     try {
       CacheDirectiveInfo filter =
           PBHelper.convert(request.getFilter());
-      RemoteIterator<CacheDirectiveEntry> iter =
-         server.listCacheDirectives(request.getPrevId(), filter);
+      BatchedEntries<CacheDirectiveEntry> entries =
+        server.listCacheDirectives(request.getPrevId(), filter);
       ListCacheDirectivesResponseProto.Builder builder =
           ListCacheDirectivesResponseProto.newBuilder();
-      long prevId = 0;
-      while (iter.hasNext()) {
-        CacheDirectiveEntry entry = iter.next();
-        builder.addElements(PBHelper.convert(entry));
-        prevId = entry.getInfo().getId();
-      }
-      if (prevId == 0) {
-        builder.setHasMore(false);
-      } else {
-        iter = server.listCacheDirectives(prevId, filter);
-        builder.setHasMore(iter.hasNext());
+      builder.setHasMore(entries.hasMore());
+      for (int i=0, n=entries.size(); i<n; i++) {
+        builder.addElements(PBHelper.convert(entries.get(i)));
       }
       return builder.build();
     } catch (IOException e) {
@@ -1138,22 +1122,13 @@ public class ClientNamenodeProtocolServe
   public ListCachePoolsResponseProto listCachePools(RpcController controller,
       ListCachePoolsRequestProto request) throws ServiceException {
     try {
-      RemoteIterator<CachePoolEntry> iter =
+      BatchedEntries<CachePoolEntry> entries =
         server.listCachePools(request.getPrevPoolName());
       ListCachePoolsResponseProto.Builder responseBuilder =
         ListCachePoolsResponseProto.newBuilder();
-      String prevPoolName = null;
-      while (iter.hasNext()) {
-        CachePoolEntry entry = iter.next();
-        responseBuilder.addEntries(PBHelper.convert(entry));
-        prevPoolName = entry.getInfo().getPoolName();
-      }
-      // fill in hasNext
-      if (prevPoolName == null) {
-        responseBuilder.setHasMore(false);
-      } else {
-        iter = server.listCachePools(prevPoolName);
-        responseBuilder.setHasMore(iter.hasNext());
+      responseBuilder.setHasMore(entries.hasMore());
+      for (int i=0, n=entries.size(); i<n; i++) {
+        responseBuilder.addEntries(PBHelper.convert(entries.get(i)));
       }
       return responseBuilder.build();
     } catch (IOException e) {

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Fri Dec  6 06:57:15 2013
@@ -24,7 +24,6 @@ import java.util.Arrays;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.BatchedRemoteIterator;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -32,7 +31,6 @@ import org.apache.hadoop.fs.FileAlreadyE
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -1062,46 +1060,23 @@ public class ClientNamenodeProtocolTrans
     }
   }
 
-  private class CacheEntriesIterator
-    extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
-      private final CacheDirectiveInfo filter;
-
-    public CacheEntriesIterator(long prevKey,
-        CacheDirectiveInfo filter) {
-      super(prevKey);
-      this.filter = filter;
-    }
-
-    @Override
-    public BatchedEntries<CacheDirectiveEntry> makeRequest(
-        Long nextKey) throws IOException {
-      ListCacheDirectivesResponseProto response;
-      try {
-        response = rpcProxy.listCacheDirectives(null,
-            ListCacheDirectivesRequestProto.newBuilder().
-                setPrevId(nextKey).
-                setFilter(PBHelper.convert(filter)).
-                build());
-      } catch (ServiceException e) {
-        throw ProtobufHelper.getRemoteException(e);
-      }
-      return new BatchedCacheEntries(response);
-    }
-
-    @Override
-    public Long elementToPrevKey(CacheDirectiveEntry element) {
-      return element.getInfo().getId();
-    }
-  }
-
   @Override
-  public RemoteIterator<CacheDirectiveEntry>
+  public BatchedEntries<CacheDirectiveEntry>
       listCacheDirectives(long prevId,
           CacheDirectiveInfo filter) throws IOException {
     if (filter == null) {
       filter = new CacheDirectiveInfo.Builder().build();
     }
-    return new CacheEntriesIterator(prevId, filter);
+    try {
+      return new BatchedCacheEntries(
+        rpcProxy.listCacheDirectives(null,
+          ListCacheDirectivesRequestProto.newBuilder().
+            setPrevId(prevId).
+            setFilter(PBHelper.convert(filter)).
+            build()));
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
   }
 
   @Override
@@ -1164,35 +1139,16 @@ public class ClientNamenodeProtocolTrans
     }
   }
 
-  private class CachePoolIterator 
-      extends BatchedRemoteIterator<String, CachePoolEntry> {
-
-    public CachePoolIterator(String prevKey) {
-      super(prevKey);
-    }
-
-    @Override
-    public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
-        throws IOException {
-      try {
-        return new BatchedCachePoolEntries(
-            rpcProxy.listCachePools(null, 
-              ListCachePoolsRequestProto.newBuilder().
-                setPrevPoolName(prevKey).build()));
-      } catch (ServiceException e) {
-        throw ProtobufHelper.getRemoteException(e);
-      }
-    }
-
-    @Override
-    public String elementToPrevKey(CachePoolEntry entry) {
-      return entry.getInfo().getPoolName();
-    }
-  }
-
   @Override
-  public RemoteIterator<CachePoolEntry> listCachePools(String prevKey)
+  public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
       throws IOException {
-    return new CachePoolIterator(prevKey);
+    try {
+      return new BatchedCachePoolEntries(
+        rpcProxy.listCachePools(null,
+          ListCachePoolsRequestProto.newBuilder().
+            setPrevPoolName(prevKey).build()));
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
   }
 }

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Fri Dec  6 06:57:15 2013
@@ -1642,7 +1642,8 @@ public class PBHelper {
         CacheDirectiveStatsProto.newBuilder();
     builder.setBytesNeeded(stats.getBytesNeeded());
     builder.setBytesCached(stats.getBytesCached());
-    builder.setFilesAffected(stats.getFilesAffected());
+    builder.setFilesNeeded(stats.getFilesNeeded());
+    builder.setFilesCached(stats.getFilesCached());
     builder.setHasExpired(stats.hasExpired());
     return builder.build();
   }
@@ -1651,7 +1652,8 @@ public class PBHelper {
     CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
     builder.setBytesNeeded(proto.getBytesNeeded());
     builder.setBytesCached(proto.getBytesCached());
-    builder.setFilesAffected(proto.getFilesAffected());
+    builder.setFilesNeeded(proto.getFilesNeeded());
+    builder.setFilesCached(proto.getFilesCached());
     builder.setHasExpired(proto.getHasExpired());
     return builder.build();
   }
@@ -1711,7 +1713,8 @@ public class PBHelper {
     CachePoolStatsProto.Builder builder = CachePoolStatsProto.newBuilder();
     builder.setBytesNeeded(stats.getBytesNeeded());
     builder.setBytesCached(stats.getBytesCached());
-    builder.setFilesAffected(stats.getFilesAffected());
+    builder.setFilesNeeded(stats.getFilesNeeded());
+    builder.setFilesCached(stats.getFilesCached());
     return builder.build();
   }
 
@@ -1719,7 +1722,8 @@ public class PBHelper {
     CachePoolStats.Builder builder = new CachePoolStats.Builder();
     builder.setBytesNeeded(proto.getBytesNeeded());
     builder.setBytesCached(proto.getBytesCached());
-    builder.setFilesAffected(proto.getFilesAffected());
+    builder.setFilesNeeded(proto.getFilesNeeded());
+    builder.setFilesCached(proto.getFilesCached());
     return builder.build();
   }
 

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java Fri Dec  6 06:57:15 2013
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.CacheManager;
+import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -198,6 +199,7 @@ public class CacheReplicationMonitor ext
     scannedBlocks = 0;
     namesystem.writeLock();
     try {
+      resetStatistics();
       rescanCacheDirectives();
       rescanCachedBlockMap();
       blockManager.getDatanodeManager().resetLastCachingDirectiveSentTime();
@@ -206,6 +208,15 @@ public class CacheReplicationMonitor ext
     }
   }
 
+  private void resetStatistics() {
+    for (CachePool pool: cacheManager.getCachePools()) {
+      pool.resetStatistics();
+    }
+    for (CacheDirective directive: cacheManager.getCacheDirectives()) {
+      directive.resetStatistics();
+    }
+  }
+
   /**
    * Scan all CacheDirectives.  Use the information to figure out
    * what cache replication factor each block should have.
@@ -213,11 +224,9 @@ public class CacheReplicationMonitor ext
   private void rescanCacheDirectives() {
     FSDirectory fsDir = namesystem.getFSDirectory();
     final long now = new Date().getTime();
-    for (CacheDirective directive : cacheManager.getEntriesById().values()) {
-      // Reset the directive
-      directive.clearBytesNeeded();
-      directive.clearBytesCached();
-      directive.clearFilesAffected();
+    for (CacheDirective directive : cacheManager.getCacheDirectives()) {
+      // Reset the directive's statistics
+      directive.resetStatistics();
       // Skip processing this entry if it has expired
       LOG.info("Directive expiry is at " + directive.getExpiryTime());
       if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
@@ -262,26 +271,34 @@ public class CacheReplicationMonitor ext
   
   /**
    * Apply a CacheDirective to a file.
-   *
-   * @param pce       The CacheDirective to apply.
-   * @param file      The file.
+   * 
+   * @param directive The CacheDirective to apply.
+   * @param file The file.
    */
-  private void rescanFile(CacheDirective pce, INodeFile file) {
-    pce.incrementFilesAffected();
+  private void rescanFile(CacheDirective directive, INodeFile file) {
     BlockInfo[] blockInfos = file.getBlocks();
-    long cachedTotal = 0;
+
+    // Increment the "needed" statistics
+    directive.addFilesNeeded(1);
     long neededTotal = 0;
     for (BlockInfo blockInfo : blockInfos) {
+      long neededByBlock = 
+          directive.getReplication() * blockInfo.getNumBytes();
+       neededTotal += neededByBlock;
+    }
+    directive.addBytesNeeded(neededTotal);
+
+    // TODO: Enforce per-pool quotas
+
+    long cachedTotal = 0;
+    for (BlockInfo blockInfo : blockInfos) {
       if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
         // We don't try to cache blocks that are under construction.
         continue;
       }
-      long neededByBlock = 
-         pce.getReplication() * blockInfo.getNumBytes();
-      neededTotal += neededByBlock;
       Block block = new Block(blockInfo.getBlockId());
       CachedBlock ncblock = new CachedBlock(block.getBlockId(),
-          pce.getReplication(), mark);
+          directive.getReplication(), mark);
       CachedBlock ocblock = cachedBlocks.get(ncblock);
       if (ocblock == null) {
         cachedBlocks.put(ncblock);
@@ -294,26 +311,30 @@ public class CacheReplicationMonitor ext
         // both get them added to their bytesCached.
         List<DatanodeDescriptor> cachedOn =
             ocblock.getDatanodes(Type.CACHED);
-        long cachedByBlock = Math.min(cachedOn.size(), pce.getReplication()) *
-            blockInfo.getNumBytes();
+        long cachedByBlock = Math.min(cachedOn.size(),
+            directive.getReplication()) * blockInfo.getNumBytes();
         cachedTotal += cachedByBlock;
 
         if (mark != ocblock.getMark()) {
           // Mark hasn't been set in this scan, so update replication and mark.
-          ocblock.setReplicationAndMark(pce.getReplication(), mark);
+          ocblock.setReplicationAndMark(directive.getReplication(), mark);
         } else {
           // Mark already set in this scan.  Set replication to highest value in
           // any CacheDirective that covers this file.
           ocblock.setReplicationAndMark((short)Math.max(
-              pce.getReplication(), ocblock.getReplication()), mark);
+              directive.getReplication(), ocblock.getReplication()), mark);
         }
       }
     }
-    pce.addBytesNeeded(neededTotal);
-    pce.addBytesCached(cachedTotal);
+    // Increment the "cached" statistics
+    directive.addBytesCached(cachedTotal);
+    if (cachedTotal == neededTotal) {
+      directive.addFilesCached(1);
+    }
     if (LOG.isTraceEnabled()) {
-      LOG.debug("Directive " + pce.getId() + " is caching " +
-          file.getFullPathName() + ": " + cachedTotal + "/" + neededTotal);
+      LOG.trace("Directive " + directive.getId() + " is caching " +
+          file.getFullPathName() + ": " + cachedTotal + "/" + neededTotal +
+          " bytes");
     }
   }
 

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java Fri Dec  6 06:57:15 2013
@@ -458,7 +458,7 @@ class BPServiceActor implements Runnable
       long createCost = createTime - startTime;
       long sendCost = sendTime - createTime;
       dn.getMetrics().addCacheReport(sendCost);
-      LOG.info("CacheReport of " + blockIds.size()
+      LOG.debug("CacheReport of " + blockIds.size()
           + " block(s) took " + createCost + " msec to generate and "
           + sendCost + " msecs for RPC and NN processing");
     }

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Dec  6 06:57:15 2013
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -65,6 +64,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.ReadaheadPool;
@@ -181,9 +181,11 @@ public class DataNode extends Configured
   private DNConf dnConf;
   private volatile boolean heartbeatsDisabledForTests = false;
   private DataStorage storage = null;
+
   private HttpServer infoServer = null;
   private int infoPort;
   private int infoSecurePort;
+
   DataNodeMetrics metrics;
   private InetSocketAddress streamingAddr;
   
@@ -288,7 +290,7 @@ public class DataNode extends Configured
    * explicitly configured in the given config, then it is determined
    * via the DNS class.
    *
-   * @param config
+   * @param config configuration
    * @return the hostname (NB: may not be a FQDN)
    * @throws UnknownHostException if the dfs.datanode.dns.interface
    *    option is used and the hostname can not be determined
@@ -306,39 +308,54 @@ public class DataNode extends Configured
     return name;
   }
 
+  /**
+   * @see DFSUtil#getHttpPolicy(org.apache.hadoop.conf.Configuration)
+   * for information on how the HTTP policy is decided from the different
+   * configuration options.
+   */
   private void startInfoServer(Configuration conf) throws IOException {
-    // create a servlet to serve full-file content
+    HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
+        .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
+
+    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
     InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
     String infoHost = infoSocAddr.getHostName();
-    int tmpInfoPort = infoSocAddr.getPort();
-    HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
-        .addEndpoint(URI.create("http://" + NetUtils.getHostPortString(infoSocAddr)))
-        .setFindPort(tmpInfoPort == 0).setConf(conf)
-        .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
 
-    LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
-    if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
+    if (policy.isHttpEnabled()) {
+      if (secureResources == null) {
+        int port = infoSocAddr.getPort();
+        builder.addEndpoint(URI.create("http://" + infoHost + ":" + port));
+        if (port == 0) {
+          builder.setFindPort(true);
+        }
+      } else {
+        // The http socket is created externally using JSVC, we add it in
+        // directly.
+        builder.setConnector(secureResources.getListener());
+      }
+    }
+
+    if (policy.isHttpsEnabled()) {
       InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
           DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
-      builder.addEndpoint(URI.create("https://"
-          + NetUtils.getHostPortString(secInfoSocAddr)));
+
       Configuration sslConf = new Configuration(false);
-      sslConf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf
-          .getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
-              DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
       sslConf.addResource(conf.get(
           DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
           DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
+      sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf.getBoolean(
+          DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
       DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
 
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Datanode listening for SSL on " + secInfoSocAddr);
+      int port = secInfoSocAddr.getPort();
+      if (port == 0) {
+        builder.setFindPort(true);
       }
-      infoSecurePort = secInfoSocAddr.getPort();
+      builder.addEndpoint(URI.create("https://" + infoHost + ":" + port));
     }
 
-    this.infoServer = (secureResources == null) ? builder.build() :
-      builder.setConnector(secureResources.getListener()).build();
+    this.infoServer = builder.build();
+
     this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
     this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
         FileChecksumServlets.GetServlet.class);
@@ -354,9 +371,17 @@ public class DataNode extends Configured
           WebHdfsFileSystem.PATH_PREFIX + "/*");
     }
     this.infoServer.start();
-    this.infoPort = infoServer.getConnectorAddress(0).getPort();
+
+    int connIdx = 0;
+    if (policy.isHttpEnabled()) {
+      infoPort = infoServer.getConnectorAddress(connIdx++).getPort();
+    }
+
+    if (policy.isHttpsEnabled()) {
+      infoSecurePort = infoServer.getConnectorAddress(connIdx).getPort();
+    }
   }
-  
+
   private void startPlugins(Configuration conf) {
     plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
     for (ServicePlugin p: plugins) {
@@ -690,6 +715,8 @@ public class DataNode extends Configured
           ulimit));
       }
     }
+    LOG.info("Starting DataNode with maxLockedMemory = " +
+        dnConf.maxLockedMemory);
 
     storage = new DataStorage();
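
The DataNode hunks above replace the unconditional HTTP endpoint (plus optional HTTPS) with endpoints driven by the configured HTTP policy, and then read the bound ports back by connector index. A standalone sketch of that policy-driven pattern; the Policy enum and ServerBuilder below are hypothetical stand-ins, not Hadoop's HttpConfig or HttpServer.Builder:

import java.net.URI;
import java.util.ArrayList;
import java.util.List;

public class PolicyEndpointSketch {
  enum Policy {
    HTTP_ONLY, HTTPS_ONLY, HTTP_AND_HTTPS;
    boolean isHttpEnabled()  { return this != HTTPS_ONLY; }
    boolean isHttpsEnabled() { return this != HTTP_ONLY; }
  }

  static final class ServerBuilder {
    private final List<URI> endpoints = new ArrayList<>();
    ServerBuilder addEndpoint(URI uri) { endpoints.add(uri); return this; }
    List<URI> build() { return endpoints; }
  }

  public static void main(String[] args) {
    Policy policy = Policy.HTTP_AND_HTTPS;
    ServerBuilder builder = new ServerBuilder();

    // Add endpoints only for the protocols the policy enables.
    if (policy.isHttpEnabled()) {
      builder.addEndpoint(URI.create("http://0.0.0.0:50075"));
    }
    if (policy.isHttpsEnabled()) {
      builder.addEndpoint(URI.create("https://0.0.0.0:50475"));
    }
    List<URI> endpoints = builder.build();

    // Ports are read back by index: the http connector (if enabled) comes
    // first and the https connector follows it.
    int idx = 0;
    if (policy.isHttpEnabled()) {
      System.out.println("info port:        " + endpoints.get(idx++).getPort());
    }
    if (policy.isHttpsEnabled()) {
      System.out.println("secure info port: " + endpoints.get(idx).getPort());
    }
  }
}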
     

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java Fri Dec  6 06:57:15 2013
@@ -16,27 +16,20 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.nio.channels.ServerSocketChannel;
-import java.security.GeneralSecurityException;
 
 import org.apache.commons.daemon.Daemon;
 import org.apache.commons.daemon.DaemonContext;
 import org.apache.hadoop.conf.Configuration;
-
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.ssl.SSLFactory;
 import org.mortbay.jetty.Connector;
-import org.mortbay.jetty.nio.SelectChannelConnector;
-import org.mortbay.jetty.security.SslSocketConnector;
-
-import javax.net.ssl.SSLServerSocketFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -65,7 +58,6 @@ public class SecureDataNodeStarter imple
   
   private String [] args;
   private SecureResources resources;
-  private SSLFactory sslFactory;
 
   @Override
   public void init(DaemonContext context) throws Exception {
@@ -74,9 +66,7 @@ public class SecureDataNodeStarter imple
     
     // Stash command-line arguments for regular datanode
     args = context.getArguments();
-    
-    sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
-    resources = getSecureResources(sslFactory, conf);
+    resources = getSecureResources(conf);
   }
 
   @Override
@@ -84,68 +74,65 @@ public class SecureDataNodeStarter imple
     System.err.println("Starting regular datanode initialization");
     DataNode.secureMain(args, resources);
   }
-  
-  @Override public void destroy() {
-    sslFactory.destroy();
-  }
 
+  @Override public void destroy() {}
   @Override public void stop() throws Exception { /* Nothing to do */ }
 
+  /**
+   * Acquire privileged resources (i.e., the privileged ports) for the data
+   * node. The privileged resources consist of the data streaming port and
+   * the port of the HTTP (not HTTPS) server.
+   */
   @VisibleForTesting
-  public static SecureResources getSecureResources(final SSLFactory sslFactory,
-                                  Configuration conf) throws Exception {
+  public static SecureResources getSecureResources(Configuration conf)
+      throws Exception {
+    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
+
     // Obtain secure port for data streaming to datanode
     InetSocketAddress streamingAddr  = DataNode.getStreamingAddr(conf);
-    int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
+    int socketWriteTimeout = conf.getInt(
+        DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
         HdfsServerConstants.WRITE_TIMEOUT);
-    
+
     ServerSocket ss = (socketWriteTimeout > 0) ? 
         ServerSocketChannel.open().socket() : new ServerSocket();
     ss.bind(streamingAddr, 0);
-    
+
     // Check that we got the port we need
     if (ss.getLocalPort() != streamingAddr.getPort()) {
-      throw new RuntimeException("Unable to bind on specified streaming port in secure " +
-          "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
+      throw new RuntimeException(
+          "Unable to bind on specified streaming port in secure "
+              + "context. Needed " + streamingAddr.getPort() + ", got "
+              + ss.getLocalPort());
     }
 
-    // Obtain secure listener for web server
-    Connector listener;
-    if (HttpConfig.isSecure()) {
-      try {
-        sslFactory.init();
-      } catch (GeneralSecurityException ex) {
-        throw new IOException(ex);
-      }
-      SslSocketConnector sslListener = new SslSocketConnector() {
-        @Override
-        protected SSLServerSocketFactory createFactory() throws Exception {
-          return sslFactory.createSSLServerSocketFactory();
-        }
-      };
-      listener = sslListener;
-    } else {
+    System.err.println("Opened streaming server at " + streamingAddr);
+
+    // Bind a port for the web server. Only the HTTP server needs a privileged
+    // port here; when the client talks to the server over SSL, it can
+    // authenticate the server using its certificate instead.
+    Connector listener = null;
+    if (policy.isHttpEnabled()) {
       listener = HttpServer.createDefaultChannelConnector();
-    }
+      InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
+      listener.setHost(infoSocAddr.getHostName());
+      listener.setPort(infoSocAddr.getPort());
+      // Open listener here in order to bind to port as root
+      listener.open();
+      if (listener.getPort() != infoSocAddr.getPort()) {
+        throw new RuntimeException("Unable to bind on specified info port in secure " +
+            "context. Needed " + infoSocAddr.getPort() + ", got " + listener.getPort());
+      }
+      System.err.println("Successfully obtained privileged resources (streaming port = "
+          + ss + " ) (http listener port = " + listener.getConnection() +")");
 
-    InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
-    listener.setHost(infoSocAddr.getHostName());
-    listener.setPort(infoSocAddr.getPort());
-    // Open listener here in order to bind to port as root
-    listener.open();
-    if (listener.getPort() != infoSocAddr.getPort()) {
-      throw new RuntimeException("Unable to bind on specified info port in secure " +
-          "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
-    }
-    System.err.println("Successfully obtained privileged resources (streaming port = "
-        + ss + " ) (http listener port = " + listener.getConnection() +")");
-    
-    if ((ss.getLocalPort() > 1023 || listener.getPort() > 1023) &&
-        UserGroupInformation.isSecurityEnabled()) {
-      throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
+      if ((ss.getLocalPort() > 1023 || listener.getPort() > 1023) &&
+          UserGroupInformation.isSecurityEnabled()) {
+        throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
+      }
+      System.err.println("Opened info server at " + infoSocAddr);
     }
-    System.err.println("Opened streaming server at " + streamingAddr);
-    System.err.println("Opened info server at " + infoSocAddr);
+
     return new SecureResources(ss, listener);
   }
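
The rewritten getSecureResources() binds the streaming socket (and, when plain HTTP is enabled, the web listener) while still running as root under jsvc, then verifies it actually received the requested privileged ports before handing them to the regular datanode. A JDK-only sketch of that bind-and-verify step, with hypothetical names and a dummy securityEnabled flag:

import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class PrivilegedBindSketch {
  public static void main(String[] args) throws Exception {
    // Port 0 is used here so the example runs unprivileged; a real secure
    // datanode would request a fixed port below 1024.
    InetSocketAddress requested = new InetSocketAddress("0.0.0.0", 0);

    ServerSocket ss = new ServerSocket();
    ss.bind(requested, 0);

    // Verify we were given the port we asked for (always true for port 0).
    if (requested.getPort() != 0 && ss.getLocalPort() != requested.getPort()) {
      ss.close();
      throw new RuntimeException("Unable to bind on port " + requested.getPort()
          + ", got " + ss.getLocalPort());
    }

    // In a secure cluster, ports above 1023 would be rejected at this point,
    // since only privileged ports prove the process was started as root.
    boolean securityEnabled = false;  // assumption for the sketch
    if (securityEnabled && ss.getLocalPort() > 1023) {
      ss.close();
      throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
    }
    System.out.println("Bound streaming socket at " + ss.getLocalSocketAddress());
    ss.close();
  }
}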
 

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Fri Dec  6 06:57:15 2013
@@ -123,11 +123,6 @@ public class BackupNode extends NameNode
     String addr = conf.get(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
     return NetUtils.createSocketAddr(addr);
   }
-  
-  @Override // NameNode
-  protected void setHttpServerAddress(Configuration conf){
-    conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(getHttpAddress()));
-  }
 
   @Override // NameNode
   protected void loadNamesystem(Configuration conf) throws IOException {
@@ -164,6 +159,10 @@ public class BackupNode extends NameNode
     registerWith(nsInfo);
     // Checkpoint daemon should start after the rpc server started
     runCheckpointDaemon(conf);
+    InetSocketAddress addr = getHttpAddress();
+    if (addr != null) {
+      conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(getHttpAddress()));
+    }
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java Fri Dec  6 06:57:15 2013
@@ -31,6 +31,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -238,9 +239,20 @@ public final class CacheManager {
     return active;
   }
 
-  public TreeMap<Long, CacheDirective> getEntriesById() {
+  /**
+   * @return Unmodifiable view of the collection of CachePools.
+   */
+  public Collection<CachePool> getCachePools() {
+    assert namesystem.hasReadLock();
+    return Collections.unmodifiableCollection(cachePools.values());
+  }
+
+  /**
+   * @return Unmodifiable view of the collection of CacheDirectives.
+   */
+  public Collection<CacheDirective> getCacheDirectives() {
     assert namesystem.hasReadLock();
-    return directivesById;
+    return Collections.unmodifiableCollection(directivesById.values());
   }
   
   @VisibleForTesting
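
The CacheManager accessors above now hand out read-only views over the internal maps instead of the mutable TreeMap itself, so callers such as the replication monitor can iterate the pools and directives but not modify them. A small JDK-only sketch of that pattern (class and field names are illustrative):

import java.util.Collection;
import java.util.Collections;
import java.util.TreeMap;

public class UnmodifiableViewSketch {
  private final TreeMap<Long, String> directivesById = new TreeMap<>();

  public void add(long id, String directive) {
    directivesById.put(id, directive);
  }

  /** Callers can iterate the directives but cannot add or remove them. */
  public Collection<String> getDirectives() {
    return Collections.unmodifiableCollection(directivesById.values());
  }

  public static void main(String[] args) {
    UnmodifiableViewSketch mgr = new UnmodifiableViewSketch();
    mgr.add(1L, "pool1:/cached/path");
    Collection<String> view = mgr.getDirectives();
    System.out.println(view);
    try {
      view.clear();  // throws: the view is read-only
    } catch (UnsupportedOperationException expected) {
      System.out.println("view is unmodifiable, as expected");
    }
  }
}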

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java Fri Dec  6 06:57:15 2013
@@ -70,9 +70,14 @@ public final class CachePool {
    */
   @Nonnull
   private FsPermission mode;
-  
+
   private int weight;
 
+  private long bytesNeeded;
+  private long bytesCached;
+  private long filesNeeded;
+  private long filesCached;
+
   public final static class DirectiveList
       extends IntrusiveCollection<CacheDirective> {
     private CachePool cachePool;
@@ -203,15 +208,58 @@ public final class CachePool {
   }
 
   /**
+   * Resets statistics related to this CachePool
+   */
+  public void resetStatistics() {
+    bytesNeeded = 0;
+    bytesCached = 0;
+    filesNeeded = 0;
+    filesCached = 0;
+  }
+
+  public void addBytesNeeded(long bytes) {
+    bytesNeeded += bytes;
+  }
+
+  public void addBytesCached(long bytes) {
+    bytesCached += bytes;
+  }
+
+  public void addFilesNeeded(long files) {
+    filesNeeded += files;
+  }
+
+  public void addFilesCached(long files) {
+    filesCached += files;
+  }
+
+  public long getBytesNeeded() {
+    return bytesNeeded;
+  }
+
+  public long getBytesCached() {
+    return bytesCached;
+  }
+
+  public long getFilesNeeded() {
+    return filesNeeded;
+  }
+
+  public long getFilesCached() {
+    return filesCached;
+  }
+
+  /**
    * Get statistics about this CachePool.
    *
    * @return   Cache pool statistics.
    */
   private CachePoolStats getStats() {
     return new CachePoolStats.Builder().
-        setBytesNeeded(0).
-        setBytesCached(0).
-        setFilesAffected(0).
+        setBytesNeeded(bytesNeeded).
+        setBytesCached(bytesCached).
+        setFilesNeeded(filesNeeded).
+        setFilesCached(filesCached).
         build();
   }
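
CachePool now carries four running counters that are cleared at the start of each scan and incremented as the pool's directives are rescanned; the stats snapshot is then built from the live counters instead of hard-coded zeros. A compact sketch of such a reset/accumulate/snapshot counter (the class below is a stand-in, not the real CachePoolStats):

public class PoolCounterSketch {
  private long bytesNeeded, bytesCached, filesNeeded, filesCached;

  void resetStatistics() {
    bytesNeeded = bytesCached = filesNeeded = filesCached = 0;
  }

  void addFile(long needed, long cached) {
    filesNeeded++;
    bytesNeeded += needed;
    bytesCached += cached;
    if (cached == needed) {
      filesCached++;   // a file counts as cached only when fully cached
    }
  }

  String snapshot() {
    return "bytes " + bytesCached + "/" + bytesNeeded
        + ", files " + filesCached + "/" + filesNeeded;
  }

  public static void main(String[] args) {
    PoolCounterSketch pool = new PoolCounterSketch();
    pool.resetStatistics();          // start of a scan
    pool.addFile(256, 256);          // fully cached file
    pool.addFile(512, 128);          // partially cached file
    System.out.println(pool.snapshot());
  }
}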
 

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java Fri Dec  6 06:57:15 2013
@@ -117,7 +117,7 @@ public class EditLogFileInputStream exte
    */
   public static EditLogInputStream fromUrl(
       URLConnectionFactory connectionFactory, URL url, long startTxId,
- long endTxId, boolean inProgress) {
+      long endTxId, boolean inProgress) {
     return new EditLogFileInputStream(new URLLog(connectionFactory, url),
         startTxId, endTxId, inProgress);
   }

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Dec  6 06:57:15 2013
@@ -79,8 +79,6 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT;
@@ -153,6 +151,8 @@ import org.apache.hadoop.hdfs.protocol.A
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -164,8 +164,6 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -365,7 +363,6 @@ public class FSNamesystem implements Nam
   static final int DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED = 100;
   static int BLOCK_DELETION_INCREMENT = 1000;
   private final boolean isPermissionEnabled;
-  private final boolean persistBlocks;
   private final UserGroupInformation fsOwner;
   private final String fsOwnerShortUserName;
   private final String supergroup;
@@ -467,7 +464,7 @@ public class FSNamesystem implements Nam
   private final long accessTimePrecision;
 
   /** Lock to protect FSNamesystem. */
-  private ReentrantReadWriteLock fsLock;
+  private FSNamesystemLock fsLock;
 
   /**
    * Used when this NN is in standby state to read from the shared edit log.
@@ -650,7 +647,7 @@ public class FSNamesystem implements Nam
       throws IOException {
     boolean fair = conf.getBoolean("dfs.namenode.fslock.fair", true);
     LOG.info("fsLock is fair:" + fair);
-    fsLock = new ReentrantReadWriteLock(fair);
+    fsLock = new FSNamesystemLock(fair);
     try {
       resourceRecheckInterval = conf.getLong(
           DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
@@ -670,13 +667,10 @@ public class FSNamesystem implements Nam
       LOG.info("supergroup          = " + supergroup);
       LOG.info("isPermissionEnabled = " + isPermissionEnabled);
 
-      final boolean persistBlocks = conf.getBoolean(DFS_PERSIST_BLOCKS_KEY,
-                                                    DFS_PERSIST_BLOCKS_DEFAULT);
       // block allocation has to be persisted in HA using a shared edits directory
       // so that the standby has up-to-date namespace information
       String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
       this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);  
-      this.persistBlocks = persistBlocks || (haEnabled && HAUtil.usesSharedEditsDir(conf));
       
       // Sanity check the HA-related config.
       if (nameserviceId != null) {
@@ -2635,9 +2629,7 @@ public class FSNamesystem implements Nam
     } finally {
       writeUnlock();
     }
-    if (persistBlocks) {
-      getEditLog().logSync();
-    }
+    getEditLog().logSync();
 
     // Return located block
     return makeLocatedBlock(newBlock, targets, offset);
@@ -2828,9 +2820,7 @@ public class FSNamesystem implements Nam
     } finally {
       writeUnlock();
     }
-    if (persistBlocks) {
-      getEditLog().logSync();
-    }
+    getEditLog().logSync();
 
     return true;
   }
@@ -2933,6 +2923,12 @@ public class FSNamesystem implements Nam
       }
       throw lee;
     }
+    // Check the state of the penultimate block. It should be completed
+    // before attempting to complete the last one.
+    if (!checkFileProgress(pendingFile, false)) {
+      return false;
+    }
+
     // commit the last block and complete it if it has minimum replicas
     commitOrCompleteLastBlock(pendingFile, last);
 
@@ -3002,7 +2998,7 @@ public class FSNamesystem implements Nam
         //
         BlockInfo b = v.getPenultimateBlock();
         if (b != null && !b.isComplete()) {
-          LOG.info("BLOCK* checkFileProgress: " + b
+          LOG.warn("BLOCK* checkFileProgress: " + b
               + " has not reached minimal replication "
               + blockManager.minReplication);
           return false;
@@ -6765,12 +6761,12 @@ public class FSNamesystem implements Nam
   
   @VisibleForTesting
   void setFsLockForTests(ReentrantReadWriteLock lock) {
-    this.fsLock = lock;
+    this.fsLock.coarseLock = lock;
   }
   
   @VisibleForTesting
   ReentrantReadWriteLock getFsLockForTests() {
-    return fsLock;
+    return fsLock.coarseLock;
   }
 
   @VisibleForTesting
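
The FSNamesystem hunks replace the raw ReentrantReadWriteLock with an FSNamesystemLock wrapper whose coarseLock field the test hooks can still swap out. A minimal sketch of such a delegating wrapper; only the coarseLock name is taken from the diff, everything else is assumed:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class NamesystemLockSketch {
  // Package-visible so a test can replace it, as setFsLockForTests() does.
  ReentrantReadWriteLock coarseLock;

  NamesystemLockSketch(boolean fair) {
    this.coarseLock = new ReentrantReadWriteLock(fair);
  }

  void readLock()    { coarseLock.readLock().lock(); }
  void readUnlock()  { coarseLock.readLock().unlock(); }
  void writeLock()   { coarseLock.writeLock().lock(); }
  void writeUnlock() { coarseLock.writeLock().unlock(); }

  public static void main(String[] args) {
    NamesystemLockSketch lock = new NamesystemLockSketch(true);
    lock.writeLock();
    try {
      System.out.println("holding the namespace write lock");
    } finally {
      lock.writeUnlock();
    }
  }
}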


