hadoop-hdfs-commits mailing list archives

From: cnaur...@apache.org
Subject: svn commit: r1548386 [2/2] - in /hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/libwebhdfs/src/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apac...
Date: Fri, 06 Dec 2013 06:57:18 GMT
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Dec  6 06:57:15 2013
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
@@ -430,17 +434,11 @@ public class NameNode implements NameNod
     return getHttpAddress(conf);
   }
 
-  /** @return the NameNode HTTP address set in the conf. */
+  /** @return the NameNode HTTP address. */
   public static InetSocketAddress getHttpAddress(Configuration conf) {
     return  NetUtils.createSocketAddr(
         conf.get(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
   }
-  
-  protected void setHttpServerAddress(Configuration conf) {
-    String hostPort = NetUtils.getHostPortString(getHttpAddress());
-    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, hostPort);
-    LOG.info("Web-server up at: " + hostPort);
-  }
 
   protected void loadNamesystem(Configuration conf) throws IOException {
     this.namesystem = FSNamesystem.loadFromDisk(conf);
@@ -490,7 +488,6 @@ public class NameNode implements NameNod
 
     if (NamenodeRole.NAMENODE == role) {
       startHttpServer(conf);
-      validateConfigurationSettingsOrAbort(conf);
     }
     loadNamesystem(conf);
 
@@ -498,8 +495,6 @@ public class NameNode implements NameNod
     if (NamenodeRole.NAMENODE == role) {
       httpServer.setNameNodeAddress(getNameNodeAddress());
       httpServer.setFSImage(getFSImage());
-    } else {
-      validateConfigurationSettingsOrAbort(conf);
     }
     
     pauseMonitor = new JvmPauseMonitor(conf);
@@ -517,45 +512,6 @@ public class NameNode implements NameNod
     return new NameNodeRpcServer(conf, this);
   }
 
-  /**
-   * Verifies that the final Configuration Settings look ok for the NameNode to
-   * properly start up
-   * Things to check for include:
-   * - HTTP Server Port does not equal the RPC Server Port
-   * @param conf
-   * @throws IOException
-   */
-  protected void validateConfigurationSettings(final Configuration conf) 
-      throws IOException {
-    // check to make sure the web port and rpc port do not match 
-    if(getHttpServerAddress(conf).getPort() 
-        == getRpcServerAddress(conf).getPort()) {
-      String errMsg = "dfs.namenode.rpc-address " +
-          "("+ getRpcServerAddress(conf) + ") and " +
-          "dfs.namenode.http-address ("+ getHttpServerAddress(conf) + ") " +
-          "configuration keys are bound to the same port, unable to start " +
-          "NameNode. Port: " + getRpcServerAddress(conf).getPort();
-      throw new IOException(errMsg);
-    } 
-  }
-
-  /**
-   * Validate NameNode configuration.  Log a fatal error and abort if
-   * configuration is invalid.
-   * 
-   * @param conf Configuration to validate
-   * @throws IOException thrown if conf is invalid
-   */
-  private void validateConfigurationSettingsOrAbort(Configuration conf)
-      throws IOException {
-    try {
-      validateConfigurationSettings(conf);
-    } catch (IOException e) {
-      LOG.fatal(e.toString());
-      throw e;
-    }
-  }
-
   /** Start the services common to active and standby states */
   private void startCommonServices(Configuration conf) throws IOException {
     namesystem.startCommonServices(conf, haContext);
@@ -634,7 +590,6 @@ public class NameNode implements NameNod
     httpServer = new NameNodeHttpServer(conf, this, getHttpServerAddress(conf));
     httpServer.start();
     httpServer.setStartupProgress(startupProgress);
-    setHttpServerAddress(conf);
   }
   
   private void stopHttpServer() {
@@ -656,7 +611,7 @@ public class NameNode implements NameNod
    * <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
    * <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster  
    * upgrade and create a snapshot of the current file system state</li> 
-   * <li>{@link StartupOption#RECOVERY RECOVERY} - recover name node
+   * <li>{@link StartupOption#RECOVER RECOVERY} - recover name node
    * metadata</li>
    * <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the  
    *            cluster back to the previous state</li>

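The setHttpServerAddress and validateConfigurationSettingsOrAbort helpers removed above are superseded by the reworked NameNodeHttpServer.start() later in this patch, which writes the actually bound HTTP and HTTPS addresses back into the Configuration once the connectors are up. A minimal sketch of how a caller could then recover the bound web address from the conf (the wrapper class and method below are illustrative, not part of the patch):

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.net.NetUtils;

// Illustrative only: after NameNodeHttpServer.start() has run, the conf
// holds the real bound host:port, so an ephemeral port (0) is resolved here.
class BoundWebAddressSketch {
  static InetSocketAddress boundHttpAddress(Configuration conf) {
    String hostPort = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
    return hostPort == null ? null : NetUtils.createSocketAddr(hostPort);
  }
}
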
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java Fri Dec  6 06:57:15 2013
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -37,6 +39,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.web.AuthFilter;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -60,76 +63,116 @@ public class NameNodeHttpServer {
   public static final String FSIMAGE_ATTRIBUTE_KEY = "name.system.image";
   protected static final String NAMENODE_ATTRIBUTE_KEY = "name.node";
   public static final String STARTUP_PROGRESS_ATTRIBUTE_KEY = "startup.progress";
-  
-  public NameNodeHttpServer(
-      Configuration conf,
-      NameNode nn,
+
+  NameNodeHttpServer(Configuration conf, NameNode nn,
       InetSocketAddress bindAddress) {
     this.conf = conf;
     this.nn = nn;
     this.bindAddress = bindAddress;
   }
-  
+
+  private void initWebHdfs(Configuration conf) throws IOException {
+    if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
+      //add SPNEGO authentication filter for webhdfs
+      final String name = "SPNEGO";
+      final String classname = AuthFilter.class.getName();
+      final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
+      Map<String, String> params = getAuthFilterParams(conf);
+      HttpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
+          new String[]{pathSpec});
+      HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
+
+      // add webhdfs packages
+      httpServer.addJerseyResourcePackage(
+          NamenodeWebHdfsMethods.class.getPackage().getName()
+              + ";" + Param.class.getPackage().getName(), pathSpec);
+    }
+  }
+
+  /**
+   * @see DFSUtil#getHttpPolicy(org.apache.hadoop.conf.Configuration)
+   * for information related to the different configuration options and
+   * Http Policy is decided.
+   */
   void start() throws IOException {
+    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
     final String infoHost = bindAddress.getHostName();
-    int infoPort = bindAddress.getPort();
-    HttpServer.Builder builder = new HttpServer.Builder().setName("hdfs")
-        .addEndpoint(URI.create(("http://" + NetUtils.getHostPortString(bindAddress))))
-        .setFindPort(infoPort == 0).setConf(conf).setACL(
-            new AccessControlList(conf.get(DFS_ADMIN, " ")))
+
+    HttpServer.Builder builder = new HttpServer.Builder()
+        .setName("hdfs")
+        .setConf(conf)
+        .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
         .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
         .setUsernameConfKey(
             DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
-        .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
-            DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
+        .setKeytabConfKey(
+            DFSUtil.getSpnegoKeytabKey(conf,
+                DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
+
+    if (policy.isHttpEnabled()) {
+      int port = bindAddress.getPort();
+      if (port == 0) {
+        builder.setFindPort(true);
+      }
+      builder.addEndpoint(URI.create("http://" + infoHost + ":" + port));
+    }
 
-    boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
-    if (certSSL) {
-      httpsAddress = NetUtils.createSocketAddr(conf.get(
+    if (policy.isHttpsEnabled()) {
+      final String httpsAddrString = conf.get(
           DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
-          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
+      InetSocketAddress addr = NetUtils.createSocketAddr(httpsAddrString);
 
-      builder.addEndpoint(URI.create("https://"
-          + NetUtils.getHostPortString(httpsAddress)));
       Configuration sslConf = new Configuration(false);
-      sslConf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf
-          .getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
-              DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
+
+      sslConf.addResource(conf.get(
+          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
+
       sslConf.addResource(conf.get(
           DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
           DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
+      sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf.getBoolean(
+          DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
       DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
+
+      if (addr.getPort() == 0) {
+        builder.setFindPort(true);
+      }
+
+      builder.addEndpoint(URI.create("https://"
+          + NetUtils.getHostPortString(addr)));
     }
 
     httpServer = builder.build();
-    if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
-      //add SPNEGO authentication filter for webhdfs
-      final String name = "SPNEGO";
-      final String classname = AuthFilter.class.getName();
-      final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
-      Map<String, String> params = getAuthFilterParams(conf);
-      HttpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
-          new String[]{pathSpec});
-      HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
 
-      // add webhdfs packages
-      httpServer.addJerseyResourcePackage(
-          NamenodeWebHdfsMethods.class.getPackage().getName()
-          + ";" + Param.class.getPackage().getName(), pathSpec);
-      }
+    if (policy.isHttpsEnabled()) {
+      // assume same ssl port for all datanodes
+      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
+          DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":"
+              + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
+      httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
+          datanodeSslPort.getPort());
+    }
+
+    initWebHdfs(conf);
 
     httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
     httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
     setupServlets(httpServer, conf);
     httpServer.start();
-    httpAddress = httpServer.getConnectorAddress(0);
-    if (certSSL) {
-      httpsAddress = httpServer.getConnectorAddress(1);
-      // assume same ssl port for all datanodes
-      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
-        DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
-      httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, datanodeSslPort
-        .getPort());
+
+    int connIdx = 0;
+    if (policy.isHttpEnabled()) {
+      httpAddress = httpServer.getConnectorAddress(connIdx++);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpAddress));
+    }
+
+    if (policy.isHttpsEnabled()) {
+      httpsAddress = httpServer.getConnectorAddress(connIdx);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpsAddress));
     }
   }
   
@@ -165,18 +208,17 @@ public class NameNodeHttpServer {
     return params;
   }
 
-
-  public void stop() throws Exception {
+  void stop() throws Exception {
     if (httpServer != null) {
       httpServer.stop();
     }
   }
 
-  public InetSocketAddress getHttpAddress() {
+  InetSocketAddress getHttpAddress() {
     return httpAddress;
   }
 
-  public InetSocketAddress getHttpsAddress() {
+  InetSocketAddress getHttpsAddress() {
     return httpsAddress;
   }
 
@@ -185,7 +227,7 @@ public class NameNodeHttpServer {
    * 
    * @param fsImage FSImage to set
    */
-  public void setFSImage(FSImage fsImage) {
+  void setFSImage(FSImage fsImage) {
     httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, fsImage);
   }
 
@@ -194,7 +236,7 @@ public class NameNodeHttpServer {
    * 
    * @param nameNodeAddress InetSocketAddress to set
    */
-  public void setNameNodeAddress(InetSocketAddress nameNodeAddress) {
+  void setNameNodeAddress(InetSocketAddress nameNodeAddress) {
     httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
         NetUtils.getConnectAddress(nameNodeAddress));
   }
@@ -204,7 +246,7 @@ public class NameNodeHttpServer {
    * 
    * @param prog StartupProgress to set
    */
-  public void setStartupProgress(StartupProgress prog) {
+  void setStartupProgress(StartupProgress prog) {
     httpServer.setAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY, prog);
   }
 
@@ -234,7 +276,7 @@ public class NameNodeHttpServer {
         ContentSummaryServlet.class, false);
   }
 
-  public static FSImage getFsImageFromContext(ServletContext context) {
+  static FSImage getFsImageFromContext(ServletContext context) {
     return (FSImage)context.getAttribute(FSIMAGE_ATTRIBUTE_KEY);
   }
 
@@ -242,7 +284,7 @@ public class NameNodeHttpServer {
     return (NameNode)context.getAttribute(NAMENODE_ATTRIBUTE_KEY);
   }
 
-  public static Configuration getConfFromContext(ServletContext context) {
+  static Configuration getConfFromContext(ServletContext context) {
     return (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
   }
 
@@ -258,7 +300,7 @@ public class NameNodeHttpServer {
    * @param context ServletContext to get
    * @return StartupProgress associated with context
    */
-  public static StartupProgress getStartupProgressFromContext(
+  static StartupProgress getStartupProgressFromContext(
       ServletContext context) {
     return (StartupProgress)context.getAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY);
   }

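The rewritten start() above keys everything off DFSUtil.getHttpPolicy(conf) instead of the old dfs.https.enable boolean. A compact sketch of just that endpoint selection, separated from the HttpServer.Builder plumbing (the helper class is illustrative; the policy checks and utility calls are the ones used in the patch):

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.net.NetUtils;

// Illustrative only: which endpoints the NameNode web server would bind
// for a given dfs.http.policy value.
class HttpPolicyEndpointSketch {
  static void printEndpoints(Configuration conf, InetSocketAddress httpBind,
      InetSocketAddress httpsBind) {
    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
    if (policy.isHttpEnabled()) {
      // HTTP_ONLY and HTTP_AND_HTTPS take this branch.
      System.out.println("http://" + NetUtils.getHostPortString(httpBind));
    }
    if (policy.isHttpsEnabled()) {
      // HTTPS_ONLY and HTTP_AND_HTTPS take this branch.
      System.out.println("https://" + NetUtils.getHostPortString(httpsBind));
    }
  }
}
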
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Fri Dec  6 06:57:15 2013
@@ -36,7 +36,6 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BatchedRemoteIterator;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -46,8 +45,8 @@ import org.apache.hadoop.fs.InvalidPathE
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.HAServiceStatus;
@@ -1251,36 +1250,13 @@ class NameNodeRpcServer implements Namen
     namesystem.removeCacheDirective(id);
   }
 
-  private class ServerSideCacheEntriesIterator 
-      extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
-
-    private final CacheDirectiveInfo filter;
-    
-    public ServerSideCacheEntriesIterator (Long firstKey, 
-        CacheDirectiveInfo filter) {
-      super(firstKey);
-      this.filter = filter;
-    }
-
-    @Override
-    public BatchedEntries<CacheDirectiveEntry> makeRequest(
-        Long nextKey) throws IOException {
-      return namesystem.listCacheDirectives(nextKey, filter);
-    }
-
-    @Override
-    public Long elementToPrevKey(CacheDirectiveEntry entry) {
-      return entry.getInfo().getId();
-    }
-  }
-  
   @Override
-  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(long prevId,
+  public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId,
       CacheDirectiveInfo filter) throws IOException {
     if (filter == null) {
       filter = new CacheDirectiveInfo.Builder().build();
     }
-    return new ServerSideCacheEntriesIterator(prevId, filter);
+    return namesystem.listCacheDirectives(prevId, filter);
   }
 
   @Override
@@ -1298,28 +1274,9 @@ class NameNodeRpcServer implements Namen
     namesystem.removeCachePool(cachePoolName);
   }
 
-  private class ServerSideCachePoolIterator 
-      extends BatchedRemoteIterator<String, CachePoolEntry> {
-
-    public ServerSideCachePoolIterator(String prevKey) {
-      super(prevKey);
-    }
-
-    @Override
-    public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
-        throws IOException {
-      return namesystem.listCachePools(prevKey);
-    }
-
-    @Override
-    public String elementToPrevKey(CachePoolEntry entry) {
-      return entry.getInfo().getPoolName();
-    }
-  }
-
   @Override
-  public RemoteIterator<CachePoolEntry> listCachePools(String prevKey)
+  public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
       throws IOException {
-    return new ServerSideCachePoolIterator(prevKey);
+    return namesystem.listCachePools(prevKey != null ? prevKey : "");
   }
 }

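With the server side now returning raw BatchedEntries batches, the paging behaviour that the deleted ServerSideCacheEntriesIterator and ServerSideCachePoolIterator provided moves to the client (the CacheDirectiveIterator import added to TestCacheDirectives below points the same way). A rough client-side equivalent, modeled on the deleted iterator and assuming a ClientProtocol handle exposing the listCacheDirectives signature shown above:

import java.io.IOException;

import org.apache.hadoop.fs.BatchedRemoteIterator;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

// Illustrative only: a client-side analogue of the removed
// ServerSideCacheEntriesIterator, paging over batches returned by the RPC.
class ClientSideCacheEntriesIterator
    extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
  private final ClientProtocol namenode;
  private final CacheDirectiveInfo filter;

  ClientSideCacheEntriesIterator(ClientProtocol namenode, Long firstKey,
      CacheDirectiveInfo filter) {
    super(firstKey);
    this.namenode = namenode;
    this.filter = filter;
  }

  @Override
  public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey)
      throws IOException {
    // Each call fetches the next batch of directives after prevKey.
    return namenode.listCacheDirectives(prevKey, filter);
  }

  @Override
  public Long elementToPrevKey(CacheDirectiveEntry entry) {
    // The directive id is the paging key.
    return entry.getInfo().getId();
  }
}
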
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri Dec  6 06:57:15 2013
@@ -30,7 +30,6 @@ import java.io.FilenameFilter;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
@@ -257,12 +256,7 @@ public class SecondaryNameNode implement
 
     // initialize the webserver for uploading files.
     int tmpInfoPort = infoSocAddr.getPort();
-    URI httpEndpoint;
-    try {
-      httpEndpoint = new URI("http://" + NetUtils.getHostPortString(infoSocAddr));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
+    URI httpEndpoint = URI.create("http://" + NetUtils.getHostPortString(infoSocAddr));
 
     infoServer = new HttpServer.Builder().setName("secondary")
         .addEndpoint(httpEndpoint)
@@ -273,6 +267,7 @@ public class SecondaryNameNode implement
             DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
         .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
             DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)).build();
+
     infoServer.setAttribute("secondary.name.node", this);
     infoServer.setAttribute("name.system.image", checkpointImage);
     infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);

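The try/catch around new URI(...) goes away because URI.create performs the same parse but reports malformed input with an unchecked IllegalArgumentException, and a host:port string coming from NetUtils.getHostPortString is expected to parse. A tiny standalone illustration (not from the patch):

import java.net.URI;
import java.net.URISyntaxException;

class UriCreateSketch {
  public static void main(String[] args) throws URISyntaxException {
    // Checked: the caller must declare or handle URISyntaxException.
    URI a = new URI("http://0.0.0.0:0");
    // Unchecked: parse failures surface as IllegalArgumentException instead,
    // which is why the explicit try/catch could be dropped.
    URI b = URI.create("http://0.0.0.0:0");
    System.out.println(a.equals(b));  // true
  }
}
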
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Fri Dec  6 06:57:15 2013
@@ -245,8 +245,12 @@ public class NamenodeWebHdfsMethods {
         + Param.toSortedString("&", parameters);
     final String uripath = WebHdfsFileSystem.PATH_PREFIX + path;
 
-    final URI uri = new URI("http", null, dn.getHostName(), dn.getInfoPort(),
-        uripath, query, null);
+    final String scheme = request.getScheme();
+    int port = "http".equals(scheme) ? dn.getInfoPort() : dn
+        .getInfoSecurePort();
+    final URI uri = new URI(scheme, null, dn.getHostName(), port, uripath,
+        query, null);
+
     if (LOG.isTraceEnabled()) {
       LOG.trace("redirectURI=" + uri);
     }

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java Fri Dec  6 06:57:15 2013
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.tools.TableListing.Justification;
 import org.apache.hadoop.ipc.RemoteException;
@@ -477,9 +478,10 @@ public class CacheAdmin extends Configur
           addField("EXPIRY", Justification.LEFT).
           addField("PATH", Justification.LEFT);
       if (printStats) {
-        tableBuilder.addField("NEEDED", Justification.RIGHT).
-                    addField("CACHED", Justification.RIGHT).
-                    addField("FILES", Justification.RIGHT);
+        tableBuilder.addField("BYTES_NEEDED", Justification.RIGHT).
+                    addField("BYTES_CACHED", Justification.RIGHT).
+                    addField("FILES_NEEDED", Justification.RIGHT).
+                    addField("FILES_CACHED", Justification.RIGHT);
       }
       TableListing tableListing = tableBuilder.build();
 
@@ -507,7 +509,8 @@ public class CacheAdmin extends Configur
         if (printStats) {
           row.add("" + stats.getBytesNeeded());
           row.add("" + stats.getBytesCached());
-          row.add("" + stats.getFilesAffected());
+          row.add("" + stats.getFilesNeeded());
+          row.add("" + stats.getFilesCached());
         }
         tableListing.addRow(row.toArray(new String[0]));
         numEntries++;
@@ -769,13 +772,14 @@ public class CacheAdmin extends Configur
 
     @Override
     public String getShortUsage() {
-      return "[" + getName() + " [name]]\n";
+      return "[" + getName() + " [-stats] [<name>]]\n";
     }
 
     @Override
     public String getLongUsage() {
       TableListing listing = getOptionDescriptionListing();
-      listing.addRow("[name]", "If specified, list only the named cache pool.");
+      listing.addRow("-stats", "Display additional cache pool statistics.");
+      listing.addRow("<name>", "If specified, list only the named cache pool.");
 
       return getShortUsage() + "\n" +
           WordUtils.wrap("Display information about one or more cache pools, " +
@@ -787,6 +791,7 @@ public class CacheAdmin extends Configur
     @Override
     public int run(Configuration conf, List<String> args) throws IOException {
       String name = StringUtils.popFirstNonOption(args);
+      final boolean printStats = StringUtils.popOption("-stats", args);
       if (!args.isEmpty()) {
         System.err.print("Can't understand arguments: " +
           Joiner.on(" ").join(args) + "\n");
@@ -794,28 +799,42 @@ public class CacheAdmin extends Configur
         return 1;
       }
       DistributedFileSystem dfs = getDFS(conf);
-      TableListing listing = new TableListing.Builder().
+      TableListing.Builder builder = new TableListing.Builder().
           addField("NAME", Justification.LEFT).
           addField("OWNER", Justification.LEFT).
           addField("GROUP", Justification.LEFT).
           addField("MODE", Justification.LEFT).
-          addField("WEIGHT", Justification.RIGHT).
-          build();
+          addField("WEIGHT", Justification.RIGHT);
+      if (printStats) {
+        builder.
+            addField("BYTES_NEEDED", Justification.RIGHT).
+            addField("BYTES_CACHED", Justification.RIGHT).
+            addField("FILES_NEEDED", Justification.RIGHT).
+            addField("FILES_CACHED", Justification.RIGHT);
+      }
+      TableListing listing = builder.build();
       int numResults = 0;
       try {
         RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
         while (iter.hasNext()) {
           CachePoolEntry entry = iter.next();
           CachePoolInfo info = entry.getInfo();
-          String[] row = new String[5];
+          LinkedList<String> row = new LinkedList<String>();
           if (name == null || info.getPoolName().equals(name)) {
-            row[0] = info.getPoolName();
-            row[1] = info.getOwnerName();
-            row[2] = info.getGroupName();
-            row[3] = info.getMode() != null ? info.getMode().toString() : null;
-            row[4] =
-                info.getWeight() != null ? info.getWeight().toString() : null;
-            listing.addRow(row);
+            row.add(info.getPoolName());
+            row.add(info.getOwnerName());
+            row.add(info.getGroupName());
+            row.add(info.getMode() != null ? info.getMode().toString() : null);
+            row.add(
+                info.getWeight() != null ? info.getWeight().toString() : null);
+            if (printStats) {
+              CachePoolStats stats = entry.getStats();
+              row.add(Long.toString(stats.getBytesNeeded()));
+              row.add(Long.toString(stats.getBytesCached()));
+              row.add(Long.toString(stats.getFilesNeeded()));
+              row.add(Long.toString(stats.getFilesCached()));
+            }
+            listing.addRow(row.toArray(new String[] {}));
             ++numResults;
             if (name != null) {
               break;

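The new -stats option surfaces the per-pool counters now carried by CachePoolStats (bytes/files needed and cached, replacing the single filesAffected field; see the ClientNamenodeProtocol.proto change below). The same numbers can be read programmatically, roughly as follows, using only calls already visible in this patch:

import java.io.IOException;

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;

// Sketch: print the same columns the "-stats" pool listing renders.
class ListPoolStatsSketch {
  static void printPoolStats(DistributedFileSystem dfs) throws IOException {
    RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
    while (iter.hasNext()) {
      CachePoolEntry entry = iter.next();
      CachePoolStats stats = entry.getStats();
      System.out.println(entry.getInfo().getPoolName()
          + " bytesNeeded=" + stats.getBytesNeeded()
          + " bytesCached=" + stats.getBytesCached()
          + " filesNeeded=" + stats.getFilesNeeded()
          + " filesCached=" + stats.getFilesCached());
    }
  }
}
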
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java Fri Dec  6 06:57:15 2013
@@ -100,6 +100,18 @@ class FileDistributionVisitor extends Te
 
   @Override
   void finish() throws IOException {
+    output();
+    super.finish();
+  }
+
+  @Override
+  void finishAbnormally() throws IOException {
+    System.out.println("*** Image processing finished abnormally.  Ending ***");
+    output();
+    super.finishAbnormally();
+  }
+
+  private void output() throws IOException {
     // write the distribution into the output file
     write("Size\tNumFiles\n");
     for(int i = 0; i < distribution.length; i++)
@@ -109,7 +121,6 @@ class FileDistributionVisitor extends Te
     System.out.println("totalBlocks = " + totalBlocks);
     System.out.println("totalSpace = " + totalSpace);
     System.out.println("maxFileSize = " + maxFileSize);
-    super.finish();
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Fri Dec  6 06:57:15 2013
@@ -129,7 +129,7 @@ class ImageLoaderCurrent implements Imag
       -40, -41, -42, -43, -44, -45, -46, -47, -48 };
   private int imageVersion = 0;
   
-  private final Map<Long, String> subtreeMap = new HashMap<Long, String>();
+  private final Map<Long, Boolean> subtreeMap = new HashMap<Long, Boolean>();
   private final Map<Long, String> dirNodeMap = new HashMap<Long, String>();
 
   /* (non-Javadoc)
@@ -500,11 +500,15 @@ class ImageLoaderCurrent implements Imag
     // 1. load dir node id
     long inodeId = in.readLong();
     
-    String dirName = dirNodeMap.get(inodeId);
-    String oldValue = subtreeMap.put(inodeId, dirName);
-    if (oldValue != null) { // the subtree has been visited
-      return;
-    }
+    String dirName = dirNodeMap.remove(inodeId);
+    Boolean visitedRef = subtreeMap.get(inodeId);
+    if (visitedRef != null) {
+      if (visitedRef.booleanValue()) { // the subtree has been visited
+        return;
+      } else { // first time to visit
+        subtreeMap.put(inodeId, true);
+      }
+    } // else the dir is not linked by a RefNode, thus cannot be revisited
     
     // 2. load possible snapshots
     processSnapshots(in, v, dirName);
@@ -695,6 +699,8 @@ class ImageLoaderCurrent implements Imag
     
     if (numBlocks >= 0) { // File
       if (supportSnapshot) {
+        // make sure subtreeMap only contains entry for directory
+        subtreeMap.remove(inodeId);
         // process file diffs
         processFileDiffList(in, v, parentName);
         if (isSnapshotCopy) {
@@ -738,6 +744,11 @@ class ImageLoaderCurrent implements Imag
       
       final boolean firstReferred = in.readBoolean();
       if (firstReferred) {
+        // if a subtree is linked by multiple "parents", the corresponding dir
+        // must be referred by a reference node. we put the reference node into
+        // the subtreeMap here and let its value be false. when we later visit
+        // the subtree for the first time, we change the value to true.
+        subtreeMap.put(inodeId, false);
         v.visitEnclosingElement(ImageElement.SNAPSHOT_REF_INODE);
         processINode(in, v, skipBlocks, parentName, isSnapshotCopy);
         v.leaveEnclosingElement();  // referred inode    

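The subtreeMap change above encodes a three-way state per inode id: no entry (the directory is not the target of any reference node, so it cannot be revisited), Boolean.FALSE (a reference node points at it but the subtree has not been walked yet), and Boolean.TRUE (already walked, skip on re-entry). A compact restatement of that state machine, independent of the image-loading code (the class and method names are illustrative, not the patch's):

import java.util.HashMap;
import java.util.Map;

// Illustrative restatement of the visited-tracking in ImageLoaderCurrent:
// absent = not referenced, FALSE = referenced but unvisited, TRUE = visited.
class SubtreeVisitTracker {
  private final Map<Long, Boolean> subtreeMap = new HashMap<Long, Boolean>();

  void markReferenced(long inodeId) {
    // Called when a reference (e.g. a snapshot ref inode) links the directory.
    subtreeMap.put(inodeId, false);
  }

  /** @return true if the subtree should be skipped as already visited. */
  boolean enterSubtree(long inodeId) {
    Boolean visited = subtreeMap.get(inodeId);
    if (visited == null) {
      return false;                   // not referenced: visited exactly once
    }
    if (visited.booleanValue()) {
      return true;                    // already visited via a reference: skip
    }
    subtreeMap.put(inodeId, true);    // first visit of a referenced subtree
    return false;
  }
}
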
Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1547224-1548385

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c Fri Dec  6 06:57:15 2013
@@ -35,36 +35,55 @@ struct ExceptionInfo {
 
 static const struct ExceptionInfo gExceptionInfo[] = {
     {
-        .name = "java/io/FileNotFoundException",
+        .name = "java.io.FileNotFoundException",
         .noPrintFlag = NOPRINT_EXC_FILE_NOT_FOUND,
         .excErrno = ENOENT,
     },
     {
-        .name = "org/apache/hadoop/security/AccessControlException",
+        .name = "org.apache.hadoop.security.AccessControlException",
         .noPrintFlag = NOPRINT_EXC_ACCESS_CONTROL,
         .excErrno = EACCES,
     },
     {
-        .name = "org/apache/hadoop/fs/UnresolvedLinkException",
+        .name = "org.apache.hadoop.fs.UnresolvedLinkException",
         .noPrintFlag = NOPRINT_EXC_UNRESOLVED_LINK,
         .excErrno = ENOLINK,
     },
     {
-        .name = "org/apache/hadoop/fs/ParentNotDirectoryException",
+        .name = "org.apache.hadoop.fs.ParentNotDirectoryException",
         .noPrintFlag = NOPRINT_EXC_PARENT_NOT_DIRECTORY,
         .excErrno = ENOTDIR,
     },
     {
-        .name = "java/lang/IllegalArgumentException",
+        .name = "java.lang.IllegalArgumentException",
         .noPrintFlag = NOPRINT_EXC_ILLEGAL_ARGUMENT,
         .excErrno = EINVAL,
     },
     {
-        .name = "java/lang/OutOfMemoryError",
+        .name = "java.lang.OutOfMemoryError",
         .noPrintFlag = 0,
         .excErrno = ENOMEM,
     },
-    
+    {
+        .name = "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
+        .noPrintFlag = 0,
+        .excErrno = EROFS,
+    },
+    {
+        .name = "org.apache.hadoop.fs.FileAlreadyExistsException",
+        .noPrintFlag = 0,
+        .excErrno = EEXIST,
+    },
+    {
+        .name = "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
+        .noPrintFlag = 0,
+        .excErrno = EDQUOT,
+    },
+    {
+        .name = "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
+        .noPrintFlag = 0,
+        .excErrno = ESTALE,
+    },
 };
 
 void getExceptionInfo(const char *excName, int noPrintFlags,

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c Fri Dec  6 06:57:15 2013
@@ -48,7 +48,8 @@ struct tlhThreadInfo {
     pthread_t thread;
 };
 
-static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs)
+static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
+                                     const char *username)
 {
     int ret, port;
     hdfsFS hdfs;
@@ -70,6 +71,9 @@ static int hdfsSingleNameNodeConnect(str
                           TO_STR(TLH_DEFAULT_BLOCK_SIZE));
     hdfsBuilderConfSetStr(bld, "dfs.blocksize",
                           TO_STR(TLH_DEFAULT_BLOCK_SIZE));
+    if (username) {
+        hdfsBuilderSetUserName(bld, username);
+    }
     hdfs = hdfsBuilderConnect(bld);
     if (!hdfs) {
         ret = -errno;
@@ -110,36 +114,58 @@ static int doTestGetDefaultBlockSize(hdf
     return 0;
 }
 
-static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
+struct tlhPaths {
+    char prefix[256];
+    char file1[256];
+    char file2[256];
+};
+
+static int setupPaths(const struct tlhThreadInfo *ti, struct tlhPaths *paths)
 {
-    char prefix[256], tmp[256];
+    memset(paths, sizeof(*paths), 0);
+    if (snprintf(paths->prefix, sizeof(paths->prefix), "/tlhData%04d",
+                 ti->threadIdx) >= sizeof(paths->prefix)) {
+        return ENAMETOOLONG;
+    }
+    if (snprintf(paths->file1, sizeof(paths->file1), "%s/file1",
+                 paths->prefix) >= sizeof(paths->file1)) {
+        return ENAMETOOLONG;
+    }
+    if (snprintf(paths->file2, sizeof(paths->file2), "%s/file2",
+                 paths->prefix) >= sizeof(paths->file2)) {
+        return ENAMETOOLONG;
+    }
+    return 0;
+}
+
+static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
+                                const struct tlhPaths *paths)
+{
+    char tmp[4096];
     hdfsFile file;
     int ret, expected;
     hdfsFileInfo *fileInfo;
     struct hdfsReadStatistics *readStats = NULL;
 
-    snprintf(prefix, sizeof(prefix), "/tlhData%04d", ti->threadIdx);
-
-    if (hdfsExists(fs, prefix) == 0) {
-        EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
+    if (hdfsExists(fs, paths->prefix) == 0) {
+        EXPECT_ZERO(hdfsDelete(fs, paths->prefix, 1));
     }
-    EXPECT_ZERO(hdfsCreateDirectory(fs, prefix));
-    snprintf(tmp, sizeof(tmp), "%s/file", prefix);
+    EXPECT_ZERO(hdfsCreateDirectory(fs, paths->prefix));
 
-    EXPECT_ZERO(doTestGetDefaultBlockSize(fs, prefix));
+    EXPECT_ZERO(doTestGetDefaultBlockSize(fs, paths->prefix));
 
     /* There should not be any file to open for reading. */
-    EXPECT_NULL(hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0));
+    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0));
 
     /* hdfsOpenFile should not accept mode = 3 */
-    EXPECT_NULL(hdfsOpenFile(fs, tmp, 3, 0, 0, 0));
+    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, 3, 0, 0, 0));
 
-    file = hdfsOpenFile(fs, tmp, O_WRONLY, 0, 0, 0);
+    file = hdfsOpenFile(fs, paths->file1, O_WRONLY, 0, 0, 0);
     EXPECT_NONNULL(file);
 
     /* TODO: implement writeFully and use it here */
-    expected = strlen(prefix);
-    ret = hdfsWrite(fs, file, prefix, expected);
+    expected = strlen(paths->prefix);
+    ret = hdfsWrite(fs, file, paths->prefix, expected);
     if (ret < 0) {
         ret = errno;
         fprintf(stderr, "hdfsWrite failed and set errno %d\n", ret);
@@ -155,7 +181,7 @@ static int doTestHdfsOperations(struct t
     EXPECT_ZERO(hdfsCloseFile(fs, file));
 
     /* Let's re-open the file for reading */
-    file = hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0);
+    file = hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0);
     EXPECT_NONNULL(file);
 
     EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
@@ -180,60 +206,67 @@ static int doTestHdfsOperations(struct t
     errno = 0;
     EXPECT_INT_EQ(expected, readStats->totalBytesRead);
     hdfsFileFreeReadStatistics(readStats);
-    EXPECT_ZERO(memcmp(prefix, tmp, expected));
+    EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
     EXPECT_ZERO(hdfsCloseFile(fs, file));
 
     // TODO: Non-recursive delete should fail?
     //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
+    EXPECT_ZERO(hdfsCopy(fs, paths->file1, fs, paths->file2));
 
-    snprintf(tmp, sizeof(tmp), "%s/file", prefix);
-    EXPECT_ZERO(hdfsChown(fs, tmp, NULL, NULL));
-    EXPECT_ZERO(hdfsChown(fs, tmp, NULL, "doop"));
-    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, NULL));
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, "doop"));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
     EXPECT_NONNULL(fileInfo);
     EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
     hdfsFreeFileInfo(fileInfo, 1);
 
-    EXPECT_ZERO(hdfsChown(fs, tmp, "ha", "doop2"));
-    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha", "doop2"));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
     EXPECT_NONNULL(fileInfo);
     EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
     EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
     hdfsFreeFileInfo(fileInfo, 1);
 
-    EXPECT_ZERO(hdfsChown(fs, tmp, "ha2", NULL));
-    fileInfo = hdfsGetPathInfo(fs, tmp);
+    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha2", NULL));
+    fileInfo = hdfsGetPathInfo(fs, paths->file2);
     EXPECT_NONNULL(fileInfo);
     EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
     EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
     hdfsFreeFileInfo(fileInfo, 1);
 
-    EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
+    snprintf(tmp, sizeof(tmp), "%s/nonexistent-file-name", paths->prefix);
+    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, tmp, "ha3", NULL), ENOENT);
     return 0;
 }
 
-static void *testHdfsOperations(void *v)
+static int testHdfsOperationsImpl(struct tlhThreadInfo *ti)
 {
-    struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
     hdfsFS fs = NULL;
-    int ret;
+    struct tlhPaths paths;
 
     fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
         ti->threadIdx);
-    ret = hdfsSingleNameNodeConnect(tlhCluster, &fs);
-    if (ret) {
-        fprintf(stderr, "testHdfsOperations(threadIdx=%d): "
-            "hdfsSingleNameNodeConnect failed with error %d.\n",
-            ti->threadIdx, ret);
-        ti->success = EIO;
-        return NULL;
-    }
-    ti->success = doTestHdfsOperations(ti, fs);
-    if (hdfsDisconnect(fs)) {
-        ret = errno;
-        fprintf(stderr, "hdfsDisconnect error %d\n", ret);
-        ti->success = ret;
-    }
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
+    EXPECT_ZERO(setupPaths(ti, &paths));
+    // test some operations
+    EXPECT_ZERO(doTestHdfsOperations(ti, fs, &paths));
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    // reconnect as user "foo" and verify that we get permission errors
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, "foo"));
+    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, paths.file1, "ha3", NULL), EACCES);
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    // reconnect to do the final delete.
+    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
+    EXPECT_ZERO(hdfsDelete(fs, paths.prefix, 1));
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    return 0;
+}
+
+static void *testHdfsOperations(void *v)
+{
+    struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
+    int ret = testHdfsOperationsImpl(ti);
+    ti->success = ret;
     return NULL;
 }
 

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Fri Dec  6 06:57:15 2013
@@ -379,8 +379,9 @@ message CacheDirectiveInfoExpirationProt
 message CacheDirectiveStatsProto {
   required int64 bytesNeeded = 1;
   required int64 bytesCached = 2;
-  required int64 filesAffected = 3;
-  required bool hasExpired = 4;
+  required int64 filesNeeded = 3;
+  required int64 filesCached = 4;
+  required bool hasExpired = 5;
 }
 
 message AddCacheDirectiveRequestProto {
@@ -431,7 +432,8 @@ message CachePoolInfoProto {
 message CachePoolStatsProto {
   required int64 bytesNeeded = 1;
   required int64 bytesCached = 2;
-  required int64 filesAffected = 3;
+  required int64 filesNeeded = 3;
+  required int64 filesCached = 4;
 }
 
 message AddCachePoolRequestProto {

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Fri Dec  6 06:57:15 2013
@@ -137,7 +137,20 @@
 <property>
   <name>dfs.https.enable</name>
   <value>false</value>
+  <description>
+    Deprecated. Use "dfs.http.policy" instead.
+  </description>
+</property>
+
+<property>
+  <name>dfs.http.policy</name>
+  <value>HTTP_ONLY</value>
   <description>Decide if HTTPS(SSL) is supported on HDFS
+    This configures the HTTP endpoint for HDFS daemons:
+      The following values are supported:
+      - HTTP_ONLY : Service is provided only on http
+      - HTTPS_ONLY : Service is provided only on https
+      - HTTP_AND_HTTPS : Service is provided both on http and https
   </description>
 </property>
 

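The new dfs.http.policy key supersedes the boolean dfs.https.enable and feeds the HttpConfig.Policy checks used in NameNodeHttpServer above. A minimal example of selecting HTTPS only (the wrapper class is illustrative; the key and its values are the ones documented here):

import org.apache.hadoop.conf.Configuration;

class HttpPolicyConfSketch {
  static Configuration httpsOnlyConf() {
    Configuration conf = new Configuration();
    // Accepted values per the description above:
    // HTTP_ONLY, HTTPS_ONLY, HTTP_AND_HTTPS.
    conf.set("dfs.http.policy", "HTTPS_ONLY");
    return conf;
  }
}
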
Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1547224-1548385

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1547224-1548385

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1547224-1548385

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1547224-1548385

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Dec  6 06:57:15 2013
@@ -33,6 +33,7 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
@@ -900,12 +901,17 @@ public class MiniDFSCluster {
     
     // After the NN has started, set back the bound ports into
     // the conf
-    conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId),
-        nn.getNameNodeAddressHostPortString());
-    conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NetUtils
-        .getHostPortString(nn.getHttpAddress()));
+    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
+        nameserviceId, nnId), nn.getNameNodeAddressHostPortString());
+    if (nn.getHttpAddress() != null) {
+      conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY,
+          nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress()));
+    }
+    if (nn.getHttpsAddress() != null) {
+      conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+          nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpsAddress()));
+    }
+
     DFSUtil.setGenericConf(conf, nameserviceId, nnId,
         DFS_NAMENODE_HTTP_ADDRESS_KEY);
     nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId,
@@ -1181,9 +1187,8 @@ public class MiniDFSCluster {
 
       SecureResources secureResources = null;
       if (UserGroupInformation.isSecurityEnabled()) {
-        SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, dnConf);
         try {
-          secureResources = SecureDataNodeStarter.getSecureResources(sslFactory, dnConf);
+          secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
         } catch (Exception ex) {
           ex.printStackTrace();
         }

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java Fri Dec  6 06:57:15 2013
@@ -158,9 +158,8 @@ public class MiniDFSClusterWithNodeGroup
       
       SecureResources secureResources = null;
       if (UserGroupInformation.isSecurityEnabled()) {
-        SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, dnConf);
         try {
-          secureResources = SecureDataNodeStarter.getSecureResources(sslFactory, dnConf);
+          secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
         } catch (Exception ex) {
           ex.printStackTrace();
         }

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java Fri Dec  6 06:57:15 2013
@@ -97,7 +97,6 @@ public class TestPersistBlocks {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     long len = 0;
@@ -157,7 +156,6 @@ public class TestPersistBlocks {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     long len = 0;
@@ -219,7 +217,6 @@ public class TestPersistBlocks {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     FSDataOutputStream stream;
@@ -269,7 +266,6 @@ public class TestPersistBlocks {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
 
     FSDataOutputStream stream;

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Fri Dec  6 06:57:15 2013
@@ -434,7 +434,6 @@ public class TestBackupNode {
   public void testCanReadData() throws IOException {
     Path file1 = new Path("/fileToRead.dat");
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
     MiniDFSCluster cluster = null;
     FileSystem fileSys = null;
     BackupNode backup = null;

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java Fri Dec  6 06:57:15 2013
@@ -57,10 +57,12 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
+import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -622,45 +624,111 @@ public class TestCacheDirectives {
     }, 500, 60000);
   }
 
-  private static void waitForCachedStats(final DistributedFileSystem dfs,
-      final long targetFilesAffected, final long targetBytesNeeded,
-        final long targetBytesCached,
-          final CacheDirectiveInfo filter, final String infoString)
+  private static void waitForCacheDirectiveStats(final DistributedFileSystem dfs,
+      final long targetBytesNeeded, final long targetBytesCached,
+      final long targetFilesNeeded, final long targetFilesCached,
+      final CacheDirectiveInfo filter, final String infoString)
             throws Exception {
-      LOG.info("Polling listDirectives{" + 
-          ((filter == null) ? "ALL" : filter.toString()) +
-          " for " + targetFilesAffected + " targetFilesAffected, " +
-          targetBytesNeeded + " targetBytesNeeded, " +
-          targetBytesCached + " targetBytesCached");
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          RemoteIterator<CacheDirectiveEntry> iter = null;
-          CacheDirectiveEntry entry = null;
+    LOG.info("Polling listCacheDirectives " + 
+        ((filter == null) ? "ALL" : filter.toString()) + " for " +
+        targetBytesNeeded + " targetBytesNeeded, " +
+        targetBytesCached + " targetBytesCached, " +
+        targetFilesNeeded + " targetFilesNeeded, " +
+        targetFilesCached + " targetFilesCached");
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        RemoteIterator<CacheDirectiveEntry> iter = null;
+        CacheDirectiveEntry entry = null;
+        try {
+          iter = dfs.listCacheDirectives(filter);
+          entry = iter.next();
+        } catch (IOException e) {
+          fail("got IOException while calling " +
+              "listCacheDirectives: " + e.getMessage());
+        }
+        Assert.assertNotNull(entry);
+        CacheDirectiveStats stats = entry.getStats();
+        if ((targetBytesNeeded == stats.getBytesNeeded()) &&
+            (targetBytesCached == stats.getBytesCached()) &&
+            (targetFilesNeeded == stats.getFilesNeeded()) &&
+            (targetFilesCached == stats.getFilesCached())) {
+          return true;
+        } else {
+          LOG.info(infoString + ": " +
+              "filesNeeded: " +
+              stats.getFilesNeeded() + "/" + targetFilesNeeded +
+              ", filesCached: " + 
+              stats.getFilesCached() + "/" + targetFilesCached +
+              ", bytesNeeded: " +
+              stats.getBytesNeeded() + "/" + targetBytesNeeded +
+              ", bytesCached: " + 
+              stats.getBytesCached() + "/" + targetBytesCached);
+          return false;
+        }
+      }
+    }, 500, 60000);
+  }
+
+  private static void waitForCachePoolStats(final DistributedFileSystem dfs,
+      final long targetBytesNeeded, final long targetBytesCached,
+      final long targetFilesNeeded, final long targetFilesCached,
+      final CachePoolInfo pool, final String infoString)
+            throws Exception {
+    LOG.info("Polling listCachePools " + pool.toString() + " for " +
+        targetBytesNeeded + " targetBytesNeeded, " +
+        targetBytesCached + " targetBytesCached, " +
+        targetFilesNeeded + " targetFilesNeeded, " +
+        targetFilesCached + " targetFilesCached");
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        RemoteIterator<CachePoolEntry> iter = null;
+        try {
+          iter = dfs.listCachePools();
+        } catch (IOException e) {
+          fail("got IOException while calling " +
+              "listCachePools: " + e.getMessage());
+        }
+        while (true) {
+          CachePoolEntry entry = null;
           try {
-            iter = dfs.listCacheDirectives(filter);
+            if (!iter.hasNext()) {
+              break;
+            }
             entry = iter.next();
           } catch (IOException e) {
-            fail("got IOException while calling " +
-                "listCacheDirectives: " + e.getMessage());
+            fail("got IOException while iterating through " +
+                "listCachePools: " + e.getMessage());
+          }
+          if (entry == null) {
+            break;
+          }
+          if (!entry.getInfo().getPoolName().equals(pool.getPoolName())) {
+            continue;
           }
-          Assert.assertNotNull(entry);
-          CacheDirectiveStats stats = entry.getStats();
-          if ((targetFilesAffected == stats.getFilesAffected()) &&
-              (targetBytesNeeded == stats.getBytesNeeded()) &&
-              (targetBytesCached == stats.getBytesCached())) {
+          CachePoolStats stats = entry.getStats();
+          if ((targetBytesNeeded == stats.getBytesNeeded()) &&
+              (targetBytesCached == stats.getBytesCached()) &&
+              (targetFilesNeeded == stats.getFilesNeeded()) &&
+              (targetFilesCached == stats.getFilesCached())) {
             return true;
           } else {
-            LOG.info(infoString + ": filesAffected: " + 
-              stats.getFilesAffected() + "/" + targetFilesAffected +
-              ", bytesNeeded: " +
+            LOG.info(infoString + ": " +
+                "filesNeeded: " +
+                stats.getFilesNeeded() + "/" + targetFilesNeeded +
+                ", filesCached: " + 
+                stats.getFilesCached() + "/" + targetFilesCached +
+                ", bytesNeeded: " +
                 stats.getBytesNeeded() + "/" + targetBytesNeeded +
-              ", bytesCached: " + 
+                ", bytesCached: " + 
                 stats.getBytesCached() + "/" + targetBytesCached);
             return false;
           }
         }
-      }, 500, 60000);
+        return false;
+      }
+    }, 500, 60000);
   }
 
   private static void checkNumCachedReplicas(final DistributedFileSystem dfs,
@@ -763,7 +831,7 @@ public class TestCacheDirectives {
       }
       // Uncache and check each path in sequence
       RemoteIterator<CacheDirectiveEntry> entries =
-          nnRpc.listCacheDirectives(0, null);
+        new CacheDirectiveIterator(nnRpc, null);
       for (int i=0; i<numFiles; i++) {
         CacheDirectiveEntry entry = entries.next();
         nnRpc.removeCacheDirective(entry.getInfo().getId());
@@ -836,7 +904,8 @@ public class TestCacheDirectives {
       NameNode namenode = cluster.getNameNode();
       // Create the pool
       final String pool = "friendlyPool";
-      dfs.addCachePool(new CachePoolInfo(pool));
+      final CachePoolInfo poolInfo = new CachePoolInfo(pool);
+      dfs.addCachePool(poolInfo);
       // Create some test files
       final List<Path> paths = new LinkedList<Path>();
       paths.add(new Path("/foo/bar"));
@@ -852,6 +921,7 @@ public class TestCacheDirectives {
       }
       waitForCachedBlocks(namenode, 0, 0,
           "testWaitForCachedReplicasInDirectory:0");
+
       // cache entire directory
       long id = dfs.addCacheDirective(
             new CacheDirectiveInfo.Builder().
@@ -860,14 +930,20 @@ public class TestCacheDirectives {
               setPool(pool).
               build());
       waitForCachedBlocks(namenode, 4, 8,
-          "testWaitForCachedReplicasInDirectory:1");
+          "testWaitForCachedReplicasInDirectory:1:blocks");
       // Verify that listDirectives gives the stats we want.
-      waitForCachedStats(dfs, 2,
-          8 * BLOCK_SIZE, 8 * BLOCK_SIZE,
+      waitForCacheDirectiveStats(dfs,
+          4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
+          2, 2,
           new CacheDirectiveInfo.Builder().
               setPath(new Path("/foo")).
               build(),
-          "testWaitForCachedReplicasInDirectory:2");
+          "testWaitForCachedReplicasInDirectory:1:directive");
+      waitForCachePoolStats(dfs,
+          4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
+          2, 2,
+          poolInfo, "testWaitForCachedReplicasInDirectory:1:pool");
+
       long id2 = dfs.addCacheDirective(
             new CacheDirectiveInfo.Builder().
               setPath(new Path("/foo/bar")).
@@ -876,28 +952,42 @@ public class TestCacheDirectives {
               build());
       // wait for an additional 2 cached replicas to come up
       waitForCachedBlocks(namenode, 4, 10,
-          "testWaitForCachedReplicasInDirectory:3");
+          "testWaitForCachedReplicasInDirectory:2:blocks");
       // the directory directive's stats are unchanged
-      waitForCachedStats(dfs, 2,
-          8 * BLOCK_SIZE, 8 * BLOCK_SIZE,
+      waitForCacheDirectiveStats(dfs,
+          4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
+          2, 2,
           new CacheDirectiveInfo.Builder().
               setPath(new Path("/foo")).
               build(),
-          "testWaitForCachedReplicasInDirectory:4");
+          "testWaitForCachedReplicasInDirectory:2:directive-1");
       // verify /foo/bar's stats
-      waitForCachedStats(dfs, 1,
+      waitForCacheDirectiveStats(dfs,
           4 * numBlocksPerFile * BLOCK_SIZE,
           // only 3 because the file only has 3 replicas, not 4 as requested.
           3 * numBlocksPerFile * BLOCK_SIZE,
+          1,
+          // only 0 because the file can't be fully cached
+          0,
           new CacheDirectiveInfo.Builder().
               setPath(new Path("/foo/bar")).
               build(),
-          "testWaitForCachedReplicasInDirectory:5");
+          "testWaitForCachedReplicasInDirectory:2:directive-2");
+      waitForCachePoolStats(dfs,
+          (4+4) * numBlocksPerFile * BLOCK_SIZE,
+          (4+3) * numBlocksPerFile * BLOCK_SIZE,
+          3, 2,
+          poolInfo, "testWaitForCachedReplicasInDirectory:2:pool");
+
       // remove and watch numCached go to 0
       dfs.removeCacheDirective(id);
       dfs.removeCacheDirective(id2);
       waitForCachedBlocks(namenode, 0, 0,
-          "testWaitForCachedReplicasInDirectory:6");
+          "testWaitForCachedReplicasInDirectory:3:blocks");
+      waitForCachePoolStats(dfs,
+          0, 0,
+          0, 0,
+          poolInfo, "testWaitForCachedReplicasInDirectory:3:pool");
     } finally {
       cluster.shutdown();
     }
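
The rewritten helpers poll the new per-directive and per-pool counters until they match the expected values. A bare-bones illustration of reading the pool-level statistics the helper compares, outside any polling loop (illustrative only, not verbatim test code; assumes a DistributedFileSystem named dfs and a method that throws IOException):

    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    while (it.hasNext()) {
      CachePoolEntry entry = it.next();
      CachePoolStats stats = entry.getStats();
      // the four counters compared by waitForCachePoolStats above
      System.out.println(entry.getInfo().getPoolName() +
          ": bytesNeeded=" + stats.getBytesNeeded() +
          ", bytesCached=" + stats.getBytesCached() +
          ", filesNeeded=" + stats.getFilesNeeded() +
          ", filesCached=" + stats.getFilesCached());
    }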

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java Fri Dec  6 06:57:15 2013
@@ -158,4 +158,40 @@ public class TestFSNamesystem {
     fsNamesystem = new FSNamesystem(conf, fsImage);
     assertFalse(fsNamesystem.getFsLockForTests().isFair());
   }  
+  
+  @Test
+  public void testFSNamesystemLockCompatibility() {
+    FSNamesystemLock rwLock = new FSNamesystemLock(true);
+
+    assertEquals(0, rwLock.getReadHoldCount());
+    rwLock.readLock().lock();
+    assertEquals(1, rwLock.getReadHoldCount());
+
+    rwLock.readLock().lock();
+    assertEquals(2, rwLock.getReadHoldCount());
+
+    rwLock.readLock().unlock();
+    assertEquals(1, rwLock.getReadHoldCount());
+
+    rwLock.readLock().unlock();
+    assertEquals(0, rwLock.getReadHoldCount());
+
+    assertFalse(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(0, rwLock.getWriteHoldCount());
+    rwLock.writeLock().lock();
+    assertTrue(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(1, rwLock.getWriteHoldCount());
+    
+    rwLock.writeLock().lock();
+    assertTrue(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(2, rwLock.getWriteHoldCount());
+
+    rwLock.writeLock().unlock();
+    assertTrue(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(1, rwLock.getWriteHoldCount());
+
+    rwLock.writeLock().unlock();
+    assertFalse(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(0, rwLock.getWriteHoldCount());
+  }
 }
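
The new test checks that FSNamesystemLock reports the same hold-count bookkeeping as the JDK read-write lock it stands in for. A standalone sketch of that JDK behaviour, not part of the commit (the class name is made up):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class HoldCountDemo {
      public static void main(String[] args) {
        // fair ordering, analogous to FSNamesystemLock(true) in the test above
        ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);

        lock.readLock().lock();
        lock.readLock().lock();
        System.out.println(lock.getReadHoldCount());             // 2 -- the read lock is reentrant
        lock.readLock().unlock();
        lock.readLock().unlock();

        lock.writeLock().lock();
        System.out.println(lock.isWriteLockedByCurrentThread()); // true
        System.out.println(lock.getWriteHoldCount());            // 1
        lock.writeLock().unlock();
      }
    }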

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java Fri Dec  6 06:57:15 2013
@@ -22,6 +22,7 @@ import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.BindException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -50,7 +51,7 @@ public class TestValidateConfigurationSe
    * an exception
    * is thrown when trying to re-use the same port
    */
-  @Test
+  @Test(expected = BindException.class)
   public void testThatMatchingRPCandHttpPortsThrowException() 
       throws IOException {
 
@@ -63,14 +64,7 @@ public class TestValidateConfigurationSe
     FileSystem.setDefaultUri(conf, "hdfs://localhost:9000"); 
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:9000");
     DFSTestUtil.formatNameNode(conf);
-    try {
-      NameNode nameNode = new NameNode(conf);
-      fail("Should have throw the exception since the ports match");
-    } catch (IOException e) {
-      // verify we're getting the right IOException
-      assertTrue(e.toString().contains("dfs.namenode.rpc-address (")); 
-      System.out.println("Got expected exception: " + e.toString());
-    }
+    new NameNode(conf);
   }
 
   /**

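With the expected-exception form, JUnit 4 fails the test unless the named exception escapes the method, which is what lets the try/catch/fail boilerplate above be removed. A generic illustration of the same idiom, not from this commit and using plain java.net sockets rather than a NameNode (the method name is made up):

    // assumes: import java.net.BindException; import java.net.ServerSocket; import org.junit.Test;
    @Test(expected = BindException.class)
    public void testSecondBindOnSamePortThrows() throws Exception {
      ServerSocket first = new ServerSocket(0);   // bind an ephemeral port
      new ServerSocket(first.getLocalPort());     // re-binding the same port throws BindException
    }
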
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Fri Dec  6 06:57:15 2013
@@ -29,6 +29,7 @@ import java.net.URI;
 import java.net.UnknownHostException;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Random;
@@ -86,6 +87,7 @@ public class TestRetryCacheWithHA {
   private static final int BlockSize = 1024;
   private static final short DataNodes = 3;
   private static final int CHECKTIMES = 10;
+  private static final int ResponseSize = 3;
   
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
@@ -120,6 +122,8 @@ public class TestRetryCacheWithHA {
   @Before
   public void setup() throws Exception {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(DataNodes).build();
@@ -1175,4 +1179,92 @@ public class TestRetryCacheWithHA {
           + results.get(op.name));
     }
   }
+
+  /**
+   * Add a list of cache pools, list cache pools,
+   * switch active NN, and list cache pools again.
+   */
+  @Test (timeout=60000)
+  public void testListCachePools() throws Exception {
+    final int poolCount = 7;
+    HashSet<String> poolNames = new HashSet<String>(poolCount);
+    for (int i=0; i<poolCount; i++) {
+      String poolName = "testListCachePools-" + i;
+      dfs.addCachePool(new CachePoolInfo(poolName));
+      poolNames.add(poolName);
+    }
+    listCachePools(poolNames, 0);
+
+    cluster.transitionToStandby(0);
+    cluster.transitionToActive(1);
+    cluster.waitActive(1);
+    listCachePools(poolNames, 1);
+  }
+
+  /**
+   * Add a list of cache directives, list cache directives,
+   * switch active NN, and list cache directives again.
+   */
+  @Test (timeout=60000)
+  public void testListCacheDirectives() throws Exception {
+    final int poolCount = 7;
+    HashSet<String> poolNames = new HashSet<String>(poolCount);
+    Path path = new Path("/p");
+    for (int i=0; i<poolCount; i++) {
+      String poolName = "testListCacheDirectives-" + i;
+      CacheDirectiveInfo directiveInfo =
+        new CacheDirectiveInfo.Builder().setPool(poolName).setPath(path).build();
+      dfs.addCachePool(new CachePoolInfo(poolName));
+      dfs.addCacheDirective(directiveInfo);
+      poolNames.add(poolName);
+    }
+    listCacheDirectives(poolNames, 0);
+
+    cluster.transitionToStandby(0);
+    cluster.transitionToActive(1);
+    cluster.waitActive(1);
+    listCacheDirectives(poolNames, 1);
+  }
+
+  @SuppressWarnings("unchecked")
+  private void listCachePools(
+      HashSet<String> poolNames, int active) throws Exception {
+    HashSet<String> tmpNames = (HashSet<String>)poolNames.clone();
+    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
+    int poolCount = poolNames.size();
+    for (int i=0; i<poolCount; i++) {
+      CachePoolEntry pool = pools.next();
+      String poolName = pool.getInfo().getPoolName();
+      assertTrue("The pool name should be expected", tmpNames.remove(poolName));
+      if (i % 2 == 0) {
+        int standby = active;
+        active = (standby == 0) ? 1 : 0;
+        cluster.transitionToStandby(standby);
+        cluster.transitionToActive(active);
+        cluster.waitActive(active);
+      }
+    }
+    assertTrue("All pools must be found", tmpNames.isEmpty());
+  }
+
+  @SuppressWarnings("unchecked")
+  private void listCacheDirectives(
+      HashSet<String> poolNames, int active) throws Exception {
+    HashSet<String> tmpNames = (HashSet<String>)poolNames.clone();
+    RemoteIterator<CacheDirectiveEntry> directives = dfs.listCacheDirectives(null);
+    int poolCount = poolNames.size();
+    for (int i=0; i<poolCount; i++) {
+      CacheDirectiveEntry directive = directives.next();
+      String poolName = directive.getInfo().getPool();
+      assertTrue("The pool name should be expected", tmpNames.remove(poolName));
+      if (i % 2 == 0) {
+        int standby = active;
+        active = (standby == 0) ? 1 : 0;
+        cluster.transitionToStandby(standby);
+        cluster.transitionToActive(active);
+        cluster.waitActive(active);
+      }
+    }
+    assertTrue("All pools must be found", tmpNames.isEmpty());
+  }
 }
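
Both new tests rely on the batched listing iterators (capped at ResponseSize entries per RPC here) continuing to work across a NameNode failover. The failover step they repeat while iterating, pulled out as a sketch (illustrative only, not verbatim test code; dfs and cluster are the fields set up in @Before):

    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
    // fail over while the iterator is only partially consumed
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    cluster.waitActive(1);
    while (pools.hasNext()) {
      // the remaining batches are fetched from the new active NameNode
      System.out.println("pool: " + pools.next().getInfo().getPoolName());
    }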

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java Fri Dec  6 06:57:15 2013
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -49,7 +50,7 @@ public class TestHttpsFileSystem {
   public static void setUp() throws Exception {
     conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
-    conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
+    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
 
     File base = new File(BASEDIR);
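
The boolean dfs.https.enable switch gives way here to the dfs.http.policy setting, which names the transport explicitly. A minimal sketch of the new style (illustrative only; the localhost:0 value is the same ephemeral-port convention the test uses):

    Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
    // HttpConfig.Policy also defines HTTP_ONLY and HTTP_AND_HTTPS for plain or mixed setups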

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml?rev=1548386&r1=1548385&r2=1548386&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml Fri Dec  6 06:57:15 2013
@@ -399,5 +399,63 @@
       </comparators>
     </test>
 
+    <test> <!--Tested -->
+      <description>Testing listing cache pool statistics</description>
+      <test-commands>
+        <cache-admin-command>-addPool foo -owner bob -group bob -mode 0664</cache-admin-command>
+        <cache-admin-command>-addPool bar -owner alice -group alicegroup -mode 0755</cache-admin-command>
+        <cache-admin-command>-listPools -stats</cache-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <cache-admin-command>-removePool foo</cache-admin-command>
+        <cache-admin-command>-removePool bar</cache-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Found 2 results.</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>bar   alice  alicegroup  rwxr-xr-x      100             0             0             0             0</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>foo   bob    bob         rw-rw-r--      100             0             0             0             0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>Testing listing cache directive statistics</description>
+      <test-commands>
+        <cache-admin-command>-addPool pool1</cache-admin-command>
+        <cache-admin-command>-addDirective -path /foo -pool pool1 -ttl 2d</cache-admin-command>
+        <cache-admin-command>-addDirective -path /bar -pool pool1 -ttl 24h</cache-admin-command>
+        <cache-admin-command>-addDirective -path /baz -replication 2 -pool pool1 -ttl 60m</cache-admin-command>
+        <cache-admin-command>-listDirectives -pool pool1 -stats</cache-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <cache-admin-command>-removePool pool1</cache-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Found 3 entries</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>/foo              0             0             0             0</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>/bar              0             0             0             0</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>/baz              0             0             0             0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
   </tests>
 </configuration>


