hadoop-hdfs-commits mailing list archives

From: cmcc...@apache.org
Subject: svn commit: r1619012 [3/35] - in /hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-...
Date: Tue, 19 Aug 2014 23:50:25 GMT
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java Tue Aug 19 23:49:39 2014
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.lib.server.ServerException;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.lib.servlet.ServerWebApp;
-import org.apache.hadoop.lib.wsrs.UserProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -103,9 +102,6 @@ public class HttpFSServerWebApp extends 
     LOG.info("Connects to Namenode [{}]",
              get().get(FileSystemAccess.class).getFileSystemConfiguration().
                get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
-    String userPattern = getConfig().get(UserProvider.USER_PATTERN_KEY, 
-      UserProvider.USER_PATTERN_DEFAULT);
-    UserProvider.setUserPattern(userPattern);
   }
 
   /**

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java Tue Aug 19 23:49:39 2014
@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.lib.server.BaseService;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.FileSystemAccess;
@@ -395,6 +396,10 @@ public class FileSystemAccessService ext
     Configuration conf = new Configuration(true);
     ConfigurationUtils.copy(serviceHadoopConf, conf);
     conf.setBoolean(FILE_SYSTEM_SERVICE_CREATED, true);
+
+    // Force-clear server-side umask to make HttpFS match WebHDFS behavior
+    conf.set(FsPermission.UMASK_LABEL, "000");
+
     return conf;
   }
 

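[Context on the umask override above: HDFS applies the configured umask to the permission a client requests at create time, so forcing it to 000 on the HttpFS server side lets the client-supplied permission take effect verbatim, matching WebHDFS. A minimal standalone sketch of that mechanism using the FsPermission API; it is illustrative only and not part of this commit.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class UmaskEffect {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // FsPermission.UMASK_LABEL is "fs.permissions.umask-mode".
        conf.set(FsPermission.UMASK_LABEL, "000");
        FsPermission requested = new FsPermission((short) 0777);
        // With umask 000 no bits are masked out, so the file keeps
        // exactly the permission the HTTP client asked for.
        FsPermission effective = requested.applyUMask(FsPermission.getUMask(conf));
        System.out.println(effective);  // rwxrwxrwx
      }
    }
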
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java Tue Aug 19 23:49:39 2014
@@ -19,6 +19,9 @@ package org.apache.hadoop.lib.wsrs;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
+import com.google.common.collect.Lists;
+
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -28,14 +31,14 @@ import java.util.Map;
  */
 @InterfaceAudience.Private
 public class Parameters {
-  private Map<String, Param<?>> params;
+  private Map<String, List<Param<?>>> params;
 
   /**
    * Constructor that receives the request parsed parameters.
    *
    * @param params the request parsed parameters.
    */
-  public Parameters(Map<String, Param<?>> params) {
+  public Parameters(Map<String, List<Param<?>>> params) {
     this.params = params;
   }
 
@@ -44,11 +47,36 @@ public class Parameters {
    *
    * @param name parameter name.
    * @param klass class of the parameter, used for value casting.
-  * @return the value of the parameter.
+   * @return the value of the parameter.
    */
   @SuppressWarnings("unchecked")
   public <V, T extends Param<V>> V get(String name, Class<T> klass) {
-    return ((T)params.get(name)).value();
+    List<Param<?>> multiParams = (List<Param<?>>)params.get(name);
+    if (multiParams != null && multiParams.size() > 0) {
+      return ((T) multiParams.get(0)).value(); // Return first value;
+    }
+    return null;
   }
   
+  /**
+   * Returns the values of a request parsed parameter.
+   *
+   * @param name parameter name.
+   * @param klass class of the parameter, used for value casting.
+   * @return List<V> the values of the parameter.
+   */
+  @SuppressWarnings("unchecked")
+  public <V, T extends Param<V>> List<V> getValues(String name, Class<T> klass) {
+    List<Param<?>> multiParams = (List<Param<?>>)params.get(name);
+    List<V> values = Lists.newArrayList();
+    if (multiParams != null) {
+      for (Param<?> param : multiParams) {
+        V value = ((T) param).value();
+        if (value != null) {
+          values.add(value);
+        }
+      }
+    }
+    return values;
+  }
 }

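[The change above makes the parameter map multi-valued so that an operation such as GETXATTRS can accept a repeated query parameter, e.g. ?op=GETXATTRS&xattr.name=user.a1&xattr.name=user.a2; get() keeps the old first-value behavior while getValues() returns every occurrence. A self-contained sketch mirroring that lookup over a plain map; the Param wiring is elided and the names are illustrative.]

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class MultiValuedLookup {
      public static void main(String[] args) {
        // One list per parameter name instead of first-value-wins.
        Map<String, List<String>> params = new HashMap<String, List<String>>();
        params.put("op", Arrays.asList("GETXATTRS"));
        params.put("xattr.name", Arrays.asList("user.a1", "user.a2"));

        // get()-style lookup: first occurrence, or null when absent.
        List<String> ops = params.get("op");
        String op = (ops != null && !ops.isEmpty()) ? ops.get(0) : null;
        System.out.println(op);  // GETXATTRS

        // getValues()-style lookup: every occurrence, in request order.
        List<String> names = params.containsKey("xattr.name")
            ? new ArrayList<String>(params.get("xattr.name"))
            : new ArrayList<String>();
        System.out.println(names);  // [user.a1, user.a2]
      }
    }

[Note also that ParametersProvider, changed below, instantiates a fresh Param for each repeated value, so an earlier parsed value is never overwritten by a later one.]
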
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java Tue Aug 19 23:49:39 2014
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.lib.wsrs;
 
+import com.google.common.collect.Lists;
 import com.sun.jersey.api.core.HttpContext;
 import com.sun.jersey.core.spi.component.ComponentContext;
 import com.sun.jersey.core.spi.component.ComponentScope;
@@ -31,6 +32,7 @@ import javax.ws.rs.core.MultivaluedMap;
 import java.lang.reflect.Type;
 import java.text.MessageFormat;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -56,10 +58,11 @@ public class ParametersProvider
   @Override
   @SuppressWarnings("unchecked")
   public Parameters getValue(HttpContext httpContext) {
-    Map<String, Param<?>> map = new HashMap<String, Param<?>>();
-    MultivaluedMap<String, String> queryString =
+    Map<String, List<Param<?>>> map = new HashMap<String, List<Param<?>>>();
+    Map<String, List<String>> queryString =
       httpContext.getRequest().getQueryParameters();
-    String str = queryString.getFirst(driverParam);
+    String str = ((MultivaluedMap<String, String>) queryString).
+        getFirst(driverParam);
     if (str == null) {
       throw new IllegalArgumentException(
         MessageFormat.format("Missing Operation parameter [{0}]",
@@ -77,26 +80,40 @@ public class ParametersProvider
         MessageFormat.format("Unsupported Operation [{0}]", op));
     }
     for (Class<Param<?>> paramClass : paramsDef.get(op)) {
-      Param<?> param;
-      try {
-        param = paramClass.newInstance();
-      } catch (Exception ex) {
-        throw new UnsupportedOperationException(
-          MessageFormat.format(
-            "Param class [{0}] does not have default constructor",
-            paramClass.getName()));
+      Param<?> param = newParam(paramClass);
+      List<Param<?>> paramList = Lists.newArrayList();
+      List<String> ps = queryString.get(param.getName());
+      if (ps != null) {
+        for (String p : ps) {
+          try {
+            param.parseParam(p);
+          }
+          catch (Exception ex) {
+            throw new IllegalArgumentException(ex.toString(), ex);
+          }
+          paramList.add(param);
+          param = newParam(paramClass);
+        }
+      } else {
+        paramList.add(param);
       }
-      try {
-        param.parseParam(queryString.getFirst(param.getName()));
-      }
-      catch (Exception ex) {
-        throw new IllegalArgumentException(ex.toString(), ex);
-      }
-      map.put(param.getName(), param);
+
+      map.put(param.getName(), paramList);
     }
     return new Parameters(map);
   }
 
+  private Param<?> newParam(Class<Param<?>> paramClass) {
+    try {
+      return paramClass.newInstance();
+    } catch (Exception ex) {
+      throw new UnsupportedOperationException(
+        MessageFormat.format(
+          "Param class [{0}] does not have default constructor",
+          paramClass.getName()));
+    }
+  }
+
   @Override
   public ComponentScope getScope() {
     return ComponentScope.PerRequest;

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml Tue Aug 19 23:49:39 2014
@@ -34,8 +34,6 @@
       org.apache.hadoop.lib.service.instrumentation.InstrumentationService,
       org.apache.hadoop.lib.service.scheduler.SchedulerService,
       org.apache.hadoop.lib.service.security.GroupsService,
-      org.apache.hadoop.lib.service.security.ProxyUserService,
-      org.apache.hadoop.lib.service.security.DelegationTokenManagerService,
       org.apache.hadoop.lib.service.hadoop.FileSystemAccessService
     </value>
     <description>
@@ -119,6 +117,10 @@
   </property>
 
   <!-- HttpFSServer proxy user Configuration -->
+<!--
+
+  The following 2 properties within this comment are provided as an
+  example to facilitate configuring HttpFS proxyusers.
 
   <property>
     <name>httpfs.proxyuser.#USER#.hosts</name>
@@ -153,6 +155,7 @@
       in the property name.
     </description>
   </property>
+-->
 
   <!-- HttpFS Delegation Token configuration -->
 
@@ -226,12 +229,4 @@
     </description>
   </property>
 
-  <property>
-    <name>httpfs.user.provider.user.pattern</name>
-    <value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value>
-    <description>
-      Valid pattern for user and group names, it must be a valid java regex.
-    </description>
-  </property>
-
 </configuration>

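[With the proxyuser template above now commented out by default, enabling proxying requires concrete properties in httpfs-site.xml, substituting the proxying user for #USER#. A sketch of the equivalent settings through the Configuration API; the user name and values are illustrative only.]

    import org.apache.hadoop.conf.Configuration;

    public class ProxyUserConfig {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // Hosts the proxy user may connect from, and groups whose
        // members it may impersonate ('*' would mean no restriction).
        conf.set("httpfs.proxyuser.myclient.hosts", "client1.example.com");
        conf.set("httpfs.proxyuser.myclient.groups", "staff");
        System.out.println(conf.get("httpfs.proxyuser.myclient.hosts"));
      }
    }
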
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm Tue Aug 19 23:49:39 2014
@@ -18,8 +18,6 @@
 
 Hadoop HDFS over HTTP ${project.version} - Server Setup
 
-  \[ {{{./index.html}Go Back}} \]
-
   This page explains how to quickly setup HttpFS with Pseudo authentication
   against a Hadoop cluster with Pseudo authentication.
 
@@ -159,5 +157,3 @@ $ keytool -genkey -alias tomcat -keyalg 
   <<<swebhdfs://>>> scheme. Make sure the JVM is picking up the truststore
   containing the public key of the SSL certificate if using a self-signed
   certificate.
-
-  \[ {{{./index.html}Go Back}} \]

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm Tue Aug 19 23:49:39 2014
@@ -18,8 +18,6 @@
 
 Hadoop HDFS over HTTP ${project.version} - Using HTTP Tools
 
-  \[ {{{./index.html}Go Back}} \]
-
 * Security
 
   Out of the box HttpFS supports both pseudo authentication and Kerberos HTTP
@@ -87,5 +85,3 @@ $ curl --negotiate -u foo -c ~/.httpfsau
 +---+
 $ curl -b ~/.httpfsauth "http://<HTTPFS_HOST>:14000/webhdfs/v1?op=liststatus"
 +---+
-
-  \[ {{{./index.html}Go Back}} \]

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java Tue Aug 19 23:49:39 2014
@@ -26,6 +26,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -45,6 +47,8 @@ import org.junit.runners.Parameterized;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;
 
+import com.google.common.collect.Lists;
+
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
@@ -57,6 +61,8 @@ import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.List;
+import java.util.Map;
 
 @RunWith(value = Parameterized.class)
 public abstract class BaseTestHttpFSWith extends HFSTestCase {
@@ -87,6 +93,8 @@ public abstract class BaseTestHttpFSWith
     String fsDefaultName = getProxiedFSURI();
     Configuration conf = new Configuration(false);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     File hdfsSite = new File(new File(homeDir, "conf"), "hdfs-site.xml");
     OutputStream os = new FileOutputStream(hdfsSite);
     conf.writeXml(os);
@@ -478,10 +486,305 @@ public abstract class BaseTestHttpFSWith
     Assert.assertEquals(httpContentSummary.getSpaceConsumed(), hdfsContentSummary.getSpaceConsumed());
     Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
   }
+  
+  /** Set xattr */
+  private void testSetXAttr() throws Exception {
+    if (!isLocalFS()) {
+      FileSystem fs = FileSystem.get(getProxiedFSConf());
+      fs.mkdirs(getProxiedFSTestDir());
+      Path path = new Path(getProxiedFSTestDir(), "foo.txt");
+      OutputStream os = fs.create(path);
+      os.write(1);
+      os.close();
+      fs.close();
+ 
+      final String name1 = "user.a1";
+      final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
+      final String name2 = "user.a2";
+      final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
+      final String name3 = "user.a3";
+      final byte[] value3 = null;
+      final String name4 = "trusted.a1";
+      final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
+      final String name5 = "a1";
+      fs = getHttpFSFileSystem();
+      fs.setXAttr(path, name1, value1);
+      fs.setXAttr(path, name2, value2);
+      fs.setXAttr(path, name3, value3);
+      fs.setXAttr(path, name4, value4);
+      try {
+        fs.setXAttr(path, name5, value1);
+        Assert.fail("Set xAttr with incorrect name format should fail.");
+      } catch (IOException e) {
+      } catch (IllegalArgumentException e) {
+      }
+      fs.close();
+
+      fs = FileSystem.get(getProxiedFSConf());
+      Map<String, byte[]> xAttrs = fs.getXAttrs(path);
+      fs.close();
+      Assert.assertEquals(4, xAttrs.size());
+      Assert.assertArrayEquals(value1, xAttrs.get(name1));
+      Assert.assertArrayEquals(value2, xAttrs.get(name2));
+      Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
+      Assert.assertArrayEquals(value4, xAttrs.get(name4));
+    }
+  }
+
+  /** Get xattrs */
+  private void testGetXAttrs() throws Exception {
+    if (!isLocalFS()) {
+      FileSystem fs = FileSystem.get(getProxiedFSConf());
+      fs.mkdirs(getProxiedFSTestDir());
+      Path path = new Path(getProxiedFSTestDir(), "foo.txt");
+      OutputStream os = fs.create(path);
+      os.write(1);
+      os.close();
+      fs.close();
+
+      final String name1 = "user.a1";
+      final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
+      final String name2 = "user.a2";
+      final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
+      final String name3 = "user.a3";
+      final byte[] value3 = null;
+      final String name4 = "trusted.a1";
+      final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
+      fs = FileSystem.get(getProxiedFSConf());
+      fs.setXAttr(path, name1, value1);
+      fs.setXAttr(path, name2, value2);
+      fs.setXAttr(path, name3, value3);
+      fs.setXAttr(path, name4, value4);
+      fs.close();
+
+      // Get xattrs with names parameter
+      fs = getHttpFSFileSystem();
+      List<String> names = Lists.newArrayList();
+      names.add(name1);
+      names.add(name2);
+      names.add(name3);
+      names.add(name4);
+      Map<String, byte[]> xAttrs = fs.getXAttrs(path, names);
+      fs.close();
+      Assert.assertEquals(4, xAttrs.size());
+      Assert.assertArrayEquals(value1, xAttrs.get(name1));
+      Assert.assertArrayEquals(value2, xAttrs.get(name2));
+      Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
+      Assert.assertArrayEquals(value4, xAttrs.get(name4));
+
+      // Get specific xattr
+      fs = getHttpFSFileSystem();
+      byte[] value = fs.getXAttr(path, name1);
+      Assert.assertArrayEquals(value1, value);
+      final String name5 = "a1";
+      try {
+        value = fs.getXAttr(path, name5);
+        Assert.fail("Get xAttr with incorrect name format should fail.");
+      } catch (IOException e) {
+      } catch (IllegalArgumentException e) {
+      }
+      fs.close();
+
+      // Get all xattrs
+      fs = getHttpFSFileSystem();
+      xAttrs = fs.getXAttrs(path);
+      fs.close();
+      Assert.assertEquals(4, xAttrs.size());
+      Assert.assertArrayEquals(value1, xAttrs.get(name1));
+      Assert.assertArrayEquals(value2, xAttrs.get(name2));
+      Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
+      Assert.assertArrayEquals(value4, xAttrs.get(name4));
+    }
+  }
+
+  /** Remove xattr */
+  private void testRemoveXAttr() throws Exception {
+    if (!isLocalFS()) {
+      FileSystem fs = FileSystem.get(getProxiedFSConf());
+      fs.mkdirs(getProxiedFSTestDir());
+      Path path = new Path(getProxiedFSTestDir(), "foo.txt");
+      OutputStream os = fs.create(path);
+      os.write(1);
+      os.close();
+      fs.close();
+
+      final String name1 = "user.a1";
+      final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
+      final String name2 = "user.a2";
+      final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
+      final String name3 = "user.a3";
+      final byte[] value3 = null;
+      final String name4 = "trusted.a1";
+      final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
+      final String name5 = "a1";
+      fs = FileSystem.get(getProxiedFSConf());
+      fs.setXAttr(path, name1, value1);
+      fs.setXAttr(path, name2, value2);
+      fs.setXAttr(path, name3, value3);
+      fs.setXAttr(path, name4, value4);
+      fs.close();
+
+      fs = getHttpFSFileSystem();
+      fs.removeXAttr(path, name1);
+      fs.removeXAttr(path, name3);
+      fs.removeXAttr(path, name4);
+      try {
+        fs.removeXAttr(path, name5);
+        Assert.fail("Remove xAttr with incorrect name format should fail.");
+      } catch (IOException e) {
+      } catch (IllegalArgumentException e) {
+      }
+
+      fs = FileSystem.get(getProxiedFSConf());
+      Map<String, byte[]> xAttrs = fs.getXAttrs(path);
+      fs.close();
+      Assert.assertEquals(1, xAttrs.size());
+      Assert.assertArrayEquals(value2, xAttrs.get(name2));
+    }
+  }
+
+  /** List xattrs */
+  private void testListXAttrs() throws Exception {
+    if (!isLocalFS()) {
+      FileSystem fs = FileSystem.get(getProxiedFSConf());
+      fs.mkdirs(getProxiedFSTestDir());
+      Path path = new Path(getProxiedFSTestDir(), "foo.txt");
+      OutputStream os = fs.create(path);
+      os.write(1);
+      os.close();
+      fs.close();
+
+      final String name1 = "user.a1";
+      final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
+      final String name2 = "user.a2";
+      final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
+      final String name3 = "user.a3";
+      final byte[] value3 = null;
+      final String name4 = "trusted.a1";
+      final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
+      fs = FileSystem.get(getProxiedFSConf());
+      fs.setXAttr(path, name1, value1);
+      fs.setXAttr(path, name2, value2);
+      fs.setXAttr(path, name3, value3);
+      fs.setXAttr(path, name4, value4);
+      fs.close();
+
+      fs = getHttpFSFileSystem();
+      List<String> names = fs.listXAttrs(path);
+      Assert.assertEquals(4, names.size());
+      Assert.assertTrue(names.contains(name1));
+      Assert.assertTrue(names.contains(name2));
+      Assert.assertTrue(names.contains(name3));
+      Assert.assertTrue(names.contains(name4));
+    }
+  }
+
+  /**
+   * Runs assertions testing that two AclStatus objects contain the same info
+   * @param a First AclStatus
+   * @param b Second AclStatus
+   * @throws Exception
+   */
+  private void assertSameAcls(AclStatus a, AclStatus b) throws Exception {
+    Assert.assertTrue(a.getOwner().equals(b.getOwner()));
+    Assert.assertTrue(a.getGroup().equals(b.getGroup()));
+    Assert.assertTrue(a.isStickyBit() == b.isStickyBit());
+    Assert.assertTrue(a.getEntries().size() == b.getEntries().size());
+    for (AclEntry e : a.getEntries()) {
+      Assert.assertTrue(b.getEntries().contains(e));
+    }
+    for (AclEntry e : b.getEntries()) {
+      Assert.assertTrue(a.getEntries().contains(e));
+    }
+  }
+
+  /**
+   * Simple ACL tests on a file:  Set an acl, add an acl, remove one acl,
+   * and remove all acls.
+   * @throws Exception
+   */
+  private void testFileAcls() throws Exception {
+    if ( isLocalFS() ) {
+      return;
+    }
+
+    final String aclUser1 = "user:foo:rw-";
+    final String aclUser2 = "user:bar:r--";
+    final String aclGroup1 = "group::r--";
+    final String aclSet = "user::rwx," + aclUser1 + ","
+            + aclGroup1 + ",other::---";
+
+    FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
+    FileSystem httpfs = getHttpFSFileSystem();
+
+    Path path = new Path(getProxiedFSTestDir(), "testAclStatus.txt");
+    OutputStream os = proxyFs.create(path);
+    os.write(1);
+    os.close();
+
+    AclStatus proxyAclStat = proxyFs.getAclStatus(path);
+    AclStatus httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet,true));
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.modifyAclEntries(path, AclEntry.parseAclSpec(aclUser2, true));
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.removeAclEntries(path, AclEntry.parseAclSpec(aclUser1, true));
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.removeAcl(path);
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+  }
+
+  /**
+   * Simple acl tests on a directory: set a default acl, remove default acls.
+   * @throws Exception
+   */
+  private void testDirAcls() throws Exception {
+    if ( isLocalFS() ) {
+      return;
+    }
+
+    final String defUser1 = "default:user:glarch:r-x";
+
+    FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
+    FileSystem httpfs = getHttpFSFileSystem();
+
+    Path dir = getProxiedFSTestDir();
+
+    /* ACL Status on a directory */
+    AclStatus proxyAclStat = proxyFs.getAclStatus(dir);
+    AclStatus httpfsAclStat = httpfs.getAclStatus(dir);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    /* Set a default ACL on the directory */
+    httpfs.setAcl(dir, (AclEntry.parseAclSpec(defUser1,true)));
+    proxyAclStat = proxyFs.getAclStatus(dir);
+    httpfsAclStat = httpfs.getAclStatus(dir);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    /* Remove the default ACL */
+    httpfs.removeDefaultAcl(dir);
+    proxyAclStat = proxyFs.getAclStatus(dir);
+    httpfsAclStat = httpfs.getAclStatus(dir);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+  }
 
   protected enum Operation {
     GET, OPEN, CREATE, APPEND, CONCAT, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
-    SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY
+    SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY,
+    FILEACLS, DIRACLS, SET_XATTR, GET_XATTRS, REMOVE_XATTR, LIST_XATTRS
   }
 
   private void operation(Operation op) throws Exception {
@@ -533,6 +836,24 @@ public abstract class BaseTestHttpFSWith
       case CONTENT_SUMMARY:
         testContentSummary();
         break;
+      case FILEACLS:
+        testFileAcls();
+        break;
+      case DIRACLS:
+        testDirAcls();
+        break;
+      case SET_XATTR:
+        testSetXAttr();
+        break;
+      case REMOVE_XATTR:
+        testRemoveXAttr();
+        break;
+      case GET_XATTRS:
+        testGetXAttrs();
+        break;
+      case LIST_XATTRS:
+        testListXAttrs();
+        break;
     }
   }
 

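[The tests above drive the new xattr and ACL operations through the generic FileSystem API, so a client of HttpFS needs nothing HttpFS-specific. A minimal client sketch, assuming an HttpFS server reachable at httpfs-host:14000 via the webhdfs:// scheme; host, port, and path are illustrative.]

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class XAttrClient {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://httpfs-host:14000"), new Configuration());
        Path path = new Path("/tmp/foo.txt");
        fs.setXAttr(path, "user.a1", new byte[]{0x31, 0x32, 0x33});
        System.out.println(fs.listXAttrs(path));      // [user.a1]
        byte[] value = fs.getXAttr(path, "user.a1");
        System.out.println(value.length);             // 3
        fs.removeXAttr(path, "user.a1");
        fs.close();
      }
    }
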
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java Tue Aug 19 23:49:39 2014
@@ -17,15 +17,19 @@
  */
 package org.apache.hadoop.fs.http.server;
 
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
+
 import javax.servlet.ServletException;
 import java.util.Properties;
 
 public class HttpFSKerberosAuthenticationHandlerForTesting
-  extends HttpFSKerberosAuthenticationHandler {
+  extends KerberosDelegationTokenAuthenticationHandler {
 
   @Override
   public void init(Properties config) throws ServletException {
     //NOP overwrite to avoid Kerberos initialization
+    config.setProperty(TOKEN_KIND, "t");
+    initTokenManager(config);
   }
 
   @Override

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java Tue Aug 19 23:49:39 2014
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.fs.http.server;
 
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
+import org.json.simple.JSONArray;
 import org.junit.Assert;
 
 import java.io.BufferedReader;
@@ -31,14 +35,16 @@ import java.io.Writer;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.text.MessageFormat;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
+import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.lib.server.Service;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.Groups;
@@ -59,6 +65,9 @@ import org.junit.Test;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;
 
+import com.google.common.collect.Maps;
+import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
+
 public class TestHttpFSServer extends HFSTestCase {
 
   @Test
@@ -128,6 +137,8 @@ public class TestHttpFSServer extends HF
     String fsDefaultName = TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
     Configuration conf = new Configuration(false);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
     OutputStream os = new FileOutputStream(hdfsSite);
     conf.writeXml(os);
@@ -231,6 +242,389 @@ public class TestHttpFSServer extends HF
     reader.close();
   }
 
+  /**
+   * Talks to the http interface to create a file.
+   *
+   * @param filename The file to create
+   * @param perms The permission field, if any (may be null)
+   * @throws Exception
+   */
+  private void createWithHttp ( String filename, String perms )
+          throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if ( filename.charAt(0) == '/' ) {
+      filename = filename.substring(1);
+    }
+    String pathOps;
+    if ( perms == null ) {
+      pathOps = MessageFormat.format(
+              "/webhdfs/v1/{0}?user.name={1}&op=CREATE",
+              filename, user);
+    } else {
+      pathOps = MessageFormat.format(
+              "/webhdfs/v1/{0}?user.name={1}&permission={2}&op=CREATE",
+              filename, user, perms);
+    }
+    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.addRequestProperty("Content-Type", "application/octet-stream");
+    conn.setRequestMethod("PUT");
+    conn.connect();
+    Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode());
+  }
+
+  /**
+   * Talks to the http interface to get the json output of a *STATUS command
+   * on the given file.
+   *
+   * @param filename The file to query.
+   * @param command Either GETFILESTATUS, LISTSTATUS, or GETACLSTATUS
+   * @return A string containing the JSON output describing the file.
+   * @throws Exception
+   */
+  private String getStatus(String filename, String command)
+          throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if ( filename.charAt(0) == '/' ) {
+      filename = filename.substring(1);
+    }
+    String pathOps = MessageFormat.format(
+            "/webhdfs/v1/{0}?user.name={1}&op={2}",
+            filename, user, command);
+    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.connect();
+    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+    BufferedReader reader =
+            new BufferedReader(new InputStreamReader(conn.getInputStream()));
+
+    return reader.readLine();
+  }
+
+  /**
+   * General-purpose http PUT command to the httpfs server.
+   * @param filename The file to operate upon
+   * @param command The command to perform (SETACL, etc)
+   * @param params Parameters, like "aclspec=..."
+   */
+  private void putCmd(String filename, String command,
+                      String params) throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if ( filename.charAt(0) == '/' ) {
+      filename = filename.substring(1);
+    }
+    String pathOps = MessageFormat.format(
+            "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
+            filename, user, (params == null) ? "" : "&",
+            (params == null) ? "" : params, command);
+    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    conn.connect();
+    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+  }
+
+  /**
+   * Given the JSON output from the GETFILESTATUS call, return the
+   * 'permission' value.
+   *
+   * @param statusJson JSON from GETFILESTATUS
+   * @return The value of 'permission' in statusJson
+   * @throws Exception
+   */
+  private String getPerms ( String statusJson ) throws Exception {
+    JSONParser parser = new JSONParser();
+    JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
+    JSONObject details = (JSONObject) jsonObject.get("FileStatus");
+    return (String) details.get("permission");
+  }
+
+  /**
+   * Given the JSON output from the GETACLSTATUS call, return the
+   * 'entries' value as a List<String>.
+   * @param statusJson JSON from GETACLSTATUS
+   * @return A List of Strings which are the elements of the ACL entries
+   * @throws Exception
+   */
+  private List<String> getAclEntries ( String statusJson ) throws Exception {
+    List<String> entries = new ArrayList<String>();
+    JSONParser parser = new JSONParser();
+    JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
+    JSONObject details = (JSONObject) jsonObject.get("AclStatus");
+    JSONArray jsonEntries = (JSONArray) details.get("entries");
+    if ( jsonEntries != null ) {
+      for (Object e : jsonEntries) {
+        entries.add(e.toString());
+      }
+    }
+    return entries;
+  }
+  
+  /**
+   * Parse xAttrs from JSON result of GETXATTRS call, return xAttrs Map.
+   * @param statusJson JSON from GETXATTRS
+   * @return Map<String, byte[]> xAttrs Map
+   * @throws Exception
+   */
+  private Map<String, byte[]> getXAttrs(String statusJson) throws Exception {
+    Map<String, byte[]> xAttrs = Maps.newHashMap();
+    JSONParser parser = new JSONParser();
+    JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
+    JSONArray jsonXAttrs = (JSONArray) jsonObject.get("XAttrs");
+    if (jsonXAttrs != null) {
+      for (Object a : jsonXAttrs) {
+        String name = (String) ((JSONObject)a).get("name");
+        String value = (String) ((JSONObject)a).get("value");
+        xAttrs.put(name, decodeXAttrValue(value));
+      }
+    }
+    return xAttrs;
+  }
+  
+  /** Decode xattr value from string */
+  private byte[] decodeXAttrValue(String value) throws IOException {
+    if (value != null) {
+      return XAttrCodec.decodeValue(value);
+    } else {
+      return new byte[0];
+    }
+  }
+
+  /**
+   * Validate that files are created with 755 permissions when no
+   * 'permissions' attribute is specified, and when 'permissions'
+   * is specified, that value is honored.
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testPerms() throws Exception {
+    createHttpFSServer(false);
+
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path("/perm"));
+
+    createWithHttp("/perm/none", null);
+    String statusJson = getStatus("/perm/none", "GETFILESTATUS");
+    Assert.assertTrue("755".equals(getPerms(statusJson)));
+
+    createWithHttp("/perm/p-777", "777");
+    statusJson = getStatus("/perm/p-777", "GETFILESTATUS");
+    Assert.assertTrue("777".equals(getPerms(statusJson)));
+
+    createWithHttp("/perm/p-654", "654");
+    statusJson = getStatus("/perm/p-654", "GETFILESTATUS");
+    Assert.assertTrue("654".equals(getPerms(statusJson)));
+
+    createWithHttp("/perm/p-321", "321");
+    statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
+    Assert.assertTrue("321".equals(getPerms(statusJson)));
+  }
+  
+  /**
+   * Validate XAttr get/set/remove calls.
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testXAttrs() throws Exception {
+    final String name1 = "user.a1";
+    final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
+    final String name2 = "user.a2";
+    final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
+    final String dir = "/xattrTest";
+    final String path = dir + "/file";
+    
+    createHttpFSServer(false);
+    
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path(dir));
+    
+    createWithHttp(path,null);
+    String statusJson = getStatus(path, "GETXATTRS");
+    Map<String, byte[]> xAttrs = getXAttrs(statusJson);
+    Assert.assertEquals(0, xAttrs.size());
+    
+    // Set two xattrs
+    putCmd(path, "SETXATTR", setXAttrParam(name1, value1));
+    putCmd(path, "SETXATTR", setXAttrParam(name2, value2));
+    statusJson = getStatus(path, "GETXATTRS");
+    xAttrs = getXAttrs(statusJson);
+    Assert.assertEquals(2, xAttrs.size());
+    Assert.assertArrayEquals(value1, xAttrs.get(name1));
+    Assert.assertArrayEquals(value2, xAttrs.get(name2));
+    
+    // Remove one xattr
+    putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
+    statusJson = getStatus(path, "GETXATTRS");
+    xAttrs = getXAttrs(statusJson);
+    Assert.assertEquals(1, xAttrs.size());
+    Assert.assertArrayEquals(value2, xAttrs.get(name2));
+    
+    // Remove another xattr, then there is no xattr
+    putCmd(path, "REMOVEXATTR", "xattr.name=" + name2);
+    statusJson = getStatus(path, "GETXATTRS");
+    xAttrs = getXAttrs(statusJson);
+    Assert.assertEquals(0, xAttrs.size());
+  }
+  
+  /** Params for setting an xAttr */
+  public static String setXAttrParam(String name, byte[] value) throws IOException {
+    return "xattr.name=" + name + "&xattr.value=" + XAttrCodec.encodeValue(
+        value, XAttrCodec.HEX) + "&encoding=hex&flag=create"; 
+  }
+
+  /**
+   * Validate the various ACL set/modify/remove calls.  General strategy is
+   * to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
+   * and GETACLSTATUS:
+   * <ol>
+   *   <li>Create a file with no ACLs</li>
+   *   <li>Add a user + group ACL</li>
+   *   <li>Add another user ACL</li>
+   *   <li>Remove the first user ACL</li>
+   *   <li>Remove all ACLs</li>
+   * </ol>
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testFileAcls() throws Exception {
+    final String aclUser1 = "user:foo:rw-";
+    final String aclUser2 = "user:bar:r--";
+    final String aclGroup1 = "group::r--";
+    final String aclSpec = "aclspec=user::rwx," + aclUser1 + ","
+            + aclGroup1 + ",other::---";
+    final String modAclSpec = "aclspec=" + aclUser2;
+    final String remAclSpec = "aclspec=" + aclUser1;
+    final String dir = "/aclFileTest";
+    final String path = dir + "/test";
+    String statusJson;
+    List<String> aclEntries;
+
+    createHttpFSServer(false);
+
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path(dir));
+
+    createWithHttp(path, null);
+
+    /* getfilestatus and liststatus don't have 'aclBit' in their reply */
+    statusJson = getStatus(path, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "LISTSTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+
+    /* getaclstatus works and returns no entries */
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+
+    /*
+     * Now set an ACL on the file.  (getfile|list)status have aclBit,
+     * and aclstatus has entries that look familiar.
+     */
+    putCmd(path, "SETACL", aclSpec);
+    statusJson = getStatus(path, "GETFILESTATUS");
+    Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "LISTSTATUS");
+    Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 2);
+    Assert.assertTrue(aclEntries.contains(aclUser1));
+    Assert.assertTrue(aclEntries.contains(aclGroup1));
+
+    /* Modify acl entries to add another user acl */
+    putCmd(path, "MODIFYACLENTRIES", modAclSpec);
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 3);
+    Assert.assertTrue(aclEntries.contains(aclUser1));
+    Assert.assertTrue(aclEntries.contains(aclUser2));
+    Assert.assertTrue(aclEntries.contains(aclGroup1));
+
+    /* Remove the first user acl entry and verify */
+    putCmd(path, "REMOVEACLENTRIES", remAclSpec);
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 2);
+    Assert.assertTrue(aclEntries.contains(aclUser2));
+    Assert.assertTrue(aclEntries.contains(aclGroup1));
+
+    /* Remove all acls and verify */
+    putCmd(path, "REMOVEACL", null);
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+    statusJson = getStatus(path, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "LISTSTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+  }
+
+  /**
+   * Test ACL operations on a directory, including default ACLs.
+   * General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
+   * <ol>
+   *   <li>Initial status with no ACLs</li>
+   *   <li>The addition of a default ACL</li>
+   *   <li>The removal of default ACLs</li>
+   * </ol>
+   *
+   * @throws Exception
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testDirAcls() throws Exception {
+    final String defUser1 = "default:user:glarch:r-x";
+    final String defSpec1 = "aclspec=" + defUser1;
+    final String dir = "/aclDirTest";
+    String statusJson;
+    List<String> aclEntries;
+
+    createHttpFSServer(false);
+
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path(dir));
+
+    /* getfilestatus and liststatus don't have 'aclBit' in their reply */
+    statusJson = getStatus(dir, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+
+    /* No ACLs, either */
+    statusJson = getStatus(dir, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+
+    /* Give it a default ACL and verify */
+    putCmd(dir, "SETACL", defSpec1);
+    statusJson = getStatus(dir, "GETFILESTATUS");
+    Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 5);
+    /* 4 Entries are default:(user|group|mask|other):perm */
+    Assert.assertTrue(aclEntries.contains(defUser1));
+
+    /* Remove the default ACL and re-verify */
+    putCmd(dir, "REMOVEDEFAULTACL", null);
+    statusJson = getStatus(dir, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+  }
+
   @Test
   @TestDir
   @TestJetty
@@ -289,9 +683,9 @@ public class TestHttpFSServer extends HF
 
     AuthenticationToken token =
       new AuthenticationToken("u", "p",
-        HttpFSKerberosAuthenticationHandlerForTesting.TYPE);
+          new KerberosDelegationTokenAuthenticationHandler().getType());
     token.setExpires(System.currentTimeMillis() + 100000000);
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     String tokenSigned = signer.sign(token.toString());
 
     url = new URL(TestJettyHelper.getJettyURL(),
@@ -313,9 +707,9 @@ public class TestHttpFSServer extends HF
     JSONObject json = (JSONObject)
       new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
     json = (JSONObject)
-      json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
+      json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
     String tokenStr = (String)
-        json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+        json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
 
     url = new URL(TestJettyHelper.getJettyURL(),
                   "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java Tue Aug 19 23:49:39 2014
@@ -23,11 +23,11 @@ import org.apache.hadoop.fs.DelegationTo
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import org.apache.hadoop.test.HFSTestCase;
 import org.apache.hadoop.test.KerberosTestUtils;
 import org.apache.hadoop.test.TestDir;
@@ -166,9 +166,9 @@ public class TestHttpFSWithKerberos exte
           .parse(new InputStreamReader(conn.getInputStream()));
         json =
           (JSONObject) json
-            .get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
+            .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
         String tokenStr = (String) json
-          .get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+          .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
 
         //access httpfs using the delegation token
         url = new URL(TestJettyHelper.getJettyURL(),

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java Tue Aug 19 23:49:39 2014
@@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.Test;
 import org.junit.runners.model.FrameworkMethod;
@@ -145,6 +146,8 @@ public class TestHdfsHelper extends Test
       conf.set("dfs.block.access.token.enable", "false");
       conf.set("dfs.permissions", "true");
       conf.set("hadoop.security.authentication", "simple");
+      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
       MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
       builder.numDataNodes(2);
       MiniDFSCluster miniHdfs = builder.build();

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml Tue Aug 19 23:49:39 2014
@@ -135,11 +135,6 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>javax.servlet.jsp</groupId>
-      <artifactId>jsp-api</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
       <scope>compile</scope>
@@ -180,11 +175,6 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>tomcat</groupId>
-      <artifactId>jasper-runtime</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
       <groupId>xmlenc</groupId>
       <artifactId>xmlenc</artifactId>
       <scope>compile</scope>

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java Tue Aug 19 23:49:39 2014
@@ -18,8 +18,9 @@
 package org.apache.hadoop.hdfs.nfs.mount;
 
 import java.io.IOException;
+import java.net.DatagramSocket;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.mount.MountdBase;
 
 /**
@@ -31,13 +32,14 @@ import org.apache.hadoop.mount.MountdBas
  */
 public class Mountd extends MountdBase {
 
-  public Mountd(Configuration config) throws IOException {
-    super(new RpcProgramMountd(config));
+  public Mountd(NfsConfiguration config, DatagramSocket registrationSocket,
+      boolean allowInsecurePorts) throws IOException {
+    super(new RpcProgramMountd(config, registrationSocket, allowInsecurePorts));
   }
   
   public static void main(String[] args) throws IOException {
-    Configuration config = new Configuration();
-    Mountd mountd = new Mountd(config);
+    NfsConfiguration config = new NfsConfiguration();
+    Mountd mountd = new Mountd(config, null, true);
     mountd.start(true);
   }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java Tue Aug 19 23:49:39 2014
@@ -16,10 +16,8 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs.nfs.mount;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NFS_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY;
-
 import java.io.IOException;
+import java.net.DatagramSocket;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
@@ -28,8 +26,9 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mount.MountEntry;
@@ -38,7 +37,6 @@ import org.apache.hadoop.mount.MountResp
 import org.apache.hadoop.nfs.AccessPrivilege;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.RpcCall;
@@ -65,9 +63,7 @@ public class RpcProgramMountd extends Rp
   public static final int VERSION_1 = 1;
   public static final int VERSION_2 = 2;
   public static final int VERSION_3 = 3;
-  public static final int PORT = 4242;
 
-  // Need DFSClient for branch-1 to get ExtendedHdfsFileStatus
   private final DFSClient dfsClient;
   
   /** Synchronized list */
@@ -78,18 +74,22 @@ public class RpcProgramMountd extends Rp
   
   private final NfsExports hostsMatcher;
 
-  public RpcProgramMountd(Configuration config) throws IOException {
+  public RpcProgramMountd(NfsConfiguration config,
+      DatagramSocket registrationSocket, boolean allowInsecurePorts)
+      throws IOException {
     // Note that RPC cache is not enabled
-    super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT),
-        PROGRAM, VERSION_1, VERSION_3);
+    super("mountd", "localhost", config.getInt(
+        NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
+        NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
+        VERSION_3, registrationSocket, allowInsecurePorts);
     exports = new ArrayList<String>();
-    exports.add(config.get(Nfs3Constant.EXPORT_POINT,
-        Nfs3Constant.EXPORT_POINT_DEFAULT));
+    exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
+        NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
     this.hostsMatcher = NfsExports.getInstance(config);
     this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
     UserGroupInformation.setConfiguration(config);
-    SecurityUtil.login(config, DFS_NFS_KEYTAB_FILE_KEY,
-            DFS_NFS_KERBEROS_PRINCIPAL_KEY);
+    SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
+        NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
     this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
   }
   
@@ -104,6 +104,10 @@ public class RpcProgramMountd extends Rp
 
   @Override
   public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
+    if (hostsMatcher == null) {
+      return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
+          null);
+    }
     AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
     if (accessPrivilege == AccessPrivilege.NONE) {
       return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
@@ -194,7 +198,13 @@ public class RpcProgramMountd extends Rp
     if (mntproc == MNTPROC.NULL) {
       out = nullOp(out, xid, client);
     } else if (mntproc == MNTPROC.MNT) {
-      out = mnt(xdr, out, xid, client);
+      // Only do port monitoring for MNT
+      if (!doPortMonitoring(info.remoteAddress())) {
+        out = MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out,
+            xid, null);
+      } else {
+        out = mnt(xdr, out, xid, client);
+      }
     } else if (mntproc == MNTPROC.DUMP) {
       out = dump(out, xid, client);
     } else if (mntproc == MNTPROC.UMNT) {      
@@ -202,16 +212,23 @@ public class RpcProgramMountd extends Rp
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      // Currently only support one NFS export 
+      // Currently only support one NFS export
       List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
-      hostsMatchers.add(hostsMatcher);
-      out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      if (hostsMatcher != null) {
+        hostsMatchers.add(hostsMatcher);
+        out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      } else {
+        // This means there are no valid exports provided.
+        RpcAcceptedReply.getInstance(xid,
+          RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+          out);
+      }
     } else {
       // Invalid procedure
       RpcAcceptedReply.getInstance(xid,
           RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
           out);
-    }  
+    }
     ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
     RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
     RpcUtil.sendRpcResponse(ctx, rsp);
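
Two behavioral points in this file: the MNT path now consults doPortMonitoring(info.remoteAddress()) before serving a mount request, and both mnt() and the EXPORT procedure tolerate a null hostsMatcher (no valid exports configured). doPortMonitoring itself is defined outside this file, in the RPC base class changed elsewhere in this commit; the sketch below shows the conventional NFS "secure ports" rule it is expected to enforce, and its body is an assumption, not the committed implementation.

import java.net.InetSocketAddress;
import java.net.SocketAddress;

final class PortMonitorSketch {
  static boolean doPortMonitoring(SocketAddress remoteAddress,
      boolean allowInsecurePorts) {
    if (allowInsecurePorts) {
      return true; // monitoring disabled: accept any source port
    }
    if (remoteAddress instanceof InetSocketAddress) {
      // Reserved ("privileged") ports are below 1024; a request from a
      // higher port fails the check and the caller replies NFS3ERR_ACCES.
      return ((InetSocketAddress) remoteAddress).getPort() < 1024;
    }
    return false; // non-IP transport: cannot verify the source port
  }
}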

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java Tue Aug 19 23:49:39 2014
@@ -30,10 +30,10 @@ import java.util.concurrent.TimeUnit;
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -72,7 +72,7 @@ class DFSClientCache {
   final static int DEFAULT_DFS_INPUTSTREAM_CACHE_SIZE = 1024;
   final static int DEFAULT_DFS_INPUTSTREAM_CACHE_TTL = 10 * 60;
 
-  private final Configuration config;
+  private final NfsConfiguration config;
 
   private static class DFSInputStreamCaheKey {
     final String userId;
@@ -99,11 +99,11 @@ class DFSClientCache {
     }
   }
 
-  DFSClientCache(Configuration config) {
+  DFSClientCache(NfsConfiguration config) {
     this(config, DEFAULT_DFS_CLIENT_CACHE_SIZE);
   }
   
-  DFSClientCache(Configuration config, int clientCache) {
+  DFSClientCache(NfsConfiguration config, int clientCache) {
     this.config = config;
     this.clientCache = CacheBuilder.newBuilder()
         .maximumSize(clientCache)
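
DFSClientCache now carries an NfsConfiguration rather than a bare Configuration; its caching pattern is otherwise unchanged. For readers unfamiliar with the class, the following is a minimal sketch of that pattern: a bounded Guava LoadingCache keyed by user name, creating one DFSClient per user on demand. The class name, size bound, and the simplified loader are illustrative assumptions; the real loader runs inside UserGroupInformation.doAs() for the proxy user.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

final class ClientCacheSketch {
  private final LoadingCache<String, DFSClient> clientCache;

  ClientCacheSketch(final NfsConfiguration config, int maxClients) {
    this.clientCache = CacheBuilder.newBuilder()
        .maximumSize(maxClients) // bounded number of cached per-user clients
        .build(new CacheLoader<String, DFSClient>() {
          @Override
          public DFSClient load(String userName) throws Exception {
            // Simplified: the real class creates the client under a
            // UserGroupInformation.doAs() for userName.
            return new DFSClient(NameNode.getAddress(config), config);
          }
        });
  }
}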

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java Tue Aug 19 23:49:39 2014
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import java.io.IOException;
+import java.net.DatagramSocket;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
 import org.apache.hadoop.nfs.nfs3.Nfs3Base;
 import org.apache.hadoop.util.StringUtils;
@@ -34,14 +36,14 @@ import com.google.common.annotations.Vis
 public class Nfs3 extends Nfs3Base {
   private Mountd mountd;
   
-  static {
-    Configuration.addDefaultResource("hdfs-default.xml");
-    Configuration.addDefaultResource("hdfs-site.xml");
+  public Nfs3(NfsConfiguration conf) throws IOException {
+    this(conf, null, true);
   }
   
-  public Nfs3(Configuration conf) throws IOException {
-    super(new RpcProgramNfs3(conf), conf);
-    mountd = new Mountd(conf);
+  public Nfs3(NfsConfiguration conf, DatagramSocket registrationSocket,
+      boolean allowInsecurePorts) throws IOException {
+    super(new RpcProgramNfs3(conf, registrationSocket, allowInsecurePorts), conf);
+    mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
   }
 
   public Mountd getMountd() {
@@ -54,9 +56,19 @@ public class Nfs3 extends Nfs3Base {
     start(register);
   }
   
-  public static void main(String[] args) throws IOException {
-    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);    
-    final Nfs3 nfsServer = new Nfs3(new Configuration());
+  static void startService(String[] args,
+      DatagramSocket registrationSocket) throws IOException {
+    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
+    NfsConfiguration conf = new NfsConfiguration();
+    boolean allowInsecurePorts = conf.getBoolean(
+        NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_KEY,
+        NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_DEFAULT);
+    final Nfs3 nfsServer = new Nfs3(conf, registrationSocket,
+        allowInsecurePorts);
     nfsServer.startServiceInternal(true);
   }
+  
+  public static void main(String[] args) throws IOException {
+    startService(args, null);
+  }
 }
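
The new startService() lets a launcher hand in an already-bound registration socket, so a process can bind a reserved UDP port for portmapper registration while it still has the privilege to do so, then drop privileges before serving. A minimal sketch of such a launcher follows; the class name and the fixed port 987 are illustrative assumptions, and the sketch sits in the same package because startService is package-private.

package org.apache.hadoop.hdfs.nfs.nfs3; // startService is package-private

import java.io.IOException;
import java.net.DatagramSocket;
import java.net.InetSocketAddress;

public class PrivilegedStarterSketch {
  public static void main(String[] args) throws IOException {
    // Bind the registration socket while the process can still use a
    // reserved port; 987 is an illustrative choice, not from this diff.
    DatagramSocket registrationSocket = new DatagramSocket(null);
    registrationSocket.setReuseAddress(true);
    registrationSocket.bind(new InetSocketAddress(987));
    Nfs3.startService(args, registrationSocket);
  }
}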

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java Tue Aug 19 23:49:39 2014
@@ -154,13 +154,15 @@ public class Nfs3Utils {
     if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) {
       if (type == NfsFileType.NFSREG.toValue()) {
         rtn |= Nfs3Constant.ACCESS3_EXECUTE;
+      } else {
+        rtn |= Nfs3Constant.ACCESS3_LOOKUP;
       }
     }
     return rtn;
   }
 
   public static int getAccessRightsForUserGroup(int uid, int gid,
-      Nfs3FileAttributes attr) {
+      int[] auxGids, Nfs3FileAttributes attr) {
     int mode = attr.getMode();
     if (uid == attr.getUid()) {
       return getAccessRights(mode >> 6, attr.getType());
@@ -168,6 +170,14 @@ public class Nfs3Utils {
     if (gid == attr.getGid()) {
       return getAccessRights(mode >> 3, attr.getType());
     }
+    // Check for membership in auxiliary groups
+    if (auxGids != null) {
+      for (int auxGid : auxGids) {
+        if (attr.getGid() == auxGid) {
+          return getAccessRights(mode >> 3, attr.getType());
+        }
+      }
+    }
     return getAccessRights(mode, attr.getType());
   }
   
@@ -191,4 +201,4 @@ public class Nfs3Utils {
     data[7] = (byte) (v >>> 0);
     return data;
   }
-}
\ No newline at end of file
+}
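
Two behavioral changes above: the execute bit on a non-regular file (in practice, a directory) now also grants ACCESS3_LOOKUP, and the user/group access check consults the caller's auxiliary group IDs rather than only the primary gid. The helper below restates the permission-bit selection as a self-contained sketch with illustrative names; as the main() shows, a caller 1001:1001 with supplementary group 100 now receives the group bits of a file owned by 0:100 instead of falling through to the "other" bits.

// Owner bits live at mode >> 6, group bits at mode >> 3, "other" at mode >> 0.
final class AccessBitsSketch {
  static int selectPermissionBits(int uid, int gid, int[] auxGids,
      int fileUid, int fileGid, int mode) {
    if (uid == fileUid) {
      return (mode >> 6) & 7;      // owner class
    }
    if (gid == fileGid) {
      return (mode >> 3) & 7;      // primary-group class
    }
    if (auxGids != null) {         // new in this change
      for (int auxGid : auxGids) {
        if (fileGid == auxGid) {
          return (mode >> 3) & 7;  // supplementary-group class
        }
      }
    }
    return mode & 7;               // "other" class
  }

  public static void main(String[] args) {
    // File owned by 0:100, mode 0750 (rwxr-x---): prints 5 (r-x).
    System.out.println(
        selectPermissionBits(1001, 1001, new int[] {100}, 0, 100, 0750));
  }
}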

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java Tue Aug 19 23:49:39 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.nfs3.WriteCtx.DataState;
 import org.apache.hadoop.io.BytesWritable.Comparator;
 import org.apache.hadoop.io.IOUtils;
@@ -54,6 +55,7 @@ import org.apache.hadoop.nfs.nfs3.respon
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
 import org.jboss.netty.channel.Channel;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -93,6 +95,7 @@ class OpenFileCtx {
    */
   private AtomicLong nextOffset;
   private final HdfsDataOutputStream fos;
+  private final boolean aixCompatMode;
   
   // It's updated after each sync to HDFS
   private Nfs3FileAttributes latestAttr;
@@ -136,7 +139,7 @@ class OpenFileCtx {
       this.channel = channel;
       this.xid = xid;
       this.preOpAttr = preOpAttr;
-      this.startTime = System.currentTimeMillis();
+      this.startTime = Time.monotonicNow();
     }
 
     @Override
@@ -158,11 +161,11 @@ class OpenFileCtx {
   private Daemon dumpThread;
   
   private void updateLastAccessTime() {
-    lastAccessTime = System.currentTimeMillis();
+    lastAccessTime = Time.monotonicNow();
   }
 
   private boolean checkStreamTimeout(long streamTimeout) {
-    return System.currentTimeMillis() - lastAccessTime > streamTimeout;
+    return Time.monotonicNow() - lastAccessTime > streamTimeout;
   }
   
   long getLastAccessTime() {
@@ -197,8 +200,15 @@ class OpenFileCtx {
   
   OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
       String dumpFilePath, DFSClient client, IdUserGroup iug) {
+    this(fos, latestAttr, dumpFilePath, client, iug, false);
+  }
+  
+  OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
+      String dumpFilePath, DFSClient client, IdUserGroup iug,
+      boolean aixCompatMode) {
     this.fos = fos;
     this.latestAttr = latestAttr;
+    this.aixCompatMode = aixCompatMode;
     // We use the ReverseComparatorOnMin as the comparator of the map. In this
     // way, we first dump the data with larger offset. In the meanwhile, we
     // retrieve the last element to write back to HDFS.
@@ -696,7 +706,7 @@ class OpenFileCtx {
           + " updating the mtime, then return success");
       Nfs3FileAttributes postOpAttr = null;
       try {
-        dfsClient.setTimes(path, System.currentTimeMillis(), -1);
+        dfsClient.setTimes(path, Time.monotonicNow(), -1);
         postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
       } catch (IOException e) {
         LOG.info("Got error when processing perfect overwrite, path=" + path
@@ -778,15 +788,29 @@ class OpenFileCtx {
     }
 
     if (commitOffset > 0) {
-      if (commitOffset > flushed) {
-        if (!fromRead) {
-          CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
-              preOpAttr);
-          pendingCommits.put(commitOffset, commitCtx);
+      if (aixCompatMode) {
+        // The AIX NFS client misinterprets RFC-1813 and will always send 4096
+        // for the commitOffset even if fewer bytes than that have ever (or will
+        // ever) be sent by the client. So, if in AIX compatibility mode, we
+        // will always DO_SYNC if the number of bytes to commit have already all
+        // been flushed, else we will fall through to the logic below which
+        // checks for pending writes in the case that we're being asked to
+        // commit more bytes than have so far been flushed. See HDFS-6549 for
+        // more info.
+        if (commitOffset <= flushed) {
+          return COMMIT_STATUS.COMMIT_DO_SYNC;
         }
-        return COMMIT_STATUS.COMMIT_WAIT;
       } else {
-        return COMMIT_STATUS.COMMIT_DO_SYNC;
+        if (commitOffset > flushed) {
+          if (!fromRead) {
+            CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
+                preOpAttr);
+            pendingCommits.put(commitOffset, commitCtx);
+          }
+          return COMMIT_STATUS.COMMIT_WAIT;
+        } else {
+          return COMMIT_STATUS.COMMIT_DO_SYNC;
+        } 
       }
     }
 
@@ -822,7 +846,7 @@ class OpenFileCtx {
    */
   public synchronized boolean streamCleanup(long fileId, long streamTimeout) {
     Preconditions
-        .checkState(streamTimeout >= Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
+        .checkState(streamTimeout >= NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
     if (!activeState) {
       return true;
     }
@@ -997,7 +1021,7 @@ class OpenFileCtx {
       
       if (LOG.isDebugEnabled()) {
         LOG.debug("FileId: " + latestAttr.getFileId() + " Service time:"
-            + (System.currentTimeMillis() - commit.getStartTime())
+            + (Time.monotonicNow() - commit.getStartTime())
             + "ms. Sent response for commit:" + commit);
       }
       entry = pendingCommits.firstEntry();
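
The COMMIT path gains an AIX compatibility mode (HDFS-6549): the AIX NFS client can request a commit offset of 4096 regardless of how much data was actually written, so in that mode the server syncs as soon as the requested range has been flushed. The rest of the hunks swap System.currentTimeMillis() for Time.monotonicNow() when measuring durations; note that the conversion in the perfect-overwrite path passes Time.monotonicNow() to dfsClient.setTimes(), which takes an epoch mtime, so a wall-clock source would arguably be the correct value there. A condensed sketch of the new commit decision, with pending-write bookkeeping elided and illustrative names:

final class CommitDecisionSketch {
  enum Status { COMMIT_DO_SYNC, COMMIT_WAIT }

  static Status decide(long commitOffset, long flushed,
      boolean aixCompatMode) {
    if (aixCompatMode) {
      // AIX may ask to commit 4096 bytes no matter how much was written:
      // sync once the requested range is flushed, otherwise wait on the
      // pending writes (handling elided here).
      return commitOffset <= flushed ? Status.COMMIT_DO_SYNC
                                     : Status.COMMIT_WAIT;
    }
    return commitOffset > flushed ? Status.COMMIT_WAIT
                                  : Status.COMMIT_DO_SYNC;
  }
}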

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java Tue Aug 19 23:49:39 2014
@@ -24,10 +24,11 @@ import java.util.concurrent.ConcurrentMa
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -47,9 +48,9 @@ class OpenFileCtxCache {
   private final long streamTimeout;
   private final StreamMonitor streamMonitor;
 
-  OpenFileCtxCache(Configuration config, long streamTimeout) {
-    maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
-        Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
+  OpenFileCtxCache(NfsConfiguration config, long streamTimeout) {
+    maxStreams = config.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY,
+        NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_DEFAULT);
     LOG.info("Maximum open streams is " + maxStreams);
     this.streamTimeout = streamTimeout;
     streamMonitor = new StreamMonitor();
@@ -99,9 +100,9 @@ class OpenFileCtxCache {
       LOG.warn("No eviction candidate. All streams have pending work.");
       return null;
     } else {
-      long idleTime = System.currentTimeMillis()
+      long idleTime = Time.monotonicNow()
           - idlest.getValue().getLastAccessTime();
-      if (idleTime < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
+      if (idleTime < NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("idlest stream's idle time:" + idleTime);
         }
@@ -250,7 +251,7 @@ class OpenFileCtxCache {
 
         // Check if it can sleep
         try {
-          long workedTime = System.currentTimeMillis() - lastWakeupTime;
+          long workedTime = Time.monotonicNow() - lastWakeupTime;
           if (workedTime < rotation) {
             if (LOG.isTraceEnabled()) {
               LOG.trace("StreamMonitor can still have a sleep:"
@@ -258,7 +259,7 @@ class OpenFileCtxCache {
             }
             Thread.sleep(rotation - workedTime);
           }
-          lastWakeupTime = System.currentTimeMillis();
+          lastWakeupTime = Time.monotonicNow();
 
         } catch (InterruptedException e) {
           LOG.info("StreamMonitor got interrupted");
@@ -267,4 +268,4 @@ class OpenFileCtxCache {
       }
     }
   }
-}
\ No newline at end of file
+}
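
The same monotonic-clock substitution is applied here for idle-time and wakeup bookkeeping. The point of the change, sketched below under illustrative names: Time.monotonicNow() is derived from System.nanoTime(), so differences between two readings are immune to wall-clock steps (NTP corrections, manual clock changes), but the absolute values are not epoch timestamps and are only meaningful as intervals.

import org.apache.hadoop.util.Time;

final class MonotonicTimeoutSketch {
  private long lastAccessTime = Time.monotonicNow();

  void touch() {
    lastAccessTime = Time.monotonicNow();
  }

  boolean timedOut(long streamTimeoutMs) {
    // The difference of two monotonicNow() readings is a reliable
    // elapsed-time measurement even if the system clock is adjusted.
    return Time.monotonicNow() - lastAccessTime > streamTimeoutMs;
  }
}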


